platform: modularize api/gui, add docs-tests-web foundation, and refresh root config

This commit is contained in:
beckline
2026-03-26 22:40:54 +03:00
parent 0e2d7f61ea
commit 6a56d734c2
562 changed files with 70151 additions and 16423 deletions

75
tests/api_sanity.sh Executable file
View File

@@ -0,0 +1,75 @@
#!/usr/bin/env bash
# Sanity suite for the HTTP API:
#   * JSON key presence on the core GET endpoints,
#   * method guard on a POST-only route (GET must yield 405),
#   * SSE stream availability and Content-Type headers.
set -euo pipefail

# Base URL of the API under test; override via the API_URL env var.
API_URL="${API_URL:-http://127.0.0.1:8080}"
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR"' EXIT

# check_json_get PATH KEYS_CSV
#   GET ${API_URL}${PATH}; require HTTP 200, then require every key in the
#   comma-separated KEYS_CSV to exist at the top level of the JSON body.
check_json_get() {
  local path="$1"
  local keys_csv="$2"
  local out_file="$TMP_DIR/out.json"
  local code
  # -w '%{http_code}' captures only the status code; the body goes to out_file.
  code="$(curl -sS --max-time 15 -o "$out_file" -w "%{http_code}" "${API_URL}${path}")"
  if [[ "$code" != "200" ]]; then
    echo "[sanity] ${path} -> HTTP ${code}" >&2
    cat "$out_file" >&2 || true
    return 1
  fi
  # Validate required top-level keys with an inline python3 helper.
  python3 - "$out_file" "$keys_csv" "$path" <<'PY'
import json
import sys

file_path, keys_csv, path = sys.argv[1], sys.argv[2], sys.argv[3]
keys = [k for k in keys_csv.split(",") if k]
with open(file_path, "r", encoding="utf-8") as f:
    data = json.load(f)
for key in keys:
    if key not in data:
        print(f"[sanity] {path}: missing key '{key}'", file=sys.stderr)
        sys.exit(1)
print(f"[sanity] {path}: OK")
PY
}

echo "[sanity] API_URL=${API_URL}"
check_json_get "/healthz" "status,time"
check_json_get "/api/v1/status" "timestamp,iface,table"
check_json_get "/api/v1/routes/status" "timestamp,iface,table"
check_json_get "/api/v1/traffic/mode" "mode,healthy,message"
check_json_get "/api/v1/traffic/interfaces" "interfaces,preferred_iface,active_iface"
check_json_get "/api/v1/dns/status" "mode,smartdns_addr,via_smartdns"
check_json_get "/api/v1/vpn/status" "status_word,unit_state"
check_json_get "/api/v1/vpn/autoloop-status" "status_word,raw_text"
check_json_get "/api/v1/routes/timer" "enabled"

# Method check: POST-only endpoint should reject GET.
code="$(curl -sS --max-time 10 -o /dev/null -w "%{http_code}" "${API_URL}/api/v1/routes/update")"
if [[ "$code" != "405" ]]; then
  echo "[sanity] /api/v1/routes/update GET expected 405, got ${code}" >&2
  exit 1
fi
echo "[sanity] method guard OK (/api/v1/routes/update GET => 405)"

# Quick stream availability: open SSE briefly; timeout is expected.
headers_file="$TMP_DIR/events.headers"
# Disable errexit around the curl: rc 28 (operation timeout) is the normal
# outcome for a never-ending event stream and must not abort the script.
set +e
http_code="$(curl -sS --max-time 2 -D "$headers_file" -o /dev/null -w "%{http_code}" "${API_URL}/api/v1/events/stream")"
curl_rc=$?
set -e
if [[ "$http_code" != "200" ]]; then
  echo "[sanity] /api/v1/events/stream -> HTTP ${http_code}" >&2
  cat "$headers_file" >&2 || true
  exit 1
fi
# rc 0 (server closed) and rc 28 (timeout while streaming) are both fine.
if [[ $curl_rc -ne 0 && $curl_rc -ne 28 ]]; then
  echo "[sanity] unexpected curl rc=${curl_rc} for events stream" >&2
  exit 1
fi
if ! grep -qi '^Content-Type: text/event-stream' "$headers_file"; then
  echo "[sanity] /api/v1/events/stream missing text/event-stream content-type" >&2
  cat "$headers_file" >&2 || true
  exit 1
fi
echo "[sanity] events stream headers OK (curl rc=${curl_rc})"
echo "[sanity] all checks passed"

105
tests/events_stream.py Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env python3
"""SSE smoke test with active event trigger via /api/v1/trace/append."""
import http.client
import json
import os
import sys
import threading
import time
from urllib.parse import urlparse
API_URL = os.environ.get("API_URL", "http://127.0.0.1:8080")
TIMEOUT = float(os.environ.get("EVENTS_TIMEOUT_SEC", "12"))
EVENT_REQUIRED = "trace_append"
def parse_base(api_url: str):
    """Split *api_url* into ``(host, port, base_path)``.

    Only plain-http URLs are accepted; the trailing slash of the path is
    stripped so endpoint paths can be appended directly.
    """
    url = urlparse(api_url)
    if url.scheme != "http":
        raise ValueError("only http API_URL is supported for this smoke test")
    return (
        url.hostname or "127.0.0.1",
        url.port or 80,
        url.path.rstrip("/"),
    )
def post_trace_append(host: str, port: int, base_path: str):
    """POST a probe line to ``/api/v1/trace/append`` to trigger an SSE event.

    Sleeps briefly first so the caller's SSE subscription is active before
    the trigger fires.  Raises ``RuntimeError`` on any non-200 response.

    Fix: the original leaked the connection on the error path (the
    RuntimeError was raised before ``conn.close()``); the socket is now
    always released via try/finally.
    """
    # Small delay to ensure SSE subscription is active before trigger.
    time.sleep(1.0)
    conn = http.client.HTTPConnection(host, port, timeout=8)
    try:
        body = json.dumps({"kind": "gui", "line": f"sse-probe-{int(time.time())}"})
        path = f"{base_path}/api/v1/trace/append"
        conn.request("POST", path, body=body, headers={"Content-Type": "application/json"})
        resp = conn.getresponse()
        payload = resp.read().decode("utf-8", errors="ignore")
        if resp.status != 200:
            raise RuntimeError(f"trace/append failed: HTTP {resp.status}, body={payload}")
    finally:
        # Always release the socket, even when the request or check fails.
        conn.close()
def main():
    """Open the SSE stream, trigger a trace_append event, and verify receipt.

    Exits nonzero when the stream is missing, lacks an event id, or the
    required event never arrives within the deadline.
    """
    host, port, base_path = parse_base(API_URL)
    stream_path = f"{base_path}/api/v1/events/stream"
    conn = http.client.HTTPConnection(host, port, timeout=TIMEOUT)
    # Hand-rolled request so the response can be consumed line by line.
    conn.putrequest("GET", stream_path)
    conn.putheader("Accept", "text/event-stream")
    conn.putheader("Cache-Control", "no-cache")
    conn.putheader("Connection", "keep-alive")
    conn.endheaders()
    resp = conn.getresponse()
    if resp.status != 200:
        print(f"[events] unexpected HTTP {resp.status}", file=sys.stderr)
        sys.exit(1)
    content_type = resp.getheader("Content-Type", "")
    if "text/event-stream" not in content_type:
        print(f"[events] bad Content-Type: {content_type}", file=sys.stderr)
        sys.exit(1)
    trigger_err = []

    def trigger():
        # Runs in a daemon thread; failures are collected, not raised,
        # so the main read loop can report them after draining the stream.
        try:
            post_trace_append(host, port, base_path)
        except Exception as exc:
            trigger_err.append(str(exc))

    t = threading.Thread(target=trigger, daemon=True)
    t.start()
    got_id = False
    got_required = False
    deadline = time.time() + TIMEOUT
    # Read SSE lines until the required event arrives or the deadline passes.
    while time.time() < deadline:
        raw = resp.readline()
        if not raw:
            break  # server closed the stream
        line = raw.decode("utf-8", errors="ignore").strip()
        if line.startswith("id:"):
            got_id = True
        if line.startswith("event:"):
            event = line.split(":", 1)[1].strip()
            if event == EVENT_REQUIRED:
                got_required = True
                print(f"[events] got required event: {event}")
                break
    resp.close()
    conn.close()
    t.join(timeout=0.5)
    if trigger_err:
        print(f"[events] trigger failed: {trigger_err[0]}", file=sys.stderr)
        sys.exit(1)
    if not got_id:
        print("[events] no SSE event id observed", file=sys.stderr)
        sys.exit(1)
    if not got_required:
        print(f"[events] missing required event: {EVENT_REQUIRED}", file=sys.stderr)
        sys.exit(1)
    print("[events] stream smoke passed")


if __name__ == "__main__":
    main()

33
tests/run_all.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Smoke-test orchestrator: runs every suite in order; set -e stops at the
# first failing suite.
set -euo pipefail

API_URL="${API_URL:-http://127.0.0.1:8080}"
echo "[run_all] API_URL=${API_URL}"

# run LABEL CMD...
#   Announce the suite, execute it, and confirm success.
run() {
  local label="$1"
  shift
  echo "[run_all] >>> ${label}"
  "$@"
  echo "[run_all] <<< ${label}: OK"
}

run "api_sanity" env API_URL="${API_URL}" ./tests/api_sanity.sh
run "transport_packaging_smoke" ./tests/transport_packaging_smoke.sh
run "transport_packaging_auto_update" ./tests/transport_packaging_auto_update.sh
run "transport_packaging_policy_rollout" ./tests/transport_packaging_policy_rollout.sh
run "vpn_locations_swr" env API_URL="${API_URL}" ./tests/vpn_locations_swr.sh
run "trace_append" env API_URL="${API_URL}" ./tests/trace_append.sh
run "events_stream" env API_URL="${API_URL}" ./tests/events_stream.py
run "vpn_login_flow" env API_URL="${API_URL}" ./tests/vpn_login_flow.py
run "transport_flow" env API_URL="${API_URL}" ./tests/transport_flow_smoke.py
run "transport_platform_compatibility" env API_URL="${API_URL}" ./tests/transport_platform_compatibility_smoke.py
run "transport_runbook_cli" env API_URL="${API_URL}" ./tests/transport_runbook_cli_smoke.sh
run "transport_recovery_runbook" env API_URL="${API_URL}" ./tests/transport_recovery_runbook_smoke.sh
run "transport_systemd_real_e2e" env API_URL="${API_URL}" ./tests/transport_systemd_real_e2e.py
run "transport_production_like_e2e" env API_URL="${API_URL}" ./tests/transport_production_like_e2e.py
run "transport_singbox_e2e" env API_URL="${API_URL}" ./tests/transport_singbox_e2e.py
run "transport_dnstt_e2e" env API_URL="${API_URL}" ./tests/transport_dnstt_e2e.py
run "transport_phoenix_e2e" env API_URL="${API_URL}" ./tests/transport_phoenix_e2e.py
echo "[run_all] all smoke tests passed"

43
tests/trace_append.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Round-trip test for the trace API: append a unique line, then confirm it
# is visible via both the plain-text and the JSON trace endpoints.
set -euo pipefail

API_URL="${API_URL:-http://127.0.0.1:8080}"
# Unique marker so concurrent runs cannot collide.
SENT_LINE="test-trace-$(date +%s)-$RANDOM"
TMP_JSON="$(mktemp)"
trap 'rm -f "$TMP_JSON"' EXIT

# Append the marker line through the JSON API.
resp="$(curl -sS --max-time 15 "${API_URL}/api/v1/trace/append" \
  -H "Content-Type: application/json" \
  -d "{\"kind\":\"gui\",\"line\":\"${SENT_LINE}\"}")"
# The append response must carry ok=true.
python3 - "$resp" <<'PY'
import json
import sys

obj = json.loads(sys.argv[1])
if obj.get("ok") is not True:
    print(f"[trace] append response not ok: {obj}", file=sys.stderr)
    sys.exit(1)
PY

# Readback 1: the plain-text trace must contain the marker line.
plain="$(curl -sS --max-time 15 "${API_URL}/api/v1/trace")"
if ! grep -q -- "${SENT_LINE}" <<<"${plain}"; then
  echo "[trace] appended line not found in /api/v1/trace" >&2
  exit 1
fi

# Readback 2: the JSON trace (full mode) must carry the marker in lines[].
curl -sS --max-time 15 "${API_URL}/api/v1/trace-json?mode=full" >"$TMP_JSON"
python3 - "$TMP_JSON" "$SENT_LINE" <<'PY'
import json
import sys

file_path, needle = sys.argv[1], sys.argv[2]
with open(file_path, "r", encoding="utf-8") as f:
    obj = json.load(f)
lines = obj.get("lines")
if not isinstance(lines, list):
    print("[trace] /trace-json invalid payload: no lines[]", file=sys.stderr)
    sys.exit(1)
if not any(needle in line for line in lines if isinstance(line, str)):
    print("[trace] appended line not found in /trace-json", file=sys.stderr)
    sys.exit(1)
PY

echo "[trace] append and readback OK"

255
tests/transport_dnstt_e2e.py Executable file
View File

@@ -0,0 +1,255 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import time
from typing import Dict, Optional, Tuple
import urllib.error
import urllib.request
def fail(msg: str) -> int:
    """Report *msg* as a suite error and return the exit status 1."""
    print("[transport_dnstt_e2e] ERROR: " + msg)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Issue an HTTP request and decode the JSON-object response.

    Returns ``(status, dict)``.  Transport-level failures yield ``(0, {})``;
    a non-dict or unparseable body yields ``{}`` alongside the real status.
    """
    headers = {"Accept": "application/json"}
    body = None
    if payload is not None:
        body = json.dumps(payload).encode("utf-8")
        headers["Content-Type"] = "application/json"
    req = urllib.request.Request(
        f"{api_url.rstrip('/')}{path}",
        data=body,
        method=method.upper(),
        headers=headers,
    )
    try:
        with urllib.request.urlopen(req, timeout=20.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as err:
        # HTTP errors still carry a usable status and body.
        raw = err.read().decode("utf-8", errors="replace")
        status = int(err.code or 500)
    except Exception:
        # Connection-level failure: signal with status 0 and no payload.
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    return (status, parsed) if isinstance(parsed, dict) else (status, {})
def ensure_client_deleted(api_url: str, client_id: str) -> None:
    """Best-effort force-delete of a transport client; the result is ignored."""
    path = f"/api/v1/transport/clients/{client_id}?force=true"
    request_json(api_url, "DELETE", path)
def create_client(api_url: str, payload: Dict) -> Tuple[int, Dict]:
    """POST *payload* to the transport clients collection endpoint."""
    return request_json(api_url, "POST", "/api/v1/transport/clients", payload)
def main() -> int:
    """Run the dnstt transport e2e suite; returns a process exit code.

    Four cases: mock-runner lifecycle, ssh-overlay host guard, ssh-overlay
    unit-name guard, and dnstt template validation.  Each case cleans up
    its client in a finally block, even on failure.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_dnstt_e2e] API_URL={api_url}")
    # Preflight: 404 means the backend predates the transport API -> skip.
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        print("[transport_dnstt_e2e] SKIP: transport endpoints are not available on this backend")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    clients_caps = caps.get("clients") or {}
    if not isinstance(clients_caps, dict) or "dnstt" not in clients_caps:
        return fail(f"dnstt capability is missing: {caps}")
    # runtime_modes is optional on older builds; when advertised, exec is required.
    runtime_modes = caps.get("runtime_modes") or {}
    if isinstance(runtime_modes, dict) and runtime_modes:
        if not bool(runtime_modes.get("exec", False)):
            return fail(f"runtime_modes.exec is not supported: {caps}")
    else:
        print("[transport_dnstt_e2e] WARN: runtime_modes are not advertised by current backend build")
    # Timestamp + pid make client ids unique across concurrent runs.
    ts = int(time.time())
    pid = os.getpid()

    # Case 1: successful lifecycle on mock runner.
    client_ok = f"e2e-dnstt-ok-{ts}-{pid}"
    ensure_client_deleted(api_url, client_ok)
    status, create_ok = create_client(
        api_url,
        {
            "id": client_ok,
            "name": "E2E DNSTT Mock",
            "kind": "dnstt",
            "enabled": False,
            "config": {
                "runner": "mock",
                "runtime_mode": "exec",
            },
        },
    )
    if status != 200 or not bool(create_ok.get("ok", False)):
        return fail(f"create mock dnstt failed status={status} payload={create_ok}")
    try:
        status, provision = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/provision")
        if status == 404:
            print("[transport_dnstt_e2e] SKIP: provision endpoint is not available on current backend build")
            return 0
        if status != 200 or not bool(provision.get("ok", False)):
            return fail(f"provision mock dnstt failed status={status} payload={provision}")
        # start -> expect status_after=up
        status, start = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/start")
        if status != 200 or not bool(start.get("ok", False)):
            return fail(f"start mock dnstt failed status={status} payload={start}")
        if str(start.get("status_after") or "").strip().lower() != "up":
            return fail(f"start did not set status_after=up: {start}")
        # health -> up or degraded are both acceptable right after start
        status, health = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/health")
        if status != 200 or not bool(health.get("ok", False)):
            return fail(f"health mock dnstt failed status={status} payload={health}")
        if str(health.get("status") or "").strip().lower() not in ("up", "degraded"):
            return fail(f"unexpected health status after start: {health}")
        status, restart = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/restart")
        if status != 200 or not bool(restart.get("ok", False)):
            return fail(f"restart mock dnstt failed status={status} payload={restart}")
        # stop -> expect status_after=down
        status, stop = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/stop")
        if status != 200 or not bool(stop.get("ok", False)):
            return fail(f"stop mock dnstt failed status={status} payload={stop}")
        if str(stop.get("status_after") or "").strip().lower() != "down":
            return fail(f"stop did not set status_after=down: {stop}")
        # metrics endpoint is optional on older builds (404 -> warn only).
        status, metrics = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/metrics")
        if status == 404:
            print("[transport_dnstt_e2e] WARN: metrics endpoint is not available on current backend build")
        else:
            if status != 200 or not bool(metrics.get("ok", False)):
                return fail(f"metrics mock dnstt failed status={status} payload={metrics}")
            metrics_obj = metrics.get("metrics") or {}
            if not isinstance(metrics_obj, dict):
                return fail(f"metrics payload is invalid: {metrics}")
            # start/restart/stop above must have produced at least two transitions.
            if int(metrics_obj.get("state_changes", 0) or 0) < 2:
                return fail(f"state_changes must be >=2 after lifecycle sequence: {metrics}")
        print("[transport_dnstt_e2e] case1 mock lifecycle: ok")
    finally:
        ensure_client_deleted(api_url, client_ok)

    # Case 2: ssh overlay requires ssh_host.
    client_ssh = f"e2e-dnstt-ssh-host-{ts}-{pid}"
    ensure_client_deleted(api_url, client_ssh)
    status, create_ssh = create_client(
        api_url,
        {
            "id": client_ssh,
            "name": "E2E DNSTT SSH Overlay",
            "kind": "dnstt",
            "enabled": False,
            "config": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"{client_ssh}.service",
                "exec_start": "/usr/bin/true",
                "ssh_tunnel": True,
            },
        },
    )
    if status != 200 or not bool(create_ssh.get("ok", False)):
        return fail(f"create dnstt ssh overlay failed status={status} payload={create_ssh}")
    try:
        status, provision_ssh = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ssh}/provision")
        # Provision must be rejected: ssh_tunnel without ssh_host is incomplete.
        if status != 200 or bool(provision_ssh.get("ok", True)):
            return fail(f"dnstt ssh overlay provision must fail status={status} payload={provision_ssh}")
        if str(provision_ssh.get("code") or "").strip() != "TRANSPORT_BACKEND_PROVISION_CONFIG_REQUIRED":
            return fail(f"dnstt ssh overlay wrong code: {provision_ssh}")
        msg = str(provision_ssh.get("message") or "").strip().lower()
        if "ssh_host" not in msg:
            return fail(f"dnstt ssh overlay message must reference ssh_host: {provision_ssh}")
        print("[transport_dnstt_e2e] case2 ssh overlay host guard: ok")
    finally:
        ensure_client_deleted(api_url, client_ssh)

    # Case 3: ssh overlay requires valid ssh_unit when provided.
    client_ssh_unit = f"e2e-dnstt-ssh-unit-{ts}-{pid}"
    ensure_client_deleted(api_url, client_ssh_unit)
    status, create_ssh_unit = create_client(
        api_url,
        {
            "id": client_ssh_unit,
            "name": "E2E DNSTT SSH Unit Validation",
            "kind": "dnstt",
            "enabled": False,
            "config": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"{client_ssh_unit}.service",
                "exec_start": "/usr/bin/true",
                "ssh_tunnel": True,
                "ssh_host": "127.0.0.1",
                "ssh_unit": "invalid unit name",
            },
        },
    )
    if status != 200 or not bool(create_ssh_unit.get("ok", False)):
        return fail(f"create dnstt ssh unit validation client failed status={status} payload={create_ssh_unit}")
    try:
        status, provision_ssh_unit = request_json(
            api_url, "POST", f"/api/v1/transport/clients/{client_ssh_unit}/provision"
        )
        # The malformed ssh_unit name above must be rejected at provision time.
        if status != 200 or bool(provision_ssh_unit.get("ok", True)):
            return fail(f"dnstt ssh unit validation provision must fail status={status} payload={provision_ssh_unit}")
        if str(provision_ssh_unit.get("code") or "").strip() != "TRANSPORT_BACKEND_PROVISION_CONFIG_REQUIRED":
            return fail(f"dnstt ssh unit validation wrong code: {provision_ssh_unit}")
        msg = str(provision_ssh_unit.get("message") or "").strip().lower()
        if "ssh_unit" not in msg:
            return fail(f"dnstt ssh unit validation message must reference ssh_unit: {provision_ssh_unit}")
        print("[transport_dnstt_e2e] case3 ssh overlay unit guard: ok")
    finally:
        ensure_client_deleted(api_url, client_ssh_unit)

    # Case 4: dnstt template validation must reject incomplete config.
    client_tpl = f"e2e-dnstt-template-{ts}-{pid}"
    ensure_client_deleted(api_url, client_tpl)
    status, create_tpl = create_client(
        api_url,
        {
            "id": client_tpl,
            "name": "E2E DNSTT Template Validation",
            "kind": "dnstt",
            "enabled": False,
            "config": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"{client_tpl}.service",
                # no exec_start, missing template fields on purpose.
            },
        },
    )
    if status != 200 or not bool(create_tpl.get("ok", False)):
        return fail(f"create dnstt template client failed status={status} payload={create_tpl}")
    try:
        status, provision_tpl = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_tpl}/provision")
        if status != 200 or bool(provision_tpl.get("ok", True)):
            return fail(f"dnstt template provision must fail status={status} payload={provision_tpl}")
        if str(provision_tpl.get("code") or "").strip() != "TRANSPORT_BACKEND_PROVISION_CONFIG_REQUIRED":
            return fail(f"dnstt template wrong code: {provision_tpl}")
        msg = str(provision_tpl.get("message") or "").strip().lower()
        if "dnstt template requires" not in msg:
            return fail(f"dnstt template message must mention required fields: {provision_tpl}")
        print("[transport_dnstt_e2e] case4 template validation guard: ok")
    finally:
        ensure_client_deleted(api_url, client_tpl)

    print("[transport_dnstt_e2e] passed")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

206
tests/transport_flow_smoke.py Executable file
View File

@@ -0,0 +1,206 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import sys
import time
from typing import Dict, Optional, Tuple
import urllib.error
import urllib.request
# Make the GUI package importable when the test runs from the repo root:
# the controller under test lives in selective-vpn-gui/ next to tests/.
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
GUI_DIR = os.path.join(REPO_ROOT, "selective-vpn-gui")
if GUI_DIR not in sys.path:
    sys.path.insert(0, GUI_DIR)

# Project-local imports; resolvable only after the sys.path insertion above.
from api_client import ApiClient, ApiError
from dashboard_controller import DashboardController
def fail(msg: str) -> int:
    """Log *msg* as a smoke-test error and return the exit status 1."""
    print("[transport] ERROR: " + msg)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Perform an HTTP call against the API and parse a JSON-object body.

    Returns ``(status, dict)``; ``(0, {})`` marks a transport-level failure,
    and any body that is not a JSON object collapses to ``{}``.
    """
    headers = {"Accept": "application/json"}
    encoded = None
    if payload is not None:
        encoded = json.dumps(payload).encode("utf-8")
        headers["Content-Type"] = "application/json"
    request = urllib.request.Request(
        f"{api_url.rstrip('/')}{path}",
        data=encoded,
        method=method.upper(),
        headers=headers,
    )
    try:
        with urllib.request.urlopen(request, timeout=15.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as http_err:
        # An HTTP error response still provides a status code and a body.
        raw = http_err.read().decode("utf-8", errors="replace")
        status = int(http_err.code or 500)
    except Exception:
        # Connection failure: no status, no payload.
        return 0, {}
    try:
        decoded = json.loads(raw) if raw else {}
    except Exception:
        decoded = {}
    if not isinstance(decoded, dict):
        decoded = {}
    return status, decoded
def main() -> int:
    """End-to-end smoke of the transport API via the GUI controller + raw HTTP.

    Covers capability discovery, a full client lifecycle contract check,
    and the policy draft/validate/apply/rollback flow.  Returns a process
    exit code.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport] API_URL={api_url}")
    client = ApiClient(api_url)
    ctrl = DashboardController(client)
    # Capabilities via the GUI controller; 404 means old backend -> skip suite.
    try:
        caps = ctrl.transport_capabilities()
    except ApiError as e:
        if int(getattr(e, "status_code", 0) or 0) == 404:
            print("[transport] SKIP: running backend has no /api/v1/transport/* endpoints")
            return 0
        return fail(str(e))
    if not caps.clients:
        return fail("empty transport capabilities")
    print(f"[transport] capabilities: {', '.join(sorted(caps.clients.keys()))}")
    # Inspect the raw capabilities payload for optional feature advertisements;
    # all of these are informational (WARN, never fail).
    status, caps_raw = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 200 and bool(caps_raw.get("ok", False)):
        clients_raw = caps_raw.get("clients") or {}
        dnstt_caps = clients_raw.get("dnstt") if isinstance(clients_raw, dict) else {}
        if isinstance(dnstt_caps, dict):
            if bool(dnstt_caps.get("ssh_tunnel", False)):
                print("[transport] dnstt ssh_tunnel capability detected")
            else:
                print("[transport] WARN: dnstt ssh_tunnel capability is not advertised")
        runtime_modes_raw = caps_raw.get("runtime_modes") or {}
        if isinstance(runtime_modes_raw, dict):
            if bool(runtime_modes_raw.get("exec", False)):
                print("[transport] runtime_mode exec supported")
            else:
                print("[transport] WARN: runtime_mode exec is not advertised")
            if "embedded" in runtime_modes_raw or "sidecar" in runtime_modes_raw:
                print(
                    "[transport] runtime_modes map: "
                    + ", ".join(f"{k}={v}" for k, v in sorted(runtime_modes_raw.items()))
                )
        packaging_profiles = caps_raw.get("packaging_profiles") or {}
        if isinstance(packaging_profiles, dict):
            if bool(packaging_profiles.get("system", False)):
                print("[transport] packaging profile system supported")
            else:
                print("[transport] WARN: packaging profile system is not advertised")
        if isinstance(caps_raw.get("error_codes"), list):
            print(f"[transport] capabilities error_codes={len(caps_raw.get('error_codes') or [])}")
    # D4.1 contract smoke: lifecycle + health + metrics + unified runtime/error fields.
    client_id = f"smoke-{int(time.time())}-{os.getpid()}"
    status, create_data = request_json(
        api_url,
        "POST",
        "/api/v1/transport/clients",
        {
            "id": client_id,
            "name": "Smoke Transport",
            "kind": "singbox",
            "enabled": False,
        },
    )
    if status != 200 or not bool(create_data.get("ok", False)):
        return fail(f"create client failed status={status} payload={create_data}")
    # provision is optional (404 on older builds) and ok=false is tolerated.
    status, provision_data = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/provision")
    if status == 404:
        print("[transport] WARN: provision endpoint not available on current backend build")
    elif status != 200:
        return fail(f"provision failed status={status} payload={provision_data}")
    elif not bool(provision_data.get("ok", False)):
        print(f"[transport] WARN: provision returned ok=false payload={provision_data}")
    else:
        print("[transport] provision action ok")
    status, start_data = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/start")
    if status != 200 or not bool(start_data.get("ok", False)):
        return fail(f"start failed status={status} payload={start_data}")
    # status_after may be absent on legacy payloads; when present it must be "up".
    status_after = str(start_data.get("status_after") or "").strip().lower()
    if status_after and status_after != "up":
        return fail(f"start did not set status_up: {start_data}")
    status, health_data = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/health")
    if status != 200 or not bool(health_data.get("ok", False)):
        return fail(f"health failed status={status} payload={health_data}")
    # Only enforce the health status when start did not report status_after.
    if status_after == "" and str(health_data.get("status") or "").strip().lower() not in ("up", "degraded"):
        return fail(f"health status is not up/degraded after start: {health_data}")
    health_client_id = str(health_data.get("client_id") or "").strip()
    if health_client_id and health_client_id != client_id:
        return fail(f"health client_id mismatch: {health_data}")
    runtime = health_data.get("runtime") or {}
    if isinstance(runtime, dict) and isinstance(runtime.get("metrics"), dict):
        print("[transport] health runtime.metrics found")
    else:
        print("[transport] WARN: legacy health payload without runtime.metrics")
    # metrics endpoint is optional on older builds (404 -> warn only).
    status, metrics_data = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/metrics")
    if status == 404:
        print("[transport] WARN: metrics endpoint not available on current backend build")
    else:
        if status != 200 or not bool(metrics_data.get("ok", False)):
            return fail(f"metrics failed status={status} payload={metrics_data}")
        metrics = metrics_data.get("metrics") or {}
        if not isinstance(metrics, dict) or "state_changes" not in metrics:
            return fail(f"metrics payload missing state_changes: {metrics_data}")
    status, stop_data = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/stop")
    if status != 200 or not bool(stop_data.get("ok", False)):
        return fail(f"stop failed status={status} payload={stop_data}")
    status, _ = request_json(api_url, "DELETE", f"/api/v1/transport/clients/{client_id}?force=true")
    if status != 200:
        return fail(f"cleanup delete failed status={status}")
    print("[transport] backend-contract smoke: lifecycle/health/metrics ok")
    # Policy flow: draft -> validate -> (confirm) -> apply -> rollback.
    pol = ctrl.transport_policy()
    print(f"[transport] current revision={pol.revision} intents={len(pol.intents)}")
    flow = ctrl.transport_flow_draft(pol.intents, base_revision=pol.revision)
    flow = ctrl.transport_flow_validate(flow)
    print(
        f"[transport] validate phase={flow.phase} valid={flow.valid} "
        f"blocks={flow.block_count} warns={flow.warn_count}"
    )
    # Risky plans need an explicit confirmation before the forced apply.
    if flow.phase == "risky":
        flow = ctrl.transport_flow_confirm(flow)
        flow = ctrl.transport_flow_apply(flow, force_override=True)
    else:
        flow = ctrl.transport_flow_apply(flow, force_override=False)
    if flow.phase != "applied":
        return fail(f"apply phase={flow.phase} code={flow.code} message={flow.message}")
    print(f"[transport] apply ok revision={flow.applied_revision} apply_id={flow.apply_id}")
    flow = ctrl.transport_flow_rollback(flow)
    if flow.phase != "applied":
        return fail(f"rollback phase={flow.phase} code={flow.code} message={flow.message}")
    print(f"[transport] rollback ok revision={flow.applied_revision} apply_id={flow.apply_id}")
    conflicts = ctrl.transport_conflicts()
    print(
        f"[transport] conflicts: count={len(conflicts.items)} has_blocking={conflicts.has_blocking}"
    )
    print("[transport] flow smoke passed")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,161 @@
#!/usr/bin/env bash
# Smoke test for scripts/transport-packaging/auto_update.sh:
#   1. disabled       -> nothing is installed
#   2. enabled        -> installs v1 from a file:// manifest
#   3. interval gate  -> a newer manifest is skipped inside min-interval
#   4. --force-now    -> bypasses the gate and installs v2
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
AUTO_UPDATER="${ROOT_DIR}/scripts/transport-packaging/auto_update.sh"

# require_cmd NAME -- abort early when a tool the test depends on is missing.
require_cmd() {
  local cmd="$1"
  if ! command -v "$cmd" >/dev/null 2>&1; then
    echo "[transport_packaging_auto_update] missing command: ${cmd}" >&2
    exit 1
  fi
}
require_cmd bash
require_cmd sha256sum
require_cmd flock

tmp_dir="$(mktemp -d)"
trap 'rm -rf "$tmp_dir"' EXIT
assets_dir="${tmp_dir}/assets"
bin_root="${tmp_dir}/bin-root"
state_dir="${tmp_dir}/state"
mkdir -p "$assets_dir" "$bin_root" "$state_dir"

# Two fake "sing-box" release binaries whose output identifies the version.
asset_v1="${assets_dir}/sing-box-v1"
asset_v2="${assets_dir}/sing-box-v2"
cat >"$asset_v1" <<'EOF'
#!/usr/bin/env bash
echo "auto sing-box v1"
EOF
chmod +x "$asset_v1"
cat >"$asset_v2" <<'EOF'
#!/usr/bin/env bash
echo "auto sing-box v2"
EOF
chmod +x "$asset_v2"
sha_v1="$(sha256sum "$asset_v1" | awk '{print $1}')"
sha_v2="$(sha256sum "$asset_v2" | awk '{print $1}')"

# Manifests pointing at the local assets via file:// URLs with real checksums.
manifest_v1="${tmp_dir}/manifest-v1.json"
manifest_v2="${tmp_dir}/manifest-v2.json"
cat >"$manifest_v1" <<EOF
{
  "schema_version": 1,
  "components": {
    "singbox": {
      "enabled": true,
      "binary_name": "sing-box",
      "targets": {
        "linux-amd64": {
          "version": "1.0.0",
          "url": "file://${asset_v1}",
          "sha256": "${sha_v1}",
          "asset_type": "raw",
          "rollout": {
            "stage": "stable",
            "percent": 100
          }
        }
      }
    }
  }
}
EOF
cat >"$manifest_v2" <<EOF
{
  "schema_version": 1,
  "components": {
    "singbox": {
      "enabled": true,
      "binary_name": "sing-box",
      "targets": {
        "linux-amd64": {
          "version": "2.0.0",
          "url": "file://${asset_v2}",
          "sha256": "${sha_v2}",
          "asset_type": "raw",
          "rollout": {
            "stage": "stable",
            "percent": 100
          }
        }
      }
    }
  }
}
EOF

# Case 1: when disabled, the updater must not touch bin_root.
echo "[transport_packaging_auto_update] disabled -> skip"
"$AUTO_UPDATER" \
  --enabled false \
  --manifest "$manifest_v1" \
  --bin-root "$bin_root" \
  --state-dir "$state_dir" \
  --component singbox \
  --target linux-amd64 \
  --min-interval-sec 3600
if [[ -e "${bin_root}/sing-box" ]]; then
  echo "[transport_packaging_auto_update] expected no install when disabled" >&2
  exit 1
fi

# Case 2: first enabled run installs v1.
echo "[transport_packaging_auto_update] enabled -> install v1"
"$AUTO_UPDATER" \
  --enabled true \
  --manifest "$manifest_v1" \
  --bin-root "$bin_root" \
  --state-dir "$state_dir" \
  --component singbox \
  --target linux-amd64 \
  --min-interval-sec 3600
out_v1="$("${bin_root}/sing-box")"
if [[ "$out_v1" != "auto sing-box v1" ]]; then
  echo "[transport_packaging_auto_update] expected v1 output, got: ${out_v1}" >&2
  exit 1
fi

# Case 3: still inside min-interval, so the v2 manifest must be ignored.
echo "[transport_packaging_auto_update] interval gate -> skip update to v2"
"$AUTO_UPDATER" \
  --enabled true \
  --manifest "$manifest_v2" \
  --bin-root "$bin_root" \
  --state-dir "$state_dir" \
  --component singbox \
  --target linux-amd64 \
  --min-interval-sec 3600
out_after_gate="$("${bin_root}/sing-box")"
if [[ "$out_after_gate" != "auto sing-box v1" ]]; then
  echo "[transport_packaging_auto_update] expected interval-gated v1 output, got: ${out_after_gate}" >&2
  exit 1
fi

# Case 4: --force-now must bypass the interval gate and install v2.
echo "[transport_packaging_auto_update] force-now -> install v2"
"$AUTO_UPDATER" \
  --enabled true \
  --manifest "$manifest_v2" \
  --bin-root "$bin_root" \
  --state-dir "$state_dir" \
  --component singbox \
  --target linux-amd64 \
  --min-interval-sec 3600 \
  --force-now
out_v2="$("${bin_root}/sing-box")"
if [[ "$out_v2" != "auto sing-box v2" ]]; then
  echo "[transport_packaging_auto_update] expected forced v2 output, got: ${out_v2}" >&2
  exit 1
fi
# The updater records its last successful run for the interval gate.
if [[ ! -s "${state_dir}/last_success_epoch" ]]; then
  echo "[transport_packaging_auto_update] expected last_success_epoch file" >&2
  exit 1
fi
echo "[transport_packaging_auto_update] passed"

View File

@@ -0,0 +1,220 @@
#!/usr/bin/env bash
# E2E test for the transport-packaging updater: source-policy enforcement,
# rollout staging (canary/stable + cohort percent) and signature verification.
set -euo pipefail
# Repository root is one directory above this tests/ directory.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
UPDATER="${ROOT_DIR}/scripts/transport-packaging/update.sh"
# Abort the whole test early when a required external tool is not on PATH.
require_cmd() {
  local tool="$1"
  command -v "$tool" >/dev/null 2>&1 && return 0
  echo "[transport_packaging_policy_rollout] missing command: ${tool}" >&2
  exit 1
}
require_cmd bash
require_cmd openssl
require_cmd sha256sum
require_cmd jq
# Everything (assets, installed binaries, keys) lives in a throwaway tmp dir.
tmp_dir="$(mktemp -d)"
trap 'rm -rf "$tmp_dir"' EXIT
assets_dir="${tmp_dir}/assets"
bin_root="${tmp_dir}/bin-root"
keys_dir="${tmp_dir}/keys"
mkdir -p "$assets_dir" "$bin_root" "$keys_dir"
asset_v1="${assets_dir}/sing-box-v1"
asset_v2="${assets_dir}/sing-box-v2"
sig_v1="${assets_dir}/sing-box-v1.sig"
sig_v2="${assets_dir}/sing-box-v2.sig"
sig_bad="${assets_dir}/sing-box-v2.bad.sig"
# Fake "sing-box" binaries: tiny scripts that print a distinct version marker.
cat >"$asset_v1" <<'EOF'
#!/usr/bin/env bash
echo "signed sing-box v1"
EOF
chmod +x "$asset_v1"
cat >"$asset_v2" <<'EOF'
#!/usr/bin/env bash
echo "signed sing-box v2"
EOF
chmod +x "$asset_v2"
sha_v1="$(sha256sum "$asset_v1" | awk '{print $1}')"
sha_v2="$(sha256sum "$asset_v2" | awk '{print $1}')"
# Ephemeral RSA release key pair; assets are detach-signed with SHA-256.
openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -out "${keys_dir}/release-private.pem" >/dev/null 2>&1
openssl rsa -pubout -in "${keys_dir}/release-private.pem" -out "${keys_dir}/release-public.pem" >/dev/null 2>&1
openssl dgst -sha256 -sign "${keys_dir}/release-private.pem" -out "$sig_v1" "$asset_v1"
openssl dgst -sha256 -sign "${keys_dir}/release-private.pem" -out "$sig_v2" "$asset_v2"
# "Bad" signature = v1's signature attached to the v2 asset (must not verify).
cp "$sig_v1" "$sig_bad"
sig_sha_v1="$(sha256sum "$sig_v1" | awk '{print $1}')"
sig_sha_v2="$(sha256sum "$sig_v2" | awk '{print $1}')"
sig_sha_bad="$(sha256sum "$sig_bad" | awk '{print $1}')"
# Source policy: only file:// URLs are trusted and signatures are mandatory.
policy="${tmp_dir}/source-policy.json"
cat >"$policy" <<'EOF'
{
"schema_version": 1,
"require_https": false,
"allow_file_scheme": true,
"signature": {
"default_mode": "required",
"allowed_types": ["openssl-sha256"]
},
"components": {
"singbox": {
"allowed_url_prefixes": ["file://"],
"signature_mode": "required"
}
}
}
EOF
# Canary manifest: v1 asset gated to stage=canary at 20% of cohorts.
# Note: this heredoc is unquoted so shell variables expand into the JSON.
manifest="${tmp_dir}/manifest-canary.json"
cat >"$manifest" <<EOF
{
"schema_version": 1,
"components": {
"singbox": {
"enabled": true,
"binary_name": "sing-box",
"targets": {
"linux-amd64": {
"version": "1.0.0",
"url": "file://${asset_v1}",
"sha256": "${sha_v1}",
"asset_type": "raw",
"rollout": {
"stage": "canary",
"percent": 20
},
"signature": {
"type": "openssl-sha256",
"url": "file://${sig_v1}",
"sha256": "${sig_sha_v1}",
"public_key_path": "${keys_dir}/release-public.pem"
}
}
}
}
}
}
EOF
# Case 1: asking for stage=stable while the manifest says canary -> no install.
echo "[transport_packaging_policy_rollout] stage mismatch -> skip"
"$UPDATER" --manifest "$manifest" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage stable --cohort-id 5
if [[ -e "${bin_root}/sing-box" ]]; then
echo "[transport_packaging_policy_rollout] expected no install on stage mismatch" >&2
exit 1
fi
# Case 2: cohort 55 is above the 20% canary slice -> no install.
echo "[transport_packaging_policy_rollout] canary gated by cohort -> skip"
"$UPDATER" --manifest "$manifest" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage canary --cohort-id 55
if [[ -e "${bin_root}/sing-box" ]]; then
echo "[transport_packaging_policy_rollout] expected no install when cohort is out of rollout percent" >&2
exit 1
fi
# Case 3: cohort 5 falls inside the 20% slice -> v1 must be installed.
echo "[transport_packaging_policy_rollout] canary in cohort -> install"
"$UPDATER" --manifest "$manifest" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage canary --cohort-id 5
out_v1="$("${bin_root}/sing-box")"
if [[ "$out_v1" != "signed sing-box v1" ]]; then
echo "[transport_packaging_policy_rollout] expected signed v1 output, got: ${out_v1}" >&2
exit 1
fi
# Case 4: an https:// URL violates the file://-only source policy -> updater
# must exit non-zero (checked via the if-negation; --dry-run avoids network).
echo "[transport_packaging_policy_rollout] untrusted source must fail"
manifest_untrusted="${tmp_dir}/manifest-untrusted.json"
jq \
'.components.singbox.targets["linux-amd64"].version = "1.1.0" |
.components.singbox.targets["linux-amd64"].url = "https://example.com/sing-box" |
.components.singbox.targets["linux-amd64"].sha256 = "0000000000000000000000000000000000000000000000000000000000000000" |
.components.singbox.targets["linux-amd64"].rollout.stage = "stable" |
.components.singbox.targets["linux-amd64"].rollout.percent = 100' \
"$manifest" >"$manifest_untrusted"
if "$UPDATER" --manifest "$manifest_untrusted" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage stable --cohort-id 5 --dry-run; then
echo "[transport_packaging_policy_rollout] expected failure for untrusted source" >&2
exit 1
fi
# Case 5: v2 asset shipped with v1's (mismatching) signature -> must fail.
echo "[transport_packaging_policy_rollout] bad signature must fail"
manifest_bad_sig="${tmp_dir}/manifest-bad-sig.json"
cat >"$manifest_bad_sig" <<EOF
{
"schema_version": 1,
"components": {
"singbox": {
"enabled": true,
"binary_name": "sing-box",
"targets": {
"linux-amd64": {
"version": "1.2.0",
"url": "file://${asset_v2}",
"sha256": "${sha_v2}",
"asset_type": "raw",
"rollout": {
"stage": "stable",
"percent": 100
},
"signature": {
"type": "openssl-sha256",
"url": "file://${sig_bad}",
"sha256": "${sig_sha_bad}",
"public_key_path": "${keys_dir}/release-public.pem"
}
}
}
}
}
}
EOF
if "$UPDATER" --manifest "$manifest_bad_sig" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage stable --cohort-id 5; then
echo "[transport_packaging_policy_rollout] expected failure for bad signature" >&2
exit 1
fi
# Case 6: correctly signed v2 at stable/100% -> upgrade must succeed.
echo "[transport_packaging_policy_rollout] valid signature update -> install v2"
manifest_good_sig="${tmp_dir}/manifest-good-sig.json"
cat >"$manifest_good_sig" <<EOF
{
"schema_version": 1,
"components": {
"singbox": {
"enabled": true,
"binary_name": "sing-box",
"targets": {
"linux-amd64": {
"version": "1.3.0",
"url": "file://${asset_v2}",
"sha256": "${sha_v2}",
"asset_type": "raw",
"rollout": {
"stage": "stable",
"percent": 100
},
"signature": {
"type": "openssl-sha256",
"url": "file://${sig_v2}",
"sha256": "${sig_sha_v2}",
"public_key_path": "${keys_dir}/release-public.pem"
}
}
}
}
}
}
EOF
"$UPDATER" --manifest "$manifest_good_sig" --source-policy "$policy" --bin-root "$bin_root" --component singbox --target linux-amd64 --rollout-stage stable --cohort-id 5
out_v2="$("${bin_root}/sing-box")"
if [[ "$out_v2" != "signed sing-box v2" ]]; then
echo "[transport_packaging_policy_rollout] expected signed v2 output, got: ${out_v2}" >&2
exit 1
fi
echo "[transport_packaging_policy_rollout] passed"

View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Smoke test for transport packaging: install v1, update to v2, roll back to v1.
set -euo pipefail
# Repository root is one directory above this tests/ directory.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
UPDATER="${ROOT_DIR}/scripts/transport-packaging/update.sh"
ROLLBACK="${ROOT_DIR}/scripts/transport-packaging/rollback.sh"
# Fail fast when a tool this smoke test depends on is not installed.
require_cmd() {
  local tool="$1"
  command -v "$tool" >/dev/null 2>&1 && return 0
  echo "[transport_packaging_smoke] missing command: ${tool}" >&2
  exit 1
}
require_cmd bash
require_cmd python3
require_cmd curl
require_cmd sha256sum
# All artifacts live under a temp dir that is removed on exit.
tmp_dir="$(mktemp -d)"
trap 'rm -rf "$tmp_dir"' EXIT
assets_dir="${tmp_dir}/assets"
bin_root="${tmp_dir}/bin-root"
mkdir -p "$assets_dir" "$bin_root"
asset_v1="${assets_dir}/sing-box-v1"
asset_v2="${assets_dir}/sing-box-v2"
# Stub binaries that print a version marker so each step can be verified.
cat >"$asset_v1" <<'EOF'
#!/usr/bin/env bash
echo "sing-box v1"
EOF
chmod +x "$asset_v1"
cat >"$asset_v2" <<'EOF'
#!/usr/bin/env bash
echo "sing-box v2"
EOF
chmod +x "$asset_v2"
sha_v1="$(sha256sum "$asset_v1" | awk '{print $1}')"
sha_v2="$(sha256sum "$asset_v2" | awk '{print $1}')"
# Two minimal manifests (no rollout/signature sections); the heredocs are
# unquoted so the asset paths and checksums expand into the JSON.
manifest_v1="${tmp_dir}/manifest-v1.json"
manifest_v2="${tmp_dir}/manifest-v2.json"
cat >"$manifest_v1" <<EOF
{
"schema_version": 1,
"components": {
"singbox": {
"enabled": true,
"binary_name": "sing-box",
"targets": {
"linux-amd64": {
"version": "1.0.0",
"url": "file://${asset_v1}",
"sha256": "${sha_v1}",
"asset_type": "raw"
}
}
}
}
}
EOF
cat >"$manifest_v2" <<EOF
{
"schema_version": 1,
"components": {
"singbox": {
"enabled": true,
"binary_name": "sing-box",
"targets": {
"linux-amd64": {
"version": "2.0.0",
"url": "file://${asset_v2}",
"sha256": "${sha_v2}",
"asset_type": "raw"
}
}
}
}
}
EOF
# Step 1: fresh install of v1 and verify the installed stub runs.
echo "[transport_packaging_smoke] install v1"
"$UPDATER" --manifest "$manifest_v1" --bin-root "$bin_root" --component singbox --target linux-amd64
out_v1="$("$bin_root/sing-box")"
if [[ "$out_v1" != "sing-box v1" ]]; then
echo "[transport_packaging_smoke] expected v1 output, got: ${out_v1}" >&2
exit 1
fi
# Step 2: in-place upgrade to v2.
echo "[transport_packaging_smoke] update to v2"
"$UPDATER" --manifest "$manifest_v2" --bin-root "$bin_root" --component singbox --target linux-amd64
out_v2="$("$bin_root/sing-box")"
if [[ "$out_v2" != "sing-box v2" ]]; then
echo "[transport_packaging_smoke] expected v2 output, got: ${out_v2}" >&2
exit 1
fi
# Step 3: rollback must restore the previously installed v1.
echo "[transport_packaging_smoke] rollback to v1"
"$ROLLBACK" --bin-root "$bin_root" --component singbox
out_after_rb="$("$bin_root/sing-box")"
if [[ "$out_after_rb" != "sing-box v1" ]]; then
echo "[transport_packaging_smoke] expected rollback to v1, got: ${out_after_rb}" >&2
exit 1
fi
echo "[transport_packaging_smoke] passed"

227
tests/transport_phoenix_e2e.py Executable file
View File

@@ -0,0 +1,227 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import time
from typing import Dict, Optional, Tuple
import urllib.error
import urllib.request
def fail(msg: str) -> int:
    """Print a tagged error message and return exit code 1 for the caller."""
    message = f"[transport_phoenix_e2e] ERROR: {msg}"
    print(message)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Perform an HTTP request and return ``(status_code, json_dict)``.

    The JSON payload, when given, is sent with a Content-Type header.
    Any transport-level failure yields ``(0, {})``; HTTP error responses
    keep their status and body. A non-dict or unparsable body maps to ``{}``.
    """
    body: Optional[bytes] = None
    headers = {"Accept": "application/json"}
    if payload is not None:
        body = json.dumps(payload).encode("utf-8")
        headers["Content-Type"] = "application/json"
    url = f"{api_url.rstrip('/')}{path}"
    req = urllib.request.Request(url, data=body, method=method.upper(), headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=20.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as http_err:
        # HTTP-level errors still carry a useful body and status code.
        raw = http_err.read().decode("utf-8", errors="replace")
        status = int(http_err.code or 500)
    except Exception:
        # Connection refused, timeout, bad URL scheme, etc.
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return status, parsed
def ensure_client_deleted(api_url: str, client_id: str) -> None:
    """Best-effort force-delete of a transport client; the response is ignored."""
    delete_path = "/api/v1/transport/clients/" + client_id + "?force=true"
    request_json(api_url, "DELETE", delete_path)
def create_client(api_url: str, payload: Dict) -> Tuple[int, Dict]:
    """Create a transport client via POST and return ``(status, body)``."""
    endpoint = "/api/v1/transport/clients"
    return request_json(api_url, "POST", endpoint, payload)
def main() -> int:
    """Run the phoenix transport e2e suite against the backend at API_URL.

    Three cases are exercised:
      1. full lifecycle (provision/start/health/restart/stop/metrics) of a
         mock-runner phoenix client;
      2. rejection of the unsupported ``runtime_mode=embedded``;
      3. fail-fast provisioning when ``require_binary`` points at a binary
         that does not exist.

    Returns 0 on success or skip, 1 on failure (fed to SystemExit).
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_phoenix_e2e] API_URL={api_url}")
    # Capabilities probe doubles as an availability check: 404 -> skip suite.
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        print("[transport_phoenix_e2e] SKIP: transport endpoints are not available on this backend")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    clients_caps = caps.get("clients") or {}
    if not isinstance(clients_caps, dict) or "phoenix" not in clients_caps:
        return fail(f"phoenix capability is missing: {caps}")
    # runtime_modes is optional on older builds: enforce exec only if present.
    runtime_modes = caps.get("runtime_modes") or {}
    if isinstance(runtime_modes, dict) and runtime_modes:
        if not bool(runtime_modes.get("exec", False)):
            return fail(f"runtime_modes.exec is not supported: {caps}")
    else:
        print("[transport_phoenix_e2e] WARN: runtime_modes are not advertised by current backend build")
    # Timestamp + pid make client ids unique across concurrent test runs.
    ts = int(time.time())
    pid = os.getpid()
    # Case 1: successful lifecycle on mock runner.
    client_ok = f"e2e-phoenix-ok-{ts}-{pid}"
    ensure_client_deleted(api_url, client_ok)
    status, create_ok = create_client(
        api_url,
        {
            "id": client_ok,
            "name": "E2E Phoenix Mock",
            "kind": "phoenix",
            "enabled": False,
            "config": {
                "runner": "mock",
                "runtime_mode": "exec",
            },
        },
    )
    if status != 200 or not bool(create_ok.get("ok", False)):
        return fail(f"create mock phoenix failed status={status} payload={create_ok}")
    try:
        status, provision = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/provision")
        if status == 404:
            print("[transport_phoenix_e2e] SKIP: provision endpoint is not available on current backend build")
            return 0
        if status != 200 or not bool(provision.get("ok", False)):
            return fail(f"provision mock phoenix failed status={status} payload={provision}")
        status, start = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/start")
        if status != 200 or not bool(start.get("ok", False)):
            return fail(f"start mock phoenix failed status={status} payload={start}")
        if str(start.get("status_after") or "").strip().lower() != "up":
            return fail(f"start did not set status_after=up: {start}")
        status, health = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/health")
        if status != 200 or not bool(health.get("ok", False)):
            return fail(f"health mock phoenix failed status={status} payload={health}")
        if str(health.get("status") or "").strip().lower() not in ("up", "degraded"):
            return fail(f"unexpected health status after start: {health}")
        status, restart = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/restart")
        if status != 200 or not bool(restart.get("ok", False)):
            return fail(f"restart mock phoenix failed status={status} payload={restart}")
        status, stop = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/stop")
        if status != 200 or not bool(stop.get("ok", False)):
            return fail(f"stop mock phoenix failed status={status} payload={stop}")
        if str(stop.get("status_after") or "").strip().lower() != "down":
            return fail(f"stop did not set status_after=down: {stop}")
        # Metrics endpoint is optional (404 tolerated on older builds).
        status, metrics = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/metrics")
        if status == 404:
            print("[transport_phoenix_e2e] WARN: metrics endpoint is not available on current backend build")
        else:
            if status != 200 or not bool(metrics.get("ok", False)):
                return fail(f"metrics mock phoenix failed status={status} payload={metrics}")
            metrics_obj = metrics.get("metrics") or {}
            if not isinstance(metrics_obj, dict):
                return fail(f"metrics payload is invalid: {metrics}")
            # start + stop (at least) must each have bumped state_changes.
            if int(metrics_obj.get("state_changes", 0) or 0) < 2:
                return fail(f"state_changes must be >=2 after lifecycle sequence: {metrics}")
        print("[transport_phoenix_e2e] case1 mock lifecycle: ok")
    finally:
        ensure_client_deleted(api_url, client_ok)
    # Case 2: embedded runtime mode must be rejected.
    client_emb = f"e2e-phoenix-embedded-{ts}-{pid}"
    ensure_client_deleted(api_url, client_emb)
    status, create_emb = create_client(
        api_url,
        {
            "id": client_emb,
            "name": "E2E Phoenix Embedded",
            "kind": "phoenix",
            "enabled": False,
            "config": {
                "runner": "mock",
                "runtime_mode": "embedded",
            },
        },
    )
    if status != 200 or not bool(create_emb.get("ok", False)):
        return fail(f"create embedded phoenix failed status={status} payload={create_emb}")
    try:
        # Every action must report ok=false with the dedicated error code.
        status, provision_emb = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_emb}/provision")
        if status != 200 or bool(provision_emb.get("ok", True)):
            return fail(f"embedded provision must fail status={status} payload={provision_emb}")
        if str(provision_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded provision wrong code: {provision_emb}")
        status, start_emb = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_emb}/start")
        if status != 200 or bool(start_emb.get("ok", True)):
            return fail(f"embedded start must fail status={status} payload={start_emb}")
        if str(start_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded start wrong code: {start_emb}")
        # Health itself succeeds (ok=true) but surfaces the unsupported-mode code.
        status, health_emb = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_emb}/health")
        if status != 200 or not bool(health_emb.get("ok", False)):
            return fail(f"embedded health request failed status={status} payload={health_emb}")
        if str(health_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded health wrong code: {health_emb}")
        print("[transport_phoenix_e2e] case2 runtime_mode=embedded guard: ok")
    finally:
        ensure_client_deleted(api_url, client_emb)
    # Case 3: require_binary fail-fast for missing phoenix binary.
    client_req = f"e2e-phoenix-requirebin-{ts}-{pid}"
    ensure_client_deleted(api_url, client_req)
    status, create_req = create_client(
        api_url,
        {
            "id": client_req,
            "name": "E2E Phoenix RequireBinary",
            "kind": "phoenix",
            "enabled": False,
            "config": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"{client_req}.service",
                "packaging_profile": "bundled",
                "bin_root": "/opt/selective-vpn/bin",
                "require_binary": True,
                "phoenix_bin": "/tmp/definitely-missing-phoenix-binary",
                "phoenix_config_path": "/etc/phoenix/client.toml",
            },
        },
    )
    if status != 200 or not bool(create_req.get("ok", False)):
        return fail(f"create require_binary phoenix failed status={status} payload={create_req}")
    try:
        status, provision_req = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_req}/provision")
        if status != 200 or bool(provision_req.get("ok", True)):
            return fail(f"require_binary provision must fail status={status} payload={provision_req}")
        if str(provision_req.get("code") or "").strip() != "TRANSPORT_BACKEND_PROVISION_CONFIG_REQUIRED":
            return fail(f"require_binary provision wrong code: {provision_req}")
        msg = str(provision_req.get("message") or "").strip().lower()
        if "required phoenix binary not found" not in msg:
            return fail(f"require_binary provision wrong message: {provision_req}")
        print("[transport_phoenix_e2e] case3 require_binary fail-fast: ok")
    finally:
        ensure_client_deleted(api_url, client_req)
    print("[transport_phoenix_e2e] passed")
    return 0


if __name__ == "__main__":
    # Exit with main()'s return code so CI can detect failures.
    raise SystemExit(main())

View File

@@ -0,0 +1,121 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import urllib.error
import urllib.request
def fail(msg: str) -> int:
    """Report a tagged error on stdout and return exit code 1."""
    message = f"[transport_platform_compat] ERROR: {msg}"
    print(message)
    return 1
def request_json(api_url: str, method: str, path: str, payload: dict | None = None) -> tuple[int, dict]:
    """Perform an HTTP request and return ``(status_code, json_dict)``.

    A transport failure yields ``(0, {})``; HTTP error responses keep
    their status and (parsed) body. Non-dict or invalid JSON maps to ``{}``.
    """
    encoded: bytes | None = None
    headers = {"Accept": "application/json"}
    if payload is not None:
        encoded = json.dumps(payload).encode("utf-8")
        headers["Content-Type"] = "application/json"
    full_url = f"{api_url.rstrip('/')}{path}"
    req = urllib.request.Request(full_url, data=encoded, method=method.upper(), headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=20.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as http_err:
        raw = http_err.read().decode("utf-8", errors="replace")
        status = int(http_err.code or 500)
    except Exception:
        # Connection refused, DNS failure, timeout, bad scheme, etc.
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return status, parsed
def assert_capability_true(caps: dict, section: str, key: str) -> tuple[bool, str]:
    """Check that ``caps[section][key]`` is present and truthy.

    Returns ``(True, "")`` on success, otherwise ``(False, reason)`` with a
    human-readable explanation of which contract requirement was violated.
    """
    section_obj = caps.get(section) or {}
    if not isinstance(section_obj, dict):
        return False, f"section `{section}` is missing in capabilities payload"
    if key in section_obj:
        if bool(section_obj.get(key)):
            return True, ""
        return False, f"`{section}.{key}` must be true for cross-platform contract"
    return False, f"`{section}.{key}` is missing in capabilities payload"
def main() -> int:
    """Verify the transport capability and policy contract for all platforms.

    Asserts that the backend at API_URL advertises the singbox/dnstt/phoenix
    clients, exec runtime mode, system+bundled packaging profiles, and that
    the policies list/validate/conflicts endpoints all respond successfully.

    Returns 0 on success or skip (endpoints absent), 1 on failure.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_platform_compat] API_URL={api_url}")
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        # Transport feature not built into this backend: skip, do not fail.
        print("[transport_platform_compat] SKIP: /api/v1/transport/* is unavailable on current backend build")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    clients = caps.get("clients") or {}
    if not isinstance(clients, dict):
        return fail(f"clients map is invalid: {caps}")
    # All three transport kinds must be advertised as capability objects.
    required_clients = ("singbox", "dnstt", "phoenix")
    for kind in required_clients:
        if kind not in clients:
            return fail(f"missing transport client `{kind}` in capabilities")
        if not isinstance(clients.get(kind), dict):
            return fail(f"client capability `{kind}` must be an object")
    ok, msg = assert_capability_true(caps, "runtime_modes", "exec")
    if not ok:
        return fail(msg)
    ok, msg = assert_capability_true(caps, "packaging_profiles", "system")
    if not ok:
        return fail(msg)
    ok, msg = assert_capability_true(caps, "packaging_profiles", "bundled")
    if not ok:
        return fail(msg)
    # The base policy contract must be equally available to web/iOS/Android clients.
    status, policy = request_json(api_url, "GET", "/api/v1/transport/policies")
    if status != 200 or not bool(policy.get("ok", False)):
        return fail(f"transport/policies failed status={status} payload={policy}")
    revision = int(policy.get("policy_revision") or 0)
    intents = policy.get("intents") or []
    if not isinstance(intents, list):
        return fail(f"policy intents must be array: {policy}")
    # Round-trip the current policy through validate to prove the contract.
    status, validated = request_json(
        api_url,
        "POST",
        "/api/v1/transport/policies/validate",
        {"base_revision": revision, "intents": intents},
    )
    if status != 200 or not bool(validated.get("ok", False)):
        return fail(f"transport/policies/validate failed status={status} payload={validated}")
    if int(validated.get("base_revision") or 0) <= 0:
        return fail(f"validate response has invalid base_revision: {validated}")
    status, conflicts = request_json(api_url, "GET", "/api/v1/transport/conflicts")
    if status != 200 or not bool(conflicts.get("ok", False)):
        return fail(f"transport/conflicts failed status={status} payload={conflicts}")
    print("[transport_platform_compat] capabilities + policy contract are compatible with web/iOS/Android clients")
    return 0


if __name__ == "__main__":
    # Exit with main()'s return code so CI can detect failures.
    raise SystemExit(main())

View File

@@ -0,0 +1,341 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import urllib.error
import urllib.request
SINGBOX_INSTANCE_DROPIN = "10-selective-vpn.conf"
def fail(msg: str) -> int:
    """Print a tagged error line and return exit code 1."""
    message = f"[transport_production_like_e2e] ERROR: {msg}"
    print(message)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Perform an HTTP request and return ``(status_code, json_dict)``.

    Uses a 30s timeout (provisioning can be slow). A transport failure
    yields ``(0, {})``; HTTP errors keep their status and parsed body.
    A non-dict or invalid JSON body maps to ``{}``.
    """
    body: Optional[bytes] = None
    headers = {"Accept": "application/json"}
    if payload is not None:
        body = json.dumps(payload).encode("utf-8")
        headers["Content-Type"] = "application/json"
    url = f"{api_url.rstrip('/')}{path}"
    req = urllib.request.Request(url, data=body, method=method.upper(), headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as http_err:
        raw = http_err.read().decode("utf-8", errors="replace")
        status = int(http_err.code or 500)
    except Exception:
        # Connection refused, timeout, bad URL scheme, etc.
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return status, parsed
def ensure_client_deleted(api_url: str, client_id: str) -> None:
    """Best-effort force-delete of a transport client; the result is ignored."""
    delete_path = "/api/v1/transport/clients/" + client_id + "?force=true"
    request_json(api_url, "DELETE", delete_path)
def is_systemd_unavailable(resp: Dict) -> bool:
    """Heuristic: does an API error payload indicate systemd cannot be used?

    Scans the message/stderr/stdout fields (case-insensitively) for the
    well-known error strings produced inside containers or unprivileged
    environments, so the caller can skip instead of failing.
    """
    haystack = " ".join(
        str(resp.get(field) or "") for field in ("message", "stderr", "stdout")
    ).lower()
    markers = (
        "not been booted with systemd",
        "failed to connect to bus",
        "systemctl daemon-reload failed",
        "operation not permitted",
    )
    return any(marker in haystack for marker in markers)
def write_fake_binary(path: Path) -> None:
    """Create an executable stub at *path* that just sleeps for 120 seconds.

    The stub stands in for a real transport binary so systemd units have a
    long-lived process to manage during the test.
    """
    with path.open("w", encoding="utf-8") as fh:
        fh.write("#!/usr/bin/env bash\nexec /usr/bin/sleep 120\n")
    path.chmod(0o755)
def unit_file_path(unit: str) -> Path:
    """Return the absolute path of a unit file under /etc/systemd/system."""
    return Path("/etc/systemd/system", unit)
def unit_dropin_path(unit: str, file_name: str = SINGBOX_INSTANCE_DROPIN) -> Path:
    """Return the path of the managed drop-in file inside the unit's .d dir."""
    dropin_dir = Path("/etc/systemd/system") / f"{unit}.d"
    return dropin_dir / file_name
def read_managed_unit_text(unit: str) -> str:
    """Concatenate the unit file and its managed drop-in for *unit*.

    Raises AssertionError when neither artifact exists on disk.
    """
    unit_path = unit_file_path(unit)
    dropin_path = unit_dropin_path(unit)
    pieces: List[str] = []
    for artifact in (unit_path, dropin_path):
        if artifact.exists():
            pieces.append(artifact.read_text(encoding="utf-8", errors="replace"))
    if not pieces:
        raise AssertionError(f"unit artifacts are missing: {unit_path} {dropin_path}")
    return "\n".join(pieces)
def assert_unit_contains(unit: str, expected_parts: List[str]) -> None:
    """Assert every fragment in *expected_parts* appears in the unit's text."""
    unit_text = read_managed_unit_text(unit)
    for fragment in expected_parts:
        if fragment in unit_text:
            continue
        raise AssertionError(f"unit {unit} missing {fragment!r}")
def assert_unit_removed(unit: str, client_id: str) -> None:
    """Assert the drop-in is gone and no owned unit file survives cleanup.

    A unit file may legitimately remain (e.g. a shared template) as long as
    it no longer carries this client's ownership marker.
    """
    marker = f"Environment=SVPN_TRANSPORT_ID={client_id}"
    dropin_path = unit_dropin_path(unit)
    if dropin_path.exists():
        raise AssertionError(f"drop-in file still exists after cleanup: {dropin_path}")
    unit_path = unit_file_path(unit)
    if not unit_path.exists():
        return
    text = unit_path.read_text(encoding="utf-8", errors="replace")
    if marker in text:
        raise AssertionError(f"owned unit still exists after cleanup: {unit_path}")
def assert_file_exists(path: str) -> None:
    """Assert that the filesystem entry at *path* exists."""
    candidate = Path(path)
    if candidate.exists():
        return
    raise AssertionError(f"expected file missing: {candidate}")
def run_case(
    api_url: str,
    *,
    client_id: str,
    kind: str,
    cfg: Dict,
    units: List[str],
    expected_unit_parts: List[str],
    template_units: Optional[List[str]] = None,
) -> Tuple[bool, str]:
    """Exercise a full production-like lifecycle for one transport client.

    Creates the client, provisions it (verifying the rendered systemd unit
    artifacts on disk), then start/health/metrics/restart/stop, and finally
    deletes it while asserting its units are cleaned up.

    Returns ``(True, "ok")`` on success or ``(False, reason)`` when the
    environment cannot run the case (missing endpoint / no systemd) and the
    caller should skip. Raises AssertionError on an actual test failure.
    """
    ensure_client_deleted(api_url, client_id)
    status, created = request_json(
        api_url,
        "POST",
        "/api/v1/transport/clients",
        {
            "id": client_id,
            "name": f"E2E ProductionLike {kind}",
            "kind": kind,
            "enabled": False,
            "config": cfg,
        },
    )
    if status != 200 or not bool(created.get("ok", False)):
        raise AssertionError(f"create failed status={status} payload={created}")
    try:
        status, provision = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/provision")
        if status == 404:
            return False, "provision endpoint is not available on current backend build"
        if status != 200:
            raise AssertionError(f"provision failed status={status} payload={provision}")
        if not bool(provision.get("ok", False)):
            # Distinguish "no systemd here" (skip) from a genuine failure.
            if is_systemd_unavailable(provision):
                return False, f"systemd is unavailable: {provision}"
            raise AssertionError(f"provision returned ok=false payload={provision}")
        # Every managed unit must carry this client's ownership marker.
        for unit in units:
            assert_unit_contains(unit, [f"Environment=SVPN_TRANSPORT_ID={client_id}"])
        # The primary unit must also embed the expected command fragments.
        assert_unit_contains(units[0], expected_unit_parts)
        for t_unit in (template_units or []):
            assert_file_exists(str(unit_file_path(t_unit)))
        status, started = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/start")
        if status != 200 or not bool(started.get("ok", False)):
            raise AssertionError(f"start failed status={status} payload={started}")
        if str(started.get("status_after") or "").strip().lower() != "up":
            raise AssertionError(f"start did not set status_after=up payload={started}")
        status, health = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/health")
        if status != 200 or not bool(health.get("ok", False)):
            raise AssertionError(f"health failed status={status} payload={health}")
        status, metrics = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/metrics")
        if status != 200 or not bool(metrics.get("ok", False)):
            raise AssertionError(f"metrics failed status={status} payload={metrics}")
        m = metrics.get("metrics") or {}
        if int(m.get("state_changes", 0) or 0) < 1:
            raise AssertionError(f"state_changes must be >=1 payload={metrics}")
        status, restarted = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/restart")
        if status != 200 or not bool(restarted.get("ok", False)):
            raise AssertionError(f"restart failed status={status} payload={restarted}")
        status, stopped = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/stop")
        if status != 200 or not bool(stopped.get("ok", False)):
            raise AssertionError(f"stop failed status={status} payload={stopped}")
    finally:
        # Cleanup runs even on skip/failure: delete and verify unit removal.
        status, deleted = request_json(api_url, "DELETE", f"/api/v1/transport/clients/{client_id}?force=true")
        if status != 200 or not bool(deleted.get("ok", False)):
            raise AssertionError(f"delete failed status={status} payload={deleted}")
        for unit in units:
            assert_unit_removed(unit, client_id)
        # Shared template units must survive a single instance's deletion.
        for t_unit in (template_units or []):
            assert_file_exists(str(unit_file_path(t_unit)))
    return True, "ok"
def main() -> int:
    """Run production-like lifecycle cases for singbox, phoenix and dnstt.

    Builds temporary stub binaries/configs, then drives each client kind
    through run_case() against the backend at API_URL. Skips cleanly when
    the transport endpoints or systemd are unavailable.

    Returns 0 on success or skip, 1 on failure.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_production_like_e2e] API_URL={api_url}")
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        print("[transport_production_like_e2e] SKIP: transport endpoints are not available on this backend")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    # Optional capability maps: only enforced when the backend advertises them.
    runtime_modes = caps.get("runtime_modes") or {}
    if isinstance(runtime_modes, dict) and runtime_modes:
        if not bool(runtime_modes.get("exec", False)):
            return fail(f"runtime_modes.exec is not supported: {caps}")
    packaging_profiles = caps.get("packaging_profiles") or {}
    if isinstance(packaging_profiles, dict) and packaging_profiles:
        if not bool(packaging_profiles.get("bundled", False)):
            return fail(f"packaging_profiles.bundled is not supported: {caps}")
        if not bool(packaging_profiles.get("system", False)):
            return fail(f"packaging_profiles.system is not supported: {caps}")
    # Timestamp + pid make client/unit names unique across concurrent runs.
    ts = int(time.time())
    pid = os.getpid()
    tag = f"{ts}-{pid}"
    with tempfile.TemporaryDirectory(prefix="svpn-prodlike-") as tmp:
        root = Path(tmp)
        bin_root = root / "bin"
        bin_root.mkdir(parents=True, exist_ok=True)
        # Stub binaries + empty configs stand in for the real transports.
        singbox_bin = bin_root / "sing-box"
        phoenix_bin = bin_root / "phoenix-client"
        dnstt_bin = bin_root / "dnstt-client"
        write_fake_binary(singbox_bin)
        write_fake_binary(phoenix_bin)
        write_fake_binary(dnstt_bin)
        singbox_cfg = root / "singbox.json"
        phoenix_cfg = root / "phoenix.toml"
        singbox_cfg.write_text("{}", encoding="utf-8")
        phoenix_cfg.write_text("{}", encoding="utf-8")
        phoenix_unit = f"svpn-prodlike-phoenix-{tag}.service"
        dnstt_unit = f"svpn-prodlike-dnstt-{tag}.service"
        dnstt_ssh_unit = f"svpn-prodlike-dnstt-ssh-{tag}.service"
        singbox_client_id = f"e2e-prodlike-singbox-{tag}"
        # singbox uses the instantiated template unit singbox@<client_id>.
        singbox_unit = f"singbox@{singbox_client_id}.service"
        cases = [
            {
                "client_id": singbox_client_id,
                "kind": "singbox",
                "cfg": {
                    "runner": "systemd",
                    "runtime_mode": "exec",
                    "packaging_profile": "bundled",
                    "bin_root": str(bin_root),
                    "packaging_system_fallback": False,
                    "require_binary": True,
                    "singbox_config_path": str(singbox_cfg),
                    "hardening_enabled": False,
                },
                "units": [singbox_unit],
                "template_units": ["singbox@.service"],
                "expected": [str(singbox_bin), "run", str(singbox_cfg)],
            },
            {
                "client_id": f"e2e-prodlike-phoenix-{tag}",
                "kind": "phoenix",
                "cfg": {
                    "runner": "systemd",
                    "runtime_mode": "exec",
                    "unit": phoenix_unit,
                    "packaging_profile": "bundled",
                    "bin_root": str(bin_root),
                    "packaging_system_fallback": False,
                    "require_binary": True,
                    "phoenix_config_path": str(phoenix_cfg),
                    "hardening_enabled": False,
                },
                "units": [phoenix_unit],
                "expected": [str(phoenix_bin), "-config", str(phoenix_cfg)],
            },
            {
                "client_id": f"e2e-prodlike-dnstt-{tag}",
                "kind": "dnstt",
                "cfg": {
                    "runner": "systemd",
                    "runtime_mode": "exec",
                    "unit": dnstt_unit,
                    "packaging_profile": "bundled",
                    "bin_root": str(bin_root),
                    "packaging_system_fallback": False,
                    "require_binary": True,
                    "resolver_mode": "doh",
                    "doh_url": "https://dns.google/dns-query",
                    "pubkey": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                    "domain": "tunnel.example.com",
                    "local_addr": "127.0.0.1:7005",
                    "ssh_tunnel": True,
                    "ssh_unit": dnstt_ssh_unit,
                    "ssh_exec_start": "/usr/bin/sleep 120",
                    "hardening_enabled": False,
                    "ssh_hardening_enabled": False,
                },
                "units": [dnstt_unit, dnstt_ssh_unit],
                "expected": [str(dnstt_bin), "-doh", "dns.google", "tunnel.example.com", "127.0.0.1:7005"],
            },
        ]
        for case in cases:
            try:
                ok, reason = run_case(
                    api_url,
                    client_id=case["client_id"],
                    kind=case["kind"],
                    cfg=case["cfg"],
                    units=case["units"],
                    expected_unit_parts=case["expected"],
                    template_units=case.get("template_units"),
                )
            except AssertionError as e:
                return fail(f"{case['kind']} failed: {e}")
            if not ok:
                # Environment limitation (no endpoint / no systemd): skip all.
                print(f"[transport_production_like_e2e] SKIP: {reason}")
                return 0
            print(f"[transport_production_like_e2e] {case['kind']} production-like lifecycle: ok")
        print("[transport_production_like_e2e] passed")
        return 0


if __name__ == "__main__":
    # Exit with main()'s return code so CI can detect failures.
    raise SystemExit(main())

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Smoke test for the transport recovery runbook CLI.
#   case 1: a healthy mock client is recovered successfully and deleted.
#   case 2: an unrecoverable client must exit rc=2 and write diagnostics JSON.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
API_URL="${API_URL:-http://127.0.0.1:8080}"
RUNBOOK="${ROOT_DIR}/scripts/transport_runbook.py"
RECOVERY="${ROOT_DIR}/scripts/transport_recovery_runbook.py"

# Both helper scripts must be present; fix the executable bit if needed.
for f in "$RUNBOOK" "$RECOVERY"; do
  if [[ ! -x "$f" ]]; then
    if [[ -f "$f" ]]; then
      chmod +x "$f"
    else
      echo "[transport_recovery_runbook_smoke] missing script: $f" >&2
      exit 1
    fi
  fi
done

# Unique client ids per run so parallel/repeated invocations do not collide.
ts="$(date +%s)"
pid="$$"
ok_id="smoke-recovery-ok-${ts}-${pid}"
fail_id="smoke-recovery-fail-${ts}-${pid}"
diag_ok="/tmp/${ok_id}.json"
diag_fail="/tmp/${fail_id}.json"
# Always clean up diagnostics files, even on early exit.
trap 'rm -f "$diag_ok" "$diag_fail"' EXIT

echo "[transport_recovery_runbook_smoke] API_URL=${API_URL}"

echo "[transport_recovery_runbook_smoke] case1: recovery success"
env API_URL="${API_URL}" "$RUNBOOK" \
  --api-url "${API_URL}" \
  --client-id "${ok_id}" \
  --kind singbox \
  --name "Recovery OK ${ok_id}" \
  --config-json '{"runner":"mock","runtime_mode":"exec"}' \
  --actions "create"
env API_URL="${API_URL}" "$RECOVERY" \
  --api-url "${API_URL}" \
  --client-id "${ok_id}" \
  --max-restarts 1 \
  --provision-if-needed \
  --diagnostics-json "$diag_ok"
env API_URL="${API_URL}" "$RUNBOOK" \
  --api-url "${API_URL}" \
  --client-id "${ok_id}" \
  --actions "delete" \
  --force-delete

echo "[transport_recovery_runbook_smoke] case2: recovery fail-path with diagnostics"
# runtime_mode=embedded is expected to be unrecoverable on this backend.
env API_URL="${API_URL}" "$RUNBOOK" \
  --api-url "${API_URL}" \
  --client-id "${fail_id}" \
  --kind phoenix \
  --name "Recovery FAIL ${fail_id}" \
  --config-json '{"runner":"mock","runtime_mode":"embedded"}' \
  --actions "create"
# Temporarily disable -e so the expected non-zero exit can be captured.
set +e
env API_URL="${API_URL}" "$RECOVERY" \
  --api-url "${API_URL}" \
  --client-id "${fail_id}" \
  --max-restarts 1 \
  --provision-if-needed \
  --diagnostics-json "$diag_fail"
rc=$?
set -e
if [[ "$rc" -eq 0 ]]; then
  echo "[transport_recovery_runbook_smoke] expected non-zero for fail-path case" >&2
  exit 1
fi
# rc=2 is the runbook's dedicated "unrecovered" exit code.
if [[ "$rc" -ne 2 ]]; then
  echo "[transport_recovery_runbook_smoke] expected rc=2 for unrecovered case, got rc=${rc}" >&2
  exit 1
fi
# Diagnostics must exist and be non-empty on the fail path.
if [[ ! -s "$diag_fail" ]]; then
  echo "[transport_recovery_runbook_smoke] diagnostics file was not produced: $diag_fail" >&2
  exit 1
fi
env API_URL="${API_URL}" "$RUNBOOK" \
  --api-url "${API_URL}" \
  --client-id "${fail_id}" \
  --actions "delete" \
  --force-delete
echo "[transport_recovery_runbook_smoke] passed"
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Smoke test for the transport runbook CLI: drive one mock client through
# the full action chain (create .. delete) and rely on set -e for failure.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
RUNBOOK="${ROOT_DIR}/scripts/transport_runbook.py"
API_URL="${API_URL:-http://127.0.0.1:8080}"

# The runbook script must exist; fix the executable bit if needed.
if [[ ! -x "$RUNBOOK" ]]; then
  if [[ -f "$RUNBOOK" ]]; then
    chmod +x "$RUNBOOK"
  else
    echo "[transport_runbook_cli_smoke] missing runbook script: $RUNBOOK" >&2
    exit 1
  fi
fi

# Unique client id per run to avoid collisions with concurrent runs.
ts="$(date +%s)"
pid="$$"
client_id="smoke-runbook-${ts}-${pid}"
cfg='{"runner":"mock","runtime_mode":"exec"}'

echo "[transport_runbook_cli_smoke] API_URL=${API_URL}"
echo "[transport_runbook_cli_smoke] client_id=${client_id}"
env API_URL="${API_URL}" "$RUNBOOK" \
  --api-url "${API_URL}" \
  --client-id "${client_id}" \
  --kind singbox \
  --name "Runbook Smoke ${client_id}" \
  --config-json "${cfg}" \
  --actions "create,provision,start,health,metrics,restart,stop,delete"
echo "[transport_runbook_cli_smoke] passed"

233
tests/transport_singbox_e2e.py Executable file
View File

@@ -0,0 +1,233 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import sys
import time
from typing import Dict, Optional, Tuple
import urllib.error
import urllib.request
def fail(msg: str) -> int:
    """Print a tagged error line and return 1 (the script's failure exit code)."""
    prefix = "[transport_singbox_e2e] ERROR: "
    print(prefix + msg)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Issue an HTTP request against the backend and return (status, json_dict).

    Transport-level failures yield (0, {}). Bodies that are empty, invalid
    JSON, or not a JSON object are normalized to an empty dict.
    """
    headers = {"Accept": "application/json"}
    body = None
    if payload is not None:
        headers["Content-Type"] = "application/json"
        body = json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(
        api_url.rstrip("/") + path,
        data=body,
        method=method.upper(),
        headers=headers,
    )
    try:
        with urllib.request.urlopen(req, timeout=20.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as err:
        # HTTP errors still carry a useful body/status; surface both.
        raw = err.read().decode("utf-8", errors="replace")
        status = int(err.code or 500)
    except Exception:
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return status, parsed
def ensure_client_deleted(api_url: str, client_id: str) -> None:
    """Best-effort force-delete of a transport client; the response is ignored."""
    path = "/api/v1/transport/clients/" + client_id + "?force=true"
    request_json(api_url, "DELETE", path)
def create_client(api_url: str, payload: Dict) -> Tuple[int, Dict]:
    """POST a new transport client definition; returns (status, response_dict)."""
    endpoint = "/api/v1/transport/clients"
    return request_json(api_url, "POST", endpoint, payload)
def main() -> int:
    """Run the sing-box transport e2e suite against a live backend.

    Case 1: full lifecycle on the mock runner (provision/start/health/
    restart/stop/metrics). Case 2: runtime_mode=embedded must be rejected
    with a dedicated error code. Case 3: require_binary must fail fast when
    the sing-box binary is missing. Returns 0 on success or skip, 1 on error.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_singbox_e2e] API_URL={api_url}")
    # Capability probe: 404 means the backend predates transport support.
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        print("[transport_singbox_e2e] SKIP: transport endpoints are not available on this backend")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    clients_caps = caps.get("clients") or {}
    if not isinstance(clients_caps, dict) or "singbox" not in clients_caps:
        return fail(f"singbox capability is missing: {caps}")
    # Older backends may not advertise runtime_modes at all; only warn then.
    runtime_modes = caps.get("runtime_modes") or {}
    if isinstance(runtime_modes, dict) and runtime_modes:
        if not bool(runtime_modes.get("exec", False)):
            return fail(f"runtime_modes.exec is not supported: {caps}")
    else:
        print("[transport_singbox_e2e] WARN: runtime_modes are not advertised by current backend build")
    # Unique ids per run so reruns do not collide with leftover state.
    ts = int(time.time())
    pid = os.getpid()
    # Case 1: successful lifecycle on mock runner.
    client_ok = f"e2e-singbox-ok-{ts}-{pid}"
    ensure_client_deleted(api_url, client_ok)
    status, create_ok = create_client(
        api_url,
        {
            "id": client_ok,
            "name": "E2E Singbox Mock",
            "kind": "singbox",
            "enabled": False,
            "config": {
                "runner": "mock",
                "runtime_mode": "exec",
                "packaging_profile": "bundled",
                "bin_root": "/opt/selective-vpn/bin",
                "require_binary": False,
                "singbox_config_path": "/etc/singbox/e2e.json",
            },
        },
    )
    if status != 200 or not bool(create_ok.get("ok", False)):
        return fail(f"create mock singbox failed status={status} payload={create_ok}")
    try:
        status, provision = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/provision")
        if status == 404:
            print("[transport_singbox_e2e] SKIP: provision endpoint is not available on current backend build")
            return 0
        if status != 200 or not bool(provision.get("ok", False)):
            return fail(f"provision mock singbox failed status={status} payload={provision}")
        status, start = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/start")
        if status != 200 or not bool(start.get("ok", False)):
            return fail(f"start mock singbox failed status={status} payload={start}")
        if str(start.get("status_after") or "").strip().lower() != "up":
            return fail(f"start did not set status_after=up: {start}")
        status, health = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/health")
        if status != 200 or not bool(health.get("ok", False)):
            return fail(f"health mock singbox failed status={status} payload={health}")
        # "degraded" is tolerated right after start; only hard-down fails.
        if str(health.get("status") or "").strip().lower() not in ("up", "degraded"):
            return fail(f"unexpected health status after start: {health}")
        status, restart = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/restart")
        if status != 200 or not bool(restart.get("ok", False)):
            return fail(f"restart mock singbox failed status={status} payload={restart}")
        status, stop = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_ok}/stop")
        if status != 200 or not bool(stop.get("ok", False)):
            return fail(f"stop mock singbox failed status={status} payload={stop}")
        if str(stop.get("status_after") or "").strip().lower() != "down":
            return fail(f"stop did not set status_after=down: {stop}")
        # Metrics endpoint is optional on older builds (404 only warns).
        status, metrics = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_ok}/metrics")
        if status == 404:
            print("[transport_singbox_e2e] WARN: metrics endpoint is not available on current backend build")
        else:
            if status != 200 or not bool(metrics.get("ok", False)):
                return fail(f"metrics mock singbox failed status={status} payload={metrics}")
            metrics_obj = metrics.get("metrics") or {}
            if not isinstance(metrics_obj, dict):
                return fail(f"metrics payload is invalid: {metrics}")
            # start + restart + stop must have produced at least two transitions.
            if int(metrics_obj.get("state_changes", 0) or 0) < 2:
                return fail(f"state_changes must be >=2 after lifecycle sequence: {metrics}")
        print("[transport_singbox_e2e] case1 mock lifecycle: ok")
    finally:
        ensure_client_deleted(api_url, client_ok)
    # Case 2: embedded runtime mode must be rejected.
    client_emb = f"e2e-singbox-embedded-{ts}-{pid}"
    ensure_client_deleted(api_url, client_emb)
    status, create_emb = create_client(
        api_url,
        {
            "id": client_emb,
            "name": "E2E Singbox Embedded",
            "kind": "singbox",
            "enabled": False,
            "config": {
                "runner": "mock",
                "runtime_mode": "embedded",
            },
        },
    )
    if status != 200 or not bool(create_emb.get("ok", False)):
        return fail(f"create embedded singbox failed status={status} payload={create_emb}")
    try:
        # Creation succeeds, but provision/start/health must all report the
        # runtime-mode-unsupported error code with ok=false.
        status, provision_emb = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_emb}/provision")
        if status != 200 or bool(provision_emb.get("ok", True)):
            return fail(f"embedded provision must fail status={status} payload={provision_emb}")
        if str(provision_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded provision wrong code: {provision_emb}")
        status, start_emb = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_emb}/start")
        if status != 200 or bool(start_emb.get("ok", True)):
            return fail(f"embedded start must fail status={status} payload={start_emb}")
        if str(start_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded start wrong code: {start_emb}")
        # health stays ok=true (the query works) but carries the error code.
        status, health_emb = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_emb}/health")
        if status != 200 or not bool(health_emb.get("ok", False)):
            return fail(f"embedded health request failed status={status} payload={health_emb}")
        if str(health_emb.get("code") or "").strip() != "TRANSPORT_BACKEND_RUNTIME_MODE_UNSUPPORTED":
            return fail(f"embedded health wrong code: {health_emb}")
        print("[transport_singbox_e2e] case2 runtime_mode=embedded guard: ok")
    finally:
        ensure_client_deleted(api_url, client_emb)
    # Case 3: require_binary fail-fast for missing singbox binary.
    client_req = f"e2e-singbox-requirebin-{ts}-{pid}"
    ensure_client_deleted(api_url, client_req)
    status, create_req = create_client(
        api_url,
        {
            "id": client_req,
            "name": "E2E Singbox RequireBinary",
            "kind": "singbox",
            "enabled": False,
            "config": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"{client_req}.service",
                "packaging_profile": "bundled",
                "bin_root": "/opt/selective-vpn/bin",
                "require_binary": True,
                "singbox_bin": "/tmp/definitely-missing-sing-box-binary",
                "singbox_config_path": "/etc/singbox/e2e.json",
            },
        },
    )
    if status != 200 or not bool(create_req.get("ok", False)):
        return fail(f"create require_binary singbox failed status={status} payload={create_req}")
    try:
        status, provision_req = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_req}/provision")
        if status != 200 or bool(provision_req.get("ok", True)):
            return fail(f"require_binary provision must fail status={status} payload={provision_req}")
        if str(provision_req.get("code") or "").strip() != "TRANSPORT_BACKEND_PROVISION_CONFIG_REQUIRED":
            return fail(f"require_binary provision wrong code: {provision_req}")
        # Message check is case-insensitive to stay robust to wording tweaks.
        msg = str(provision_req.get("message") or "").strip().lower()
        if "required singbox binary not found" not in msg:
            return fail(f"require_binary provision wrong message: {provision_req}")
        print("[transport_singbox_e2e] case3 require_binary fail-fast: ok")
    finally:
        ensure_client_deleted(api_url, client_req)
    print("[transport_singbox_e2e] passed")
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,296 @@
#!/usr/bin/env python3
from __future__ import annotations
import json
import os
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import urllib.error
import urllib.request
SINGBOX_INSTANCE_DROPIN = "10-selective-vpn.conf"
def fail(msg: str) -> int:
    """Print a tagged error line and return 1 (the script's failure exit code)."""
    prefix = "[transport_systemd_real_e2e] ERROR: "
    print(prefix + msg)
    return 1
def request_json(api_url: str, method: str, path: str, payload: Optional[Dict] = None) -> Tuple[int, Dict]:
    """Issue an HTTP request against the backend and return (status, json_dict).

    Transport-level failures yield (0, {}). Bodies that are empty, invalid
    JSON, or not a JSON object are normalized to an empty dict.
    """
    headers = {"Accept": "application/json"}
    body = None
    if payload is not None:
        headers["Content-Type"] = "application/json"
        body = json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(
        api_url.rstrip("/") + path,
        data=body,
        method=method.upper(),
        headers=headers,
    )
    try:
        with urllib.request.urlopen(req, timeout=30.0) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            status = int(resp.getcode() or 200)
    except urllib.error.HTTPError as err:
        # HTTP errors still carry a useful body/status; surface both.
        raw = err.read().decode("utf-8", errors="replace")
        status = int(err.code or 500)
    except Exception:
        return 0, {}
    try:
        parsed = json.loads(raw) if raw else {}
    except Exception:
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return status, parsed
def ensure_client_deleted(api_url: str, client_id: str) -> None:
    """Best-effort force-delete of a transport client; the response is ignored."""
    path = "/api/v1/transport/clients/" + client_id + "?force=true"
    request_json(api_url, "DELETE", path)
def is_systemd_unavailable(resp: Dict) -> bool:
    """Heuristically detect "systemd not usable here" from an API error payload.

    Scans message/stderr/stdout (case-insensitively) for the well-known
    error strings emitted by systemctl in containers or unprivileged runs.
    """
    blob = " ".join(
        str(resp.get(field) or "") for field in ("message", "stderr", "stdout")
    ).lower()
    markers = (
        "not been booted with systemd",
        "failed to connect to bus",
        "systemctl daemon-reload failed",
        "operation not permitted",
    )
    return any(marker in blob for marker in markers)
def unit_file_path(unit: str) -> Path:
    """Absolute path of a systemd unit file under /etc/systemd/system."""
    return Path("/etc/systemd/system", unit)
def unit_dropin_path(unit: str, file_name: str = SINGBOX_INSTANCE_DROPIN) -> Path:
    """Path of a drop-in file inside the unit's override directory (<unit>.d)."""
    dropin_dir = Path("/etc/systemd/system") / f"{unit}.d"
    return dropin_dir / file_name
def assert_unit_owned(unit: str, client_id: str) -> None:
    """Assert that *unit* carries this client's ownership marker.

    Ownership is recorded as an Environment=SVPN_TRANSPORT_ID=<client_id>
    line, either in the unit file itself or in this tool's drop-in file.
    Raises AssertionError when neither artifact proves ownership.
    """
    marker = f"Environment=SVPN_TRANSPORT_ID={client_id}"
    unit_path = unit_file_path(unit)
    # Prefer the unit file: a marker there is sufficient on its own.
    if unit_path.exists():
        body = unit_path.read_text(encoding="utf-8", errors="replace")
        if marker in body:
            return
    # Fall back to the drop-in (template units record ownership there).
    dropin_path = unit_dropin_path(unit)
    if dropin_path.exists():
        body = dropin_path.read_text(encoding="utf-8", errors="replace")
        if marker in body:
            return
    # Distinguish "unit exists but unowned" from "no artifacts at all".
    if unit_path.exists() and not dropin_path.exists():
        raise AssertionError(f"ownership marker {marker} not found in unit: {unit_path}")
    raise AssertionError(f"unit artifacts are missing for ownership check: {unit_path} {dropin_path}")
def assert_unit_removed(unit: str, client_id: str) -> None:
    """Assert that cleanup removed this client's ownership of *unit*.

    The drop-in must be gone entirely; the unit file may remain only if it
    no longer contains our ownership marker (i.e. it is not ours).
    Raises AssertionError otherwise.
    """
    marker = f"Environment=SVPN_TRANSPORT_ID={client_id}"
    unit_path = unit_file_path(unit)
    dropin_path = unit_dropin_path(unit)
    if dropin_path.exists():
        raise AssertionError(f"drop-in file still exists after cleanup: {dropin_path}")
    if unit_path.exists():
        body = unit_path.read_text(encoding="utf-8", errors="replace")
        if marker in body:
            raise AssertionError(f"owned unit file still exists after cleanup: {unit_path}")
def assert_file_exists(path: str) -> None:
    """Raise AssertionError unless *path* exists on the filesystem."""
    candidate = Path(path)
    if not candidate.exists():
        raise AssertionError(f"expected file missing: {candidate}")
def create_client(api_url: str, payload: Dict) -> Tuple[int, Dict]:
    """POST a new transport client definition; returns (status, response_dict)."""
    endpoint = "/api/v1/transport/clients"
    return request_json(api_url, "POST", endpoint, payload)
def run_case(
    api_url: str,
    *,
    client_id: str,
    name: str,
    kind: str,
    cfg: Dict,
    units: List[str],
    template_units: Optional[List[str]] = None,
) -> Tuple[bool, str]:
    """Drive one transport client through a full real-systemd lifecycle.

    create -> provision -> start -> health -> metrics -> restart -> stop,
    then force-delete and verify unit artifacts were cleaned up.

    Returns (True, "ok") on success, or (False, reason) when the case must
    be skipped (missing endpoint, or systemd unusable in this environment).
    Raises AssertionError on any hard verification failure.
    """
    ensure_client_deleted(api_url, client_id)
    status, created = create_client(
        api_url,
        {
            "id": client_id,
            "name": name,
            "kind": kind,
            "enabled": False,
            "config": cfg,
        },
    )
    if status != 200 or not bool(created.get("ok", False)):
        raise AssertionError(f"create failed status={status} payload={created}")
    try:
        status, provision = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/provision")
        if status == 404:
            return False, "provision endpoint is not available on current backend build"
        if status != 200:
            raise AssertionError(f"provision failed status={status} payload={provision}")
        if not bool(provision.get("ok", False)):
            # Provisioning failure in a container/unprivileged env is a skip,
            # not a test failure.
            if is_systemd_unavailable(provision):
                return False, f"systemd is unavailable: {provision}"
            raise AssertionError(f"provision returned ok=false payload={provision}")
        # Every managed unit must carry our ownership marker; template units
        # only need to exist (they are shared, not owned).
        for unit in units:
            assert_unit_owned(unit, client_id)
        for t_unit in (template_units or []):
            assert_file_exists(str(unit_file_path(t_unit)))
        status, started = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/start")
        if status != 200 or not bool(started.get("ok", False)):
            raise AssertionError(f"start failed status={status} payload={started}")
        if str(started.get("status_after") or "").strip().lower() != "up":
            raise AssertionError(f"start did not set status_after=up payload={started}")
        status, health = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/health")
        if status != 200 or not bool(health.get("ok", False)):
            raise AssertionError(f"health failed status={status} payload={health}")
        # "degraded" is tolerated right after start; only hard-down fails.
        if str(health.get("status") or "").strip().lower() not in ("up", "degraded"):
            raise AssertionError(f"health status is unexpected payload={health}")
        status, metrics = request_json(api_url, "GET", f"/api/v1/transport/clients/{client_id}/metrics")
        if status != 200 or not bool(metrics.get("ok", False)):
            raise AssertionError(f"metrics failed status={status} payload={metrics}")
        m = metrics.get("metrics") or {}
        if not isinstance(m, dict):
            raise AssertionError(f"metrics payload is invalid: {metrics}")
        if int(m.get("state_changes", 0) or 0) < 1:
            raise AssertionError(f"state_changes must be >=1 after start: {metrics}")
        status, restarted = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/restart")
        if status != 200 or not bool(restarted.get("ok", False)):
            raise AssertionError(f"restart failed status={status} payload={restarted}")
        status, stopped = request_json(api_url, "POST", f"/api/v1/transport/clients/{client_id}/stop")
        if status != 200 or not bool(stopped.get("ok", False)):
            raise AssertionError(f"stop failed status={status} payload={stopped}")
        if str(stopped.get("status_after") or "").strip().lower() != "down":
            raise AssertionError(f"stop did not set status_after=down payload={stopped}")
    finally:
        # Cleanup runs even on failure: delete the client and verify that the
        # owned unit artifacts are gone while shared template units survive.
        status, deleted = request_json(api_url, "DELETE", f"/api/v1/transport/clients/{client_id}?force=true")
        if status != 200 or not bool(deleted.get("ok", False)):
            raise AssertionError(f"delete failed status={status} payload={deleted}")
        for unit in units:
            assert_unit_removed(unit, client_id)
        for t_unit in (template_units or []):
            assert_file_exists(str(unit_file_path(t_unit)))
    return True, "ok"
def main() -> int:
    """Run real-systemd lifecycle e2e for singbox, phoenix, and dnstt clients.

    Each case uses /usr/bin/sleep as a harmless long-running ExecStart and
    disables hardening so the test works on minimal hosts. Returns 0 on
    success or skip, 1 on failure.
    """
    api_url = os.environ.get("API_URL", "http://127.0.0.1:8080").strip()
    if not api_url:
        return fail("empty API_URL")
    print(f"[transport_systemd_real_e2e] API_URL={api_url}")
    # Capability probe: 404 means the backend predates transport support.
    status, caps = request_json(api_url, "GET", "/api/v1/transport/capabilities")
    if status == 404:
        print("[transport_systemd_real_e2e] SKIP: transport endpoints are not available on this backend")
        return 0
    if status != 200 or not bool(caps.get("ok", False)):
        return fail(f"capabilities failed status={status} payload={caps}")
    runtime_modes = caps.get("runtime_modes") or {}
    if isinstance(runtime_modes, dict) and runtime_modes:
        if not bool(runtime_modes.get("exec", False)):
            return fail(f"runtime_modes.exec is not supported: {caps}")
    # Unique tag per run so unit names never collide between runs.
    ts = int(time.time())
    pid = os.getpid()
    tag = f"{ts}-{pid}"
    cases = [
        {
            "client_id": f"e2e-sys-singbox-{tag}",
            "name": "E2E Systemd Singbox",
            "kind": "singbox",
            "cfg": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "exec_start": "/usr/bin/sleep 120",
                "hardening_enabled": False,
            },
            # singbox uses a shared template unit plus a per-client instance.
            "units": [f"singbox@e2e-sys-singbox-{tag}.service"],
            "template_units": ["singbox@.service"],
        },
        {
            "client_id": f"e2e-sys-phoenix-{tag}",
            "name": "E2E Systemd Phoenix",
            "kind": "phoenix",
            "cfg": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"svpn-e2e-phoenix-{tag}.service",
                "exec_start": "/usr/bin/sleep 120",
                "hardening_enabled": False,
            },
            "units": [f"svpn-e2e-phoenix-{tag}.service"],
        },
        {
            "client_id": f"e2e-sys-dnstt-{tag}",
            "name": "E2E Systemd DNSTT",
            "kind": "dnstt",
            "cfg": {
                "runner": "systemd",
                "runtime_mode": "exec",
                "unit": f"svpn-e2e-dnstt-{tag}.service",
                "exec_start": "/usr/bin/sleep 120",
                "ssh_tunnel": True,
                "ssh_unit": f"svpn-e2e-dnstt-ssh-{tag}.service",
                "ssh_exec_start": "/usr/bin/sleep 120",
                "hardening_enabled": False,
                "ssh_hardening_enabled": False,
            },
            # dnstt manages two units: the tunnel and its companion SSH unit.
            "units": [
                f"svpn-e2e-dnstt-{tag}.service",
                f"svpn-e2e-dnstt-ssh-{tag}.service",
            ],
        },
    ]
    for case in cases:
        try:
            ok, reason = run_case(
                api_url,
                client_id=case["client_id"],
                name=case["name"],
                kind=case["kind"],
                cfg=case["cfg"],
                units=case["units"],
                template_units=case.get("template_units"),
            )
        except AssertionError as e:
            return fail(f"{case['kind']} failed: {e}")
        # A skip from any case aborts the whole suite with success status.
        if not ok:
            print(f"[transport_systemd_real_e2e] SKIP: {reason}")
            return 0
        print(f"[transport_systemd_real_e2e] {case['kind']} real-systemd lifecycle: ok")
    print("[transport_systemd_real_e2e] passed")
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    raise SystemExit(main())

91
tests/vpn_locations_swr.sh Executable file
View File

@@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Smoke test for the stale-while-revalidate (SWR) behaviour of the
# /api/v1/vpn/locations endpoint: refresh must not block, the snapshot
# must carry SWR metadata, and the refresh must settle within ~12s.
set -euo pipefail

API_URL="${API_URL:-http://127.0.0.1:8080}"
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR"' EXIT

# GET a JSON endpoint into $2; prints the request wall time on stdout,
# returns 1 (after dumping the body to stderr) on any non-200 status.
req_json() {
  local path="$1"
  local out_file="$2"
  local code
  local total
  local metrics
  metrics="$(curl -sS --max-time 6 -o "$out_file" -w "%{http_code} %{time_total}" "${API_URL}${path}")"
  # curl -w emitted "<code> <time_total>"; split on the first space.
  code="${metrics%% *}"
  total="${metrics#* }"
  if [[ "$code" != "200" ]]; then
    echo "[vpn_locations] ${path} -> HTTP ${code}" >&2
    cat "$out_file" >&2 || true
    return 1
  fi
  printf "%s\n" "$total"
}

echo "[vpn_locations] API_URL=${API_URL}"
# Trigger background refresh (handler must respond immediately from cache/SWR path).
t1="$(req_json "/api/v1/vpn/locations?refresh=1" "$TMP_DIR/refresh.json")"
echo "[vpn_locations] refresh request time=${t1}s"
# Read current state snapshot.
t2="$(req_json "/api/v1/vpn/locations" "$TMP_DIR/state.json")"
echo "[vpn_locations] snapshot request time=${t2}s"
# Validate the snapshot's SWR contract (keys and types) in Python.
python3 - "$TMP_DIR/state.json" <<'PY'
import json
import sys
path = sys.argv[1]
with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)
required = ("locations", "stale", "refresh_in_progress")
for k in required:
    if k not in data:
        raise SystemExit(f"[vpn_locations] missing key: {k}")
if not isinstance(data["locations"], list):
    raise SystemExit("[vpn_locations] locations must be array")
if not isinstance(data["stale"], bool):
    raise SystemExit("[vpn_locations] stale must be bool")
if not isinstance(data["refresh_in_progress"], bool):
    raise SystemExit("[vpn_locations] refresh_in_progress must be bool")
print(
    "[vpn_locations] keys OK:",
    f"count={len(data['locations'])}",
    f"stale={data['stale']}",
    f"refresh_in_progress={data['refresh_in_progress']}",
)
PY
# Poll short window: refresh should eventually finish or provide retry metadata.
ok=0
for _ in $(seq 1 12); do
  req_json "/api/v1/vpn/locations" "$TMP_DIR/poll.json" >/dev/null
  # Exit 0 from the inline check means "settled": refresh done, or a
  # next_retry_at hint is present.
  if python3 - "$TMP_DIR/poll.json" <<'PY'
import json
import sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
    data = json.load(f)
if not data.get("refresh_in_progress", False):
    raise SystemExit(0)
if data.get("next_retry_at"):
    raise SystemExit(0)
raise SystemExit(1)
PY
  then
    ok=1
    break
  fi
  sleep 1
done
if [[ "$ok" != "1" ]]; then
  echo "[vpn_locations] refresh state did not settle in expected window" >&2
  cat "$TMP_DIR/poll.json" >&2 || true
  exit 1
fi
echo "[vpn_locations] SWR checks passed"

80
tests/vpn_login_flow.py Executable file
View File

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""VPN login session smoke: start -> state polling -> optional action -> stop."""
import json
import os
import sys
import time
from urllib import request
# Backend base URL and overall polling window (seconds), both env-overridable.
API_BASE = os.environ.get("API_URL", "http://127.0.0.1:8080")
TIMEOUT = int(os.environ.get("VPN_FLOW_TIMEOUT_SEC", "20"))
def call(method, path, data=None, timeout=10):
    """JSON HTTP helper against API_BASE; exits the process with 1 on failure.

    Any transport error or non-JSON body is fatal for this smoke script,
    so failures are reported to stderr and terminate via sys.exit(1).
    """
    url = f"{API_BASE}{path}"
    encoded = None if data is None else json.dumps(data).encode("utf-8")
    req = request.Request(url, data=encoded, method=method)
    if encoded is not None:
        req.add_header("Content-Type", "application/json")
    try:
        with request.urlopen(req, timeout=timeout) as resp:
            body = resp.read()
    except Exception as err:
        print(f"[vpn] request {path} failed: {err}", file=sys.stderr)
        sys.exit(1)
    try:
        parsed = json.loads(body)
    except json.JSONDecodeError:
        print(f"[vpn] non-json response for {path}: {body[:200]!r}", file=sys.stderr)
        sys.exit(1)
    return parsed
def main():
    """Run the VPN login-session smoke: start -> poll state -> check -> stop."""
    print(f"[vpn] API_BASE={API_BASE}")
    start = call("POST", "/api/v1/vpn/login/session/start")
    # The start payload must expose the session contract fields.
    if "ok" not in start or "phase" not in start or "level" not in start:
        print(f"[vpn] invalid start payload: {start}", file=sys.stderr)
        sys.exit(1)
    print(f"[vpn] start phase={start.get('phase')} ok={start.get('ok')}")
    cursor = 0
    saw_state = False
    tried_action = False
    deadline = time.time() + TIMEOUT
    # Poll the incremental state stream until a terminal phase or timeout.
    while time.time() < deadline:
        state = call("GET", f"/api/v1/vpn/login/session/state?since={cursor}")
        saw_state = True
        # Advance the event cursor so the next poll only gets new events.
        if "cursor" in state:
            cursor = int(state["cursor"])
        phase = state.get("phase")
        alive = bool(state.get("alive"))
        can_check = bool(state.get("can_check"))
        print(f"[vpn] state phase={phase} alive={alive} cursor={cursor}")
        # Send the optional "check" action once, when the session allows it.
        if can_check and alive and not tried_action:
            action = call("POST", "/api/v1/vpn/login/session/action", {"action": "check"})
            if not action.get("ok", False):
                print(f"[vpn] action check failed: {action}", file=sys.stderr)
                sys.exit(1)
            tried_action = True
            print("[vpn] action=check sent")
        # Terminal phases end the polling loop early.
        if phase in ("success", "already_logged", "failed", "cancelled"):
            break
        time.sleep(1)
    if not saw_state:
        print("[vpn] no state response received", file=sys.stderr)
        sys.exit(1)
    stop = call("POST", "/api/v1/vpn/login/session/stop")
    if not stop.get("ok", False):
        print(f"[vpn] stop failed: {stop}", file=sys.stderr)
        sys.exit(1)
    print("[vpn] flow smoke passed")
# Script entry point (exit code is 0 unless main() calls sys.exit).
if __name__ == "__main__":
    main()