#!/usr/bin/env python3
"""Selective-VPN DNS resolver: resolves domain lists to IPs, labels static IPs
via PTR lookups, and maintains JSON caches for both."""
import argparse
import concurrent.futures
import json
import os
import sys
import time
from collections import defaultdict

# --- dnspython --------------------------------------------------------
# dnspython is a hard requirement; abort with exit code 2 (setup error)
# so the calling shell script can distinguish this from resolver failures.
try:
    import dns.resolver
    import dns.reversename
    import dns.exception
except ImportError as e:
    print(f"[resolver] dnspython is required: {e}", file=sys.stderr)
    sys.exit(2)
# --------------------------------------------------------------------
# Shared DNS configuration
# --------------------------------------------------------------------
# Optional runtime config file; when present, load_dns_config() uses it
# to override the built-in upstreams below.
DNS_CONFIG_PATH = "/etc/selective-vpn/dns-upstreams.conf"

# Built-in fallback upstreams, used when the config file is missing,
# unreadable, or lacks the corresponding section.
DEFAULT_DNS_DEFAULT = ["94.140.14.14", "94.140.15.15"]
DEFAULT_DNS_META = ["46.243.231.30", "46.243.231.41"]

# Effective upstreams for regular and "meta-special" domains;
# load_dns_config() may replace these at runtime.
DNS_DEFAULT = DEFAULT_DNS_DEFAULT.copy()
DNS_META = DEFAULT_DNS_META.copy()
# --------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------
def log(msg, trace_log=None):
    """Print a "[resolver]"-prefixed message to stderr and, when *trace_log*
    is a path, append the same line to that file (best-effort)."""
    text = f"[resolver] {msg}"
    print(text, file=sys.stderr)
    if not trace_log:
        return
    try:
        with open(trace_log, "a") as fh:
            fh.write(text + "\n")
    except Exception:
        # Tracing must never break the resolver itself.
        pass
def is_private_ipv4(ip: str) -> bool:
    """Return True if *ip* is private/non-routable or not a valid IPv4 string.

    *ip* may be "A.B.C.D" or "A.B.C.D/nn".  Anything that does not parse as
    a dotted quad with octets in 0..255 is treated as "private" so callers
    skip it instead of routing garbage.
    """
    base = ip.split("/", 1)[0]
    try:
        octets = [int(part) for part in base.split(".")]
    except ValueError:
        return True
    # Fix: reject malformed addresses such as "1.2.3" or "300.1.1.1".
    # The original code only caught int() failures, so out-of-range octets
    # slipped through and were reported as public/valid.
    if len(octets) != 4 or any(not 0 <= o <= 255 for o in octets):
        return True

    o1, o2 = octets[0], octets[1]
    if o1 in (0, 10, 127):          # "this network", RFC1918 10/8, loopback
        return True
    if o1 == 192 and o2 == 168:     # RFC1918 192.168/16
        return True
    if o1 == 172 and 16 <= o2 <= 31:  # RFC1918 172.16/12
        return True
    return False
def load_list(path):
    """Read *path* and return its non-empty, non-comment lines, stripped.

    A missing file yields an empty list.
    """
    if not os.path.exists(path):
        return []
    with open(path, "r") as fh:
        stripped = (raw.strip() for raw in fh)
        return [item for item in stripped if item and not item.startswith("#")]
def load_cache(path):
    """Load a JSON cache file; return {} when it is missing or unreadable."""
    if not os.path.exists(path):
        return {}
    try:
        with open(path, "r") as fh:
            return json.load(fh)
    except Exception:
        # A corrupt cache is not fatal -- start from scratch.
        return {}
def save_cache(path, data):
    """Atomically write *data* as pretty-printed JSON to *path*.

    Best-effort: any failure (permissions, missing directory, ...) is
    silently ignored so cache persistence never crashes a resolver run.
    """
    tmp_path = path + ".tmp"
    try:
        with open(tmp_path, "w") as fh:
            json.dump(data, fh, indent=2, sort_keys=True)
        # Rename-over keeps readers from ever seeing a half-written file.
        os.replace(tmp_path, path)
    except Exception:
        pass
def split_dns(dns: str):
    """Split an upstream spec into (host, port-string-or-None).

    "1.2.3.4"       -> ("1.2.3.4", None)
    "1.2.3.4#6053"  -> ("1.2.3.4", "6053")
    When a '#' is present, an empty host falls back to 127.0.0.1 and an
    empty port falls back to 53.
    """
    if "#" not in dns:
        return dns, None
    host, _, port = dns.partition("#")
    host = host.strip() or "127.0.0.1"
    port = port.strip() or "53"
    return host, port
# --------------------------------------------------------------------
# dnspython lookups
# --------------------------------------------------------------------
def dig_a(host, dns_list, timeout=3):
    """Resolve A records for *host* via dnspython.

    *dns_list* is a single "IP[#PORT]" string or a list of them; every
    upstream is queried in order.  Returns the public IPv4 addresses found,
    de-duplicated, in order of first appearance.
    """
    entries = [dns_list] if isinstance(dns_list, str) else dns_list
    found = []

    for entry in entries:
        server, port = split_dns(entry)
        if not server:
            continue

        resolver = dns.resolver.Resolver(configure=False)
        resolver.nameservers = [server]
        if port:
            try:
                resolver.port = int(port)
            except ValueError:
                resolver.port = 53
        resolver.timeout = timeout
        resolver.lifetime = timeout

        try:
            answer = resolver.resolve(host, "A")
        except Exception:
            # DNSException or anything unexpected: try the next upstream.
            continue

        for record in answer:
            text = record.to_text().strip()
            octets = text.split(".")
            if len(octets) != 4:
                continue
            if not all(o.isdigit() and 0 <= int(o) <= 255 for o in octets):
                continue
            if not is_private_ipv4(text) and text not in found:
                found.append(text)

    return found
def dig_ptr(ip, upstream, timeout=3):
    """Reverse-resolve *ip* into a list of lowercased PTR names.

    *upstream* may be "IP" or "IP#PORT".  Returns [] on an invalid address
    or any DNS failure.
    """
    server, port = split_dns(upstream)
    if not server:
        return []

    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = [server]
    if port:
        try:
            resolver.port = int(port)
        except ValueError:
            resolver.port = 53
    resolver.timeout = timeout
    resolver.lifetime = timeout

    try:
        reverse_name = dns.reversename.from_address(ip)
    except Exception:
        # Not a parseable address.
        return []

    try:
        answer = resolver.resolve(reverse_name, "PTR")
    except Exception:
        # DNSException or anything unexpected: no names.
        return []

    names = []
    for record in answer:
        text = record.to_text().strip()
        if text.endswith("."):
            text = text[:-1]
        if text:
            names.append(text.lower())
    return names
# --------------------------------------------------------------------
# Loading the DNS config
# --------------------------------------------------------------------
def load_dns_config(path=DNS_CONFIG_PATH, trace_log=None):
    """Read the upstreams config and refresh the global DNS_DEFAULT / DNS_META.

    Expected line format:
        default 1.2.3.4 5.6.7.8
        meta 9.9.9.9 8.8.8.8
    Entries may carry a port, e.g. "127.0.0.1#6053".  When the file is
    missing or unreadable both globals fall back to the built-in defaults;
    when a single section is missing only that side falls back.
    """
    global DNS_DEFAULT, DNS_META

    if not os.path.exists(path):
        DNS_DEFAULT = DEFAULT_DNS_DEFAULT.copy()
        DNS_META = DEFAULT_DNS_META.copy()
        log(
            f"dns-config: {path} not found, fallback to built-in defaults "
            f"(default={DNS_DEFAULT}, meta={DNS_META})",
            trace_log,
        )
        return

    new_default = []
    new_meta = []
    buckets = {"default": new_default, "meta": new_meta}

    try:
        with open(path, "r") as fh:
            for raw in fh:
                stripped = raw.strip()
                if not stripped or stripped.startswith("#"):
                    continue
                tokens = stripped.split()
                if len(tokens) < 2:
                    continue
                bucket = buckets.get(tokens[0].lower())
                if bucket is not None:
                    bucket.extend(tokens[1:])
    except Exception as e:
        DNS_DEFAULT = DEFAULT_DNS_DEFAULT.copy()
        DNS_META = DEFAULT_DNS_META.copy()
        log(
            f"dns-config: failed to read {path}: {e}, fallback to built-in defaults "
            f"(default={DNS_DEFAULT}, meta={DNS_META})",
            trace_log,
        )
        return

    if not new_default:
        new_default = DEFAULT_DNS_DEFAULT.copy()
        log(
            "dns-config: no 'default' section, fallback to built-in for default",
            trace_log,
        )
    if not new_meta:
        new_meta = DEFAULT_DNS_META.copy()
        log("dns-config: no 'meta' section, fallback to built-in for meta", trace_log)

    DNS_DEFAULT = new_default
    DNS_META = new_meta
    log(
        f"dns-config: accept {path}: "
        f"default={', '.join(DNS_DEFAULT)}; meta={', '.join(DNS_META)}",
        trace_log,
    )
def resolve_host(host, meta_special, trace_log=None):
    """Forward-resolve one domain to its A records.

    Domains listed in *meta_special* go through DNS_META; everything else
    uses DNS_DEFAULT (both are populated by load_dns_config()).
    Returns a de-duplicated list of IPs (possibly empty) and logs the result.
    """
    upstreams = DNS_META if host in meta_special else DNS_DEFAULT
    addresses = dig_a(host, upstreams)

    unique = []
    for addr in addresses:
        if addr not in unique:
            unique.append(addr)

    if unique:
        log(f"{host}: {', '.join(unique)}", trace_log)
    else:
        log(f"{host}: no IPs", trace_log)
    return unique
def parse_static_entries(static_lines):
    """Parse static-ips.txt lines into (ip_entry, base_ip, comment) tuples.

    Blank lines, full-line comments, and private/invalid addresses are
    skipped.  *ip_entry* is kept as written (may include a /mask) while
    *base_ip* is always the bare "A.B.C.D"; *comment* is the text after
    the first '#' or "".
    """
    entries = []
    for raw in static_lines:
        stripped = raw.strip()
        if not stripped or stripped.startswith("#"):
            continue

        ip_part, _, comment = stripped.partition("#")
        ip_part = ip_part.strip()
        comment = comment.strip()

        if not ip_part or is_private_ipv4(ip_part):
            continue

        entries.append((ip_part, ip_part.split("/", 1)[0], comment))
    return entries
def resolve_static_entries(static_entries, ptr_cache, ttl_sec, trace_log=None):
    """Build search labels for static IP entries, using PTR lookups with a cache.

    static_entries: list of (ip_entry, base_ip, comment) tuples where
        ip_entry -- as written in static-ips.txt (may include /mask)
        base_ip  -- "A.B.C.D" without mask
        comment  -- text after '#' or "".

    Returns a dict: ip_entry -> list of labels, each prefixed with '*'
    (so they are searchable in the ip-map output).

    *ptr_cache* is updated in place.  Bugfix: an entry's "last_resolved"
    timestamp is only set to *now* when a real PTR lookup was performed.
    Previously it was refreshed on every run, so as long as the job ran
    more often than *ttl_sec* the TTL kept sliding forward and stale PTR
    names were never re-resolved.
    """
    now = int(time.time())
    result = {}
    for ip_entry, base_ip, comment in static_entries:
        labels = []
        # 1) an explicit comment always wins
        if comment:
            labels.append(f"*{comment}")
        # 2) no comment: try PTR, honoring the cache TTL
        if not comment:
            cache_entry = ptr_cache.get(base_ip)
            names = []
            cached_ts = None
            if (
                isinstance(cache_entry, dict)
                and isinstance(cache_entry.get("last_resolved"), (int, float))
            ):
                age = now - cache_entry["last_resolved"]
                cached_names = cache_entry.get("names") or []
                if age <= ttl_sec and cached_names:
                    names = cached_names
                    # remember the original timestamp: served from cache,
                    # so the TTL must not be reset
                    cached_ts = cache_entry["last_resolved"]
            if not names:
                # PTR goes through the same DNS as regular traffic
                # (first upstream from the default list)
                dns_for_ptr = DNS_DEFAULT[0] if DNS_DEFAULT else DEFAULT_DNS_DEFAULT[0]

                try:
                    names = dig_ptr(base_ip, dns_for_ptr) or []
                except Exception as e:
                    log(
                        f"PTR failed for {base_ip} (using {dns_for_ptr}): "
                        f"{type(e).__name__}: {e}",
                        trace_log,
                    )
                    names = []

            # de-duplicate, preserving order
            uniq_names = []
            for n in names:
                if n not in uniq_names:
                    uniq_names.append(n)
            names = uniq_names
            ptr_cache[base_ip] = {
                "names": names,
                "last_resolved": now if cached_ts is None else cached_ts,
            }
            for n in names:
                labels.append(f"*{n}")
        # 3) nothing at all: fall back to the generic tag
        if not labels:
            labels = ["*[STATIC-IP]"]
        result[ip_entry] = labels
        log(f"static {ip_entry}: labels={', '.join(labels)}", trace_log)
    return result
# --------------------------------------------------------------------
# API layer: one pure function that is easy to call from anywhere
# --------------------------------------------------------------------
def run_resolver_job(
    *,
    domains,
    meta_special,
    static_lines,
    cache_path,
    ptr_cache_path,
    ttl_sec,
    workers,
    trace_log=None,
):
    """Main resolver API: resolve domains + label static IPs in one pass.

    Input:
        domains        -- list of domain names
        meta_special   -- iterable of domains from meta-special.txt
        static_lines   -- raw lines from static-ips.txt
        cache_path     -- path to domain-cache.json
        ptr_cache_path -- path to ptr-cache.json
        ttl_sec        -- TTL for the domain / PTR caches (seconds)
        workers        -- thread count for parallel resolution
        trace_log      -- path to trace.log (or None)

    Output: dict with keys:
        ips          -- sorted list of IPs/subnets
        ip_map       -- list of (ip, label) pairs (domain name or *LABEL)
        domain_cache -- updated domain cache (caller persists it)
        ptr_cache    -- updated PTR cache (caller persists it)
        summary      -- statistics dict
    """
    # --- load the DNS upstreams config (refreshes DNS_DEFAULT/DNS_META) ---
    load_dns_config(DNS_CONFIG_PATH, trace_log)

    meta_special = set(meta_special or [])

    log(f"domains to resolve: {len(domains)}", trace_log)

    # --- caches ---
    domain_cache = load_cache(cache_path)
    ptr_cache = load_cache(ptr_cache_path)
    now = int(time.time())

    # --- triage: what we take from domain_cache vs. what must be resolved ---
    fresh_from_cache = {}
    to_resolve = []

    for d in domains:
        entry = domain_cache.get(d)
        if entry and isinstance(entry, dict):
            ts = entry.get("last_resolved") or 0
            ips = entry.get("ips") or []
            # Only trust a well-formed, non-empty, unexpired entry; drop
            # any private IPs that may have slipped into an old cache.
            if isinstance(ts, (int, float)) and isinstance(ips, list) and ips:
                if now - ts <= ttl_sec:
                    valid_ips = [ip for ip in ips if not is_private_ipv4(ip)]
                    if valid_ips:
                        fresh_from_cache[d] = valid_ips
                        continue

        to_resolve.append(d)

    log(
        f"from cache: {len(fresh_from_cache)}, to resolve: {len(to_resolve)}",
        trace_log,
    )

    resolved = dict(fresh_from_cache)

    total_domains = len(domains)
    cache_hits = len(fresh_from_cache)
    resolved_now = 0
    unresolved = 0

    # --- parallel domain resolution ---
    if to_resolve:
        with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
            fut2host = {
                ex.submit(resolve_host, d, meta_special, trace_log): d
                for d in to_resolve
            }
            for fut in concurrent.futures.as_completed(fut2host):
                d = fut2host[fut]
                try:
                    ips = fut.result()
                except Exception as e:
                    log(f"{d}: resolver exception: {e}", trace_log)
                    ips = []

                if ips:
                    resolved[d] = ips
                    # Cache only successful resolutions; failures will be
                    # retried on the next run.
                    domain_cache[d] = {
                        "ips": ips,
                        "last_resolved": now,
                    }
                    resolved_now += 1
                else:
                    unresolved += 1

    # --- read static-ips and prepare the PTR work list ---
    static_entries = parse_static_entries(static_lines)
    log(f"static entries: {len(static_entries)}", trace_log)

    # --- PTR lookups / labels for static-ips ---
    static_label_map = resolve_static_entries(
        static_entries, ptr_cache, ttl_sec, trace_log
    )

    # --- merge everything into one IP set and an ip -> labels map ---
    ip_set = set()
    ip_to_domains = defaultdict(set)

    # IPs obtained from domain resolution
    for d, ips in resolved.items():
        for ip in ips:
            ip_set.add(ip)
            ip_to_domains[ip].add(d)

    # static IPs / networks (kept as written, possibly with /mask)
    for ip_entry, _, _ in static_entries:
        ip_set.add(ip_entry)
        for label in static_label_map.get(ip_entry, []):
            ip_to_domains[ip_entry].add(label)

    unique_ip_count = len(ip_set)
    if unique_ip_count == 0:
        log("no IPs resolved at all", trace_log)
    else:
        log(f"resolver done: {unique_ip_count} unique IPs", trace_log)

    ips_sorted = sorted(ip_set)

    # flatten the map into deterministic (ip, label) pairs
    ip_map_pairs = []
    for ip in ips_sorted:
        for dom in sorted(ip_to_domains[ip]):
            ip_map_pairs.append((ip, dom))

    summary = {
        "domains_total": total_domains,
        "from_cache": cache_hits,
        "resolved_now": resolved_now,
        "unresolved": unresolved,
        "static_entries": len(static_entries),
        "unique_ips": unique_ip_count,
    }

    log(
        "summary: domains=%d, from_cache=%d, resolved_now=%d, "
        "unresolved=%d, static_entries=%d, unique_ips=%d"
        % (
            summary["domains_total"],
            summary["from_cache"],
            summary["resolved_now"],
            summary["unresolved"],
            summary["static_entries"],
            summary["unique_ips"],
        ),
        trace_log,
    )

    return {
        "ips": ips_sorted,
        "ip_map": ip_map_pairs,
        "domain_cache": domain_cache,
        "ptr_cache": ptr_cache,
        "summary": summary,
    }
# --------------------------------------------------------------------
# CLI wrapper around the API function (for the bash script)
# --------------------------------------------------------------------
def main():
    """CLI entry point: parse arguments, run the resolver job, write outputs.

    Writes the unique IP list and the IP<TAB>label map, then persists both
    caches.  Returns 0 on success, 2 on a fatal resolver error.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--domains", required=True, help="file with domains (one per line)")
    ap.add_argument("--output-ips", required=True, help="file to write unique IPs")
    ap.add_argument(
        "--output-map",
        required=True,
        help="file to write IP<TAB>domain map",
    )
    ap.add_argument("--meta-file", required=True, help="meta-special.txt path")
    ap.add_argument("--static-ips", required=True, help="static-ips.txt path")
    ap.add_argument("--cache", required=True, help="domain-cache.json path")
    ap.add_argument("--ptr-cache", required=True, help="ptr-cache.json path")
    ap.add_argument("--trace-log", default=None)
    ap.add_argument("--workers", type=int, default=40)
    ap.add_argument("--ttl-sec", type=int, default=24 * 3600)
    args = ap.parse_args()

    trace_log = args.trace_log

    try:
        # input data for the API function
        domains = load_list(args.domains)
        meta_special = load_list(args.meta_file)

        # static-ips.txt is optional; keep raw lines (comments matter there)
        static_lines = []
        if os.path.exists(args.static_ips):
            with open(args.static_ips, "r") as f:
                static_lines = f.read().splitlines()

        job_result = run_resolver_job(
            domains=domains,
            meta_special=meta_special,
            static_lines=static_lines,
            cache_path=args.cache,
            ptr_cache_path=args.ptr_cache,
            ttl_sec=args.ttl_sec,
            workers=args.workers,
            trace_log=trace_log,
        )

        ips_sorted = job_result["ips"]
        ip_map_pairs = job_result["ip_map"]
        domain_cache = job_result["domain_cache"]
        ptr_cache = job_result["ptr_cache"]

        # output-ips: one IP/subnet per line
        with open(args.output_ips, "w") as f:
            for ip in ips_sorted:
                f.write(ip + "\n")

        # output-map: IP<TAB>domain/label
        with open(args.output_map, "w") as f:
            for ip, dom in ip_map_pairs:
                f.write(f"{ip}\t{dom}\n")

        # persist the caches
        save_cache(args.cache, domain_cache)
        save_cache(args.ptr_cache, ptr_cache)

        return 0

    except Exception as e:
        # genuinely fatal
        log(f"FATAL resolver error: {e}", trace_log)
        import traceback

        traceback.print_exc(file=sys.stderr)
        return 2
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    raise SystemExit(main())