From 7586685d5f01322a6569e9d74b5c782dbd98337e Mon Sep 17 00:00:00 2001 From: Tacit Lab Date: Wed, 15 Apr 2026 16:40:56 +0800 Subject: [PATCH] feat: bootstrap coinhunter cli package --- .gitignore | 7 + README.md | 23 + pyproject.toml | 25 + src/coinhunter/__init__.py | 1 + src/coinhunter/__main__.py | 2 + src/coinhunter/auto_trader.py | 291 +++++++ src/coinhunter/check_api.py | 31 + src/coinhunter/cli.py | 70 ++ src/coinhunter/external_gate.py | 82 ++ src/coinhunter/init_user_state.py | 63 ++ src/coinhunter/logger.py | 107 +++ src/coinhunter/market_probe.py | 243 ++++++ src/coinhunter/precheck.py | 962 +++++++++++++++++++++ src/coinhunter/review_context.py | 32 + src/coinhunter/review_engine.py | 315 +++++++ src/coinhunter/rotate_external_gate_log.py | 26 + src/coinhunter/smart_executor.py | 614 +++++++++++++ 17 files changed, 2894 insertions(+) create mode 100644 .gitignore create mode 100644 README.md create mode 100644 pyproject.toml create mode 100644 src/coinhunter/__init__.py create mode 100644 src/coinhunter/__main__.py create mode 100755 src/coinhunter/auto_trader.py create mode 100755 src/coinhunter/check_api.py create mode 100755 src/coinhunter/cli.py create mode 100755 src/coinhunter/external_gate.py create mode 100755 src/coinhunter/init_user_state.py create mode 100755 src/coinhunter/logger.py create mode 100755 src/coinhunter/market_probe.py create mode 100755 src/coinhunter/precheck.py create mode 100755 src/coinhunter/review_context.py create mode 100755 src/coinhunter/review_engine.py create mode 100755 src/coinhunter/rotate_external_gate_log.py create mode 100755 src/coinhunter/smart_executor.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bb916cf --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +__pycache__/ +*.pyc +.pytest_cache/ +.venv/ +dist/ +build/ +*.egg-info/ diff --git a/README.md b/README.md new file mode 100644 index 0000000..a698804 --- /dev/null +++ b/README.md @@ -0,0 +1,23 @@ +# coinhunter-cli + +CoinHunter 
CLI is the executable tooling layer for CoinHunter. + +- Code lives in this repository. +- User runtime data lives in `~/.coinhunter/`. +- Hermes skills can call this CLI instead of embedding large script collections. + +## Install (editable) + +```bash +pip install -e . +``` + +## Example commands + +```bash +coinhunter check-api +coinhunter smart-executor balances +coinhunter precheck +coinhunter review-context 12 +coinhunter market-probe bybit-ticker BTCUSDT +``` diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..d99e62e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,25 @@ +[build-system] +requires = ["setuptools>=68", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "coinhunter-cli" +version = "0.1.0" +description = "CoinHunter trading CLI with user runtime data in ~/.coinhunter" +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "ccxt>=4.4.0" +] +authors = [ + {name = "Tacit Lab", email = "ouyangcarlos@gmail.com"} +] + +[project.scripts] +coinhunter = "coinhunter.cli:main" + +[tool.setuptools] +package-dir = {"" = "src"} + +[tool.setuptools.packages.find] +where = ["src"] diff --git a/src/coinhunter/__init__.py b/src/coinhunter/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/src/coinhunter/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/src/coinhunter/__main__.py b/src/coinhunter/__main__.py new file mode 100644 index 0000000..13be635 --- /dev/null +++ b/src/coinhunter/__main__.py @@ -0,0 +1,2 @@ +from .cli import main +raise SystemExit(main()) diff --git a/src/coinhunter/auto_trader.py b/src/coinhunter/auto_trader.py new file mode 100755 index 0000000..c395b50 --- /dev/null +++ b/src/coinhunter/auto_trader.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python3 +""" +Coin Hunter Auto Trader +全自动妖币猎人 + 币安执行器 + +运行前请在 ~/.hermes/.env 配置: + BINANCE_API_KEY=你的API_KEY + BINANCE_API_SECRET=你的API_SECRET + +首次运行建议用 DRY_RUN=True 测试逻辑。 +""" +import json +import os 
+import sys
+import time
+from datetime import datetime, timezone, timedelta
+from pathlib import Path
+
+import ccxt
+
+# ============== Configuration ==============
+COINS_DIR = Path.home() / ".coinhunter"
+POSITIONS_FILE = COINS_DIR / "positions.json"
+ENV_FILE = Path.home() / ".hermes" / ".env"
+
+CST = timezone(timedelta(hours=8))
+
+# Risk-control parameters
+DRY_RUN = os.getenv("DRY_RUN", "true").lower() == "true"  # defaults to dry-run mode
+MAX_POSITIONS = 2  # max concurrent positions
+
+# Capital settings (sized dynamically from total equity)
+CAPITAL_ALLOCATION_PCT = 0.95  # use 95% of total equity for this strategy (5% buffer for fees/slippage)
+MIN_POSITION_USDT = 50  # minimum order size per entry (avoid dust orders)
+
+MIN_VOLUME_24H = 1_000_000  # minimum 24h quote turnover ($)
+MIN_PRICE_CHANGE_24H = 0.05  # minimum 24h gain 5%
+MAX_PRICE = 1.0  # low-priced coins only (meme trait)
+STOP_LOSS_PCT = -0.07  # stop loss -7%
+TAKE_PROFIT_1_PCT = 0.15  # take profit 1 +15%
+TAKE_PROFIT_2_PCT = 0.30  # take profit 2 +30%
+BLACKLIST = {"USDC", "BUSD", "TUSD", "FDUSD", "USTC", "PAXG", "XRP", "ETH", "BTC"}
+
+# ============== Helpers ==============
+def log(msg: str):
+    print(f"[{datetime.now(CST).strftime('%Y-%m-%d %H:%M:%S')} CST] {msg}")
+
+
+def load_positions() -> list:
+    if POSITIONS_FILE.exists():
+        return json.loads(POSITIONS_FILE.read_text(encoding="utf-8")).get("positions", [])
+    return []
+
+
+def save_positions(positions: list):
+    COINS_DIR.mkdir(parents=True, exist_ok=True)
+    POSITIONS_FILE.write_text(json.dumps({"positions": positions}, indent=2, ensure_ascii=False), encoding="utf-8")
+
+
+def load_env():
+    if ENV_FILE.exists():
+        for line in ENV_FILE.read_text(encoding="utf-8").splitlines():
+            line = line.strip()
+            if line and not line.startswith("#") and "=" in line:
+                key, val = line.split("=", 1)
+                os.environ.setdefault(key.strip(), val.strip())
+
+
+def calculate_position_size(total_usdt: float, available_usdt: float, open_slots: int) -> float:
+    """
+    Compute the per-order size from total equity.
+    Logic: fix the strategy's total cap first, then split it across remaining slots.
+    """
+    strategy_cap = total_usdt * CAPITAL_ALLOCATION_PCT
+    # capital already deployed ~= strategy cap - available balance
+    used_in_strategy = max(0, strategy_cap - available_usdt)
+    remaining_strategy_cap = max(0, strategy_cap - used_in_strategy)
+
+    if open_slots <= 0 or remaining_strategy_cap < MIN_POSITION_USDT:
+        return 0
+
+    size = remaining_strategy_cap / open_slots
+    # never exceed the currently available balance
+    size = min(size, available_usdt)
+    # keep two decimal places (comment previously claimed integer rounding; code rounds to 2 dp)
+    size = max(0, round(size, 2))
+    return size if size >= MIN_POSITION_USDT else 0
+
+
+# ============== Binance client ==============
+class BinanceTrader:
+    def __init__(self):
+        api_key = os.getenv("BINANCE_API_KEY")
+        secret = os.getenv("BINANCE_API_SECRET")
+        if not api_key or not secret:
+            raise RuntimeError("缺少 BINANCE_API_KEY 或 BINANCE_API_SECRET,请配置 ~/.hermes/.env")
+        self.exchange = ccxt.binance({
+            "apiKey": api_key,
+            "secret": secret,
+            "options": {"defaultType": "spot"},
+            "enableRateLimit": True,
+        })
+        self.exchange.load_markets()
+
+    def get_balance(self, asset: str = "USDT") -> float:
+        bal = self.exchange.fetch_balance()["free"].get(asset, 0)
+        return float(bal)
+
+    def fetch_tickers(self) -> dict:
+        return self.exchange.fetch_tickers()
+
+    def create_market_buy_order(self, symbol: str, amount_usdt: float):
+        if DRY_RUN:
+            log(f"[DRY RUN] 模拟买入 {symbol},金额 ${amount_usdt}")
+            return {"id": "dry-run-buy", "price": None, "amount": amount_usdt}
+        ticker = self.exchange.fetch_ticker(symbol)
+        price = float(ticker["last"])
+        qty = amount_usdt / price
+        order = self.exchange.create_market_buy_order(symbol, qty)
+        log(f"✅ 买入 {symbol} | 数量 {qty:.4f} | 价格 ~${price}")
+        return order
+
+    def create_market_sell_order(self, symbol: str, qty: float):
+        if DRY_RUN:
+            log(f"[DRY RUN] 模拟卖出 {symbol},数量 {qty}")
+            return {"id": "dry-run-sell"}
+        order = self.exchange.create_market_sell_order(symbol, qty)
+        log(f"✅ 卖出 {symbol} | 数量 {qty:.4f}")
+        return order
+
+
+# ============== Coin picker ==============
+class CoinPicker:
+    def __init__(self, exchange: ccxt.binance):
+        self.exchange = exchange
+
+    def scan(self) -> list:
+        tickers = self.exchange.fetch_tickers()
+        candidates = []
+        for symbol, t in tickers.items():
+            if not symbol.endswith("/USDT"):
+                continue
+            base = symbol.replace("/USDT", "")
+            if base in BLACKLIST:
+                continue
+
+            price = float(t["last"] or 0)
+            # fix: ccxt may return None for these fields; `get(k, 0)` does not guard a present-but-None key
+            change = float(t.get("percentage") or 0) / 100
+            volume = float(t.get("quoteVolume") or 0)
+
+            if price <= 0 or price > MAX_PRICE:
+                continue
+            if volume < MIN_VOLUME_24H:
+                continue
+            if change < MIN_PRICE_CHANGE_24H:
+                continue
+
+            score = change * (volume / MIN_VOLUME_24H)
+            candidates.append({
+                "symbol": symbol,
+                "base": base,
+                "price": price,
+                "change_24h": change,
+                "volume_24h": volume,
+                "score": score,
+            })
+
+        candidates.sort(key=lambda x: x["score"], reverse=True)
+        return candidates[:5]
+
+
+# ============== Main controller ==============
+def run_cycle():
+    load_env()
+    trader = BinanceTrader()
+    picker = CoinPicker(trader.exchange)
+    positions = load_positions()
+
+    log(f"当前持仓数: {len(positions)} | 最大允许: {MAX_POSITIONS} | DRY_RUN={DRY_RUN}")
+
+    # 1. Review open positions (take-profit / stop-loss)
+    tickers = trader.fetch_tickers()
+    new_positions = []
+    for pos in positions:
+        sym = pos["symbol"]
+        qty = float(pos["quantity"])
+        cost = float(pos["avg_cost"])
+        # ccxt tickers use slash format, e.g. PENGU/USDT
+        sym_ccxt = sym.replace("USDT", "/USDT") if "/" not in sym else sym
+        ticker = tickers.get(sym_ccxt)
+        if not ticker:
+            new_positions.append(pos)
+            continue
+
+        price = float(ticker["last"])
+        pnl_pct = (price - cost) / cost
+        log(f"监控 {sym} | 现价 ${price:.8f} | 成本 ${cost:.8f} | 盈亏 {pnl_pct:+.2%}")
+
+        action = None
+        if pnl_pct <= STOP_LOSS_PCT:
+            action = "STOP_LOSS"
+        elif pnl_pct >= TAKE_PROFIT_2_PCT:
+            action = "TAKE_PROFIT_2"
+        elif pnl_pct >= TAKE_PROFIT_1_PCT:
+            # skip if a partial take-profit was already executed
+            sold_pct = float(pos.get("take_profit_1_sold_pct", 0))
+            if sold_pct == 0:
+                action = "TAKE_PROFIT_1"
+
+        if action == "STOP_LOSS":
+            trader.create_market_sell_order(sym, qty)
+            log(f"🛑 {sym} 触发止损,全部清仓")
+            continue
+
+        if action == "TAKE_PROFIT_1":
+            sell_qty = qty * 0.5
+            trader.create_market_sell_order(sym, sell_qty)
+            pos["quantity"] = qty - sell_qty
+            pos["take_profit_1_sold_pct"] = 50
+            pos["updated_at"] = datetime.now(CST).isoformat()
+            log(f"🎯 {sym} 触发止盈1,卖出50%,剩余 {pos['quantity']:.4f}")
+            new_positions.append(pos)
+            continue
+
+        if action == "TAKE_PROFIT_2":
+            trader.create_market_sell_order(sym, float(pos["quantity"]))
+            log(f"🚀 {sym} 触发止盈2,全部清仓")
+            continue
+
+        new_positions.append(pos)
+
+    # 2. Open new positions
+    if len(new_positions) < MAX_POSITIONS:
+        candidates = picker.scan()
+        # fix: use .get() so legacy position entries without "base_asset" don't raise KeyError
+        held_bases = {p.get("base_asset") for p in new_positions}
+        total_usdt = trader.get_balance("USDT")
+        # add mark-to-market value of open positions to total equity
+        for pos in new_positions:
+            sym_ccxt = pos["symbol"].replace("USDT", "/USDT") if "/" not in pos["symbol"] else pos["symbol"]
+            ticker = tickers.get(sym_ccxt)
+            if ticker:
+                total_usdt += float(pos["quantity"]) * float(ticker["last"])
+
+        available_usdt = trader.get_balance("USDT")
+        open_slots = MAX_POSITIONS - len(new_positions)
+        position_size = calculate_position_size(total_usdt, available_usdt, open_slots)
+
+        log(f"总资产 USDT: ${total_usdt:.2f} | 策略上限({CAPITAL_ALLOCATION_PCT:.0%}): ${total_usdt*CAPITAL_ALLOCATION_PCT:.2f} | 每仓建议金额: ${position_size:.2f}")
+
+        for cand in candidates:
+            if len(new_positions) >= MAX_POSITIONS:
+                break
+            base = cand["base"]
+            if base in held_bases:
+                continue
+            if position_size <= 0:
+                log("策略资金已用完或余额不足,停止开新仓")
+                break
+
+            symbol = cand["symbol"]
+            order = trader.create_market_buy_order(symbol, position_size)
+            avg_price = float(order.get("price") or cand["price"])
+            qty = position_size / avg_price if avg_price else 0
+
+            new_positions.append({
+                "account_id": "binance-main",
+                "symbol": symbol.replace("/", ""),
+                "base_asset": base,
+                "quote_asset": "USDT",
+                "market_type": "spot",
+                "quantity": qty,
+                "avg_cost": avg_price,
+                "opened_at": datetime.now(CST).isoformat(),
+                "updated_at": datetime.now(CST).isoformat(),
+                "note": "Auto-trader entry",
+            })
+            held_bases.add(base)
+            available_usdt -= position_size
+            position_size = calculate_position_size(total_usdt, available_usdt, MAX_POSITIONS - len(new_positions))
+            log(f"📈 新开仓 {symbol} | 买入价 ${avg_price:.8f} | 数量 {qty:.2f}")
+
+    save_positions(new_positions)
+    log("周期结束,持仓已保存")
+
+
+if __name__ == "__main__":
+    try:
+        run_cycle()
+    except Exception as e:
+        log(f"❌ 错误: {e}")
+        sys.exit(1)
diff --git a/src/coinhunter/check_api.py b/src/coinhunter/check_api.py
new file mode 100755
index 0000000..55bf4a4
--- /dev/null
+++ b/src/coinhunter/check_api.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+"""Check whether the auto-trading environment configuration is ready."""
+import os
+from pathlib import Path
+
+
+def main():
+    env_file = Path.home() / ".hermes" / ".env"
+    if env_file.exists():
+        for line in env_file.read_text(encoding="utf-8").splitlines():
+            line = line.strip()
+            if line and not line.startswith("#") and "=" in line:
+                k, v = line.split("=", 1)
+                os.environ.setdefault(k.strip(), v.strip())
+
+    api_key = os.getenv("BINANCE_API_KEY", "")
+    secret = os.getenv("BINANCE_API_SECRET", "")
+
+    if not api_key or api_key.startswith("***") or api_key.startswith("your_"):
+        print("❌ 未配置 BINANCE_API_KEY")
+        return 1
+    if not secret or secret.startswith("***") or secret.startswith("your_"):
+        print("❌ 未配置 BINANCE_API_SECRET")
+        return 1
+
+    print("✅ API 配置正常")
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/src/coinhunter/cli.py b/src/coinhunter/cli.py
new file mode 100755
index 0000000..c4e8534
--- /dev/null
+++ b/src/coinhunter/cli.py
@@ -0,0 +1,70 @@
+"""CoinHunter unified CLI entrypoint."""
+
+import argparse
+import importlib
+import sys
+
+MODULE_MAP = {
+    "smart-executor": "smart_executor",
+    "auto-trader": "auto_trader",
+    "precheck": "precheck",
+    "external-gate": "external_gate",
+    "review-context": "review_context",
+    "review-engine": "review_engine",
+    "market-probe": "market_probe",
+    "check-api": "check_api",
+    "rotate-external-gate-log": "rotate_external_gate_log",
+    "init": "init_user_state",
+}
+
+
+def build_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="coinhunter",
+        description="CoinHunter trading operations CLI",
+        formatter_class=argparse.RawTextHelpFormatter,
+        epilog=(
+            "Examples:\n"
+            "  coinhunter check-api\n"
+            "  coinhunter smart-executor hold\n"
+            "  coinhunter smart-executor --analysis '...' --reasoning '...' buy ENJUSDT 50\n"
+            "  coinhunter precheck\n"
+            "  coinhunter precheck --ack '分析完成:HOLD'\n"
+            "  coinhunter external-gate\n"
+            "  coinhunter review-context 12\n"
+            "  coinhunter market-probe bybit-ticker BTCUSDT\n"
+            "  coinhunter init\n"
+        ),
+    )
+    parser.add_argument("command", choices=sorted(MODULE_MAP.keys()))
+    parser.add_argument("args", nargs=argparse.REMAINDER)
+    return parser
+
+
+def run_python_module(module_name: str, argv: list[str]) -> int:
+    module = importlib.import_module(f".{module_name}", package="coinhunter")
+    if not hasattr(module, "main"):
+        raise RuntimeError(f"Module {module_name} has no main()")
+    old_argv = sys.argv[:]
+    try:
+        sys.argv = [f"coinhunter {module_name}", *argv]
+        result = module.main()
+        return int(result) if isinstance(result, int) else 0
+    except SystemExit as exc:
+        # fix: a non-int code (e.g. sys.exit("msg")) means failure; previously it was mapped to success 0
+        return exc.code if isinstance(exc.code, int) else (1 if exc.code else 0)
+    finally:
+        sys.argv = old_argv
+
+
+def main() -> int:
+    parser = build_parser()
+    parsed = parser.parse_args()
+    module_name = MODULE_MAP[parsed.command]
+    argv = list(parsed.args)
+    if argv and argv[0] == "--":
+        argv = argv[1:]
+    return run_python_module(module_name, argv)
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/src/coinhunter/external_gate.py b/src/coinhunter/external_gate.py
new file mode 100755
index 0000000..802c4bc
--- /dev/null
+++ b/src/coinhunter/external_gate.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+import fcntl
+import json
+import subprocess
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+
+BASE_DIR = Path.home() / ".coinhunter"
+STATE_DIR = BASE_DIR / "state"
+LOCK_FILE = STATE_DIR / "external_gate.lock"
+COINHUNTER_MODULE = [sys.executable, "-m", "coinhunter"]
+HERMES_BIN = Path.home() / ".local" / "bin" / "hermes"
+TRADE_JOB_ID = "4e6593fff158"
+
+
+def utc_now():
+    return datetime.now(timezone.utc).isoformat()
+
+
+def log(message: str):
+    print(f"[{utc_now()}] {message}")
+
+
+def run_cmd(args: list[str]) -> subprocess.CompletedProcess:
+    return subprocess.run(args, capture_output=True, text=True)
+
+
+def parse_json_output(text: str) -> dict:
+    text = (text or "").strip()
+    if not text:
+        return {}
+    return json.loads(text)
+
+
+def main():
+    STATE_DIR.mkdir(parents=True, exist_ok=True)
+    with open(LOCK_FILE, "w", encoding="utf-8") as lockf:
+        try:
+            fcntl.flock(lockf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except BlockingIOError:
+            log("gate already running; skip")
+            return 0
+
+        precheck = run_cmd(COINHUNTER_MODULE + ["precheck"])
+        if precheck.returncode != 0:
+            log(f"precheck returned non-zero ({precheck.returncode}); stdout={precheck.stdout.strip()} stderr={precheck.stderr.strip()}")
+            return 1
+
+        try:
+            data = parse_json_output(precheck.stdout)
+        except Exception as e:
+            log(f"failed to parse precheck JSON: {e}; raw={precheck.stdout.strip()[:1000]}")
+            return 1
+
+        if not data.get("should_analyze"):
+            log("no trigger; skip model run")
+            return 0
+
+        if data.get("run_requested"):
+            log(f"trigger already queued at {data.get('run_requested_at')}; skip duplicate")
+            return 0
+
+        mark = run_cmd(COINHUNTER_MODULE + ["precheck", "--mark-run-requested", "external-gate queued cron run"])
+        if mark.returncode != 0:
+            log(f"failed to mark run requested; stdout={mark.stdout.strip()} stderr={mark.stderr.strip()}")
+            return 1
+
+        trigger = run_cmd([str(HERMES_BIN), "cron", "run", TRADE_JOB_ID])
+        if trigger.returncode != 0:
+            log(f"failed to trigger trade cron job; stdout={trigger.stdout.strip()} stderr={trigger.stderr.strip()}")
+            return 1
+
+        reasons = ", ".join(data.get("reasons", [])) or "unknown"
+        log(f"queued trade job {TRADE_JOB_ID}; reasons={reasons}")
+        if trigger.stdout.strip():
+            log(trigger.stdout.strip())
+        return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/src/coinhunter/init_user_state.py b/src/coinhunter/init_user_state.py new file mode 100755 index 0000000..1465136 --- /dev/null +++ b/src/coinhunter/init_user_state.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +import json +from datetime import datetime, timezone +from pathlib import Path + +ROOT = Path.home() / ".coinhunter" +CACHE_DIR = ROOT / "cache" + + +def now_iso(): + return datetime.now(timezone.utc).replace(microsecond=0).isoformat() + + +def ensure_file(path: Path, payload: dict): + if path.exists(): + return False + path.write_text(json.dumps(payload, ensure_ascii=False, indent=2) + "\n", encoding="utf-8") + return True + + +def main(): + ROOT.mkdir(parents=True, exist_ok=True) + CACHE_DIR.mkdir(parents=True, exist_ok=True) + + created = [] + ts = now_iso() + + templates = { + ROOT / "config.json": { + "default_exchange": "bybit", + "default_quote_currency": "USDT", + "timezone": "Asia/Shanghai", + "preferred_chains": ["solana", "base"], + "created_at": ts, + "updated_at": ts, + }, + ROOT / "accounts.json": { + "accounts": [] + }, + ROOT / "positions.json": { + "positions": [] + }, + ROOT / "watchlist.json": { + "watchlist": [] + }, + ROOT / "notes.json": { + "notes": [] + }, + } + + for path, payload in templates.items(): + if ensure_file(path, payload): + created.append(str(path)) + + print(json.dumps({ + "root": str(ROOT), + "created": created, + "cache_dir": str(CACHE_DIR), + }, ensure_ascii=False, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/src/coinhunter/logger.py b/src/coinhunter/logger.py new file mode 100755 index 0000000..295c5ed --- /dev/null +++ b/src/coinhunter/logger.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +"""Coin Hunter structured logger.""" +import json +import traceback +from datetime import datetime, timezone, timedelta +from pathlib import Path + +BASE_DIR = Path.home() / ".coinhunter" +LOG_DIR = BASE_DIR / "logs" +SCHEMA_VERSION = 2 + +CST = timezone(timedelta(hours=8)) + + +def bj_now(): + 
return datetime.now(CST) + + +def ensure_dir(): + LOG_DIR.mkdir(parents=True, exist_ok=True) + + +def _append_jsonl(prefix: str, payload: dict): + ensure_dir() + date_str = bj_now().strftime("%Y%m%d") + log_file = LOG_DIR / f"{prefix}_{date_str}.jsonl" + with open(log_file, "a", encoding="utf-8") as f: + f.write(json.dumps(payload, ensure_ascii=False) + "\n") + + +def log_event(prefix: str, payload: dict): + entry = { + "schema_version": SCHEMA_VERSION, + "timestamp": bj_now().isoformat(), + **payload, + } + _append_jsonl(prefix, entry) + return entry + + +def log_decision(data: dict): + return log_event("decisions", data) + + +def log_trade(action: str, symbol: str, qty: float = None, amount_usdt: float = None, + price: float = None, note: str = "", **extra): + payload = { + "action": action, + "symbol": symbol, + "qty": qty, + "amount_usdt": amount_usdt, + "price": price, + "note": note, + **extra, + } + return log_event("trades", payload) + + +def log_snapshot(market_data: dict, note: str = "", **extra): + return log_event("snapshots", {"market_data": market_data, "note": note, **extra}) + + +def log_error(where: str, error: Exception | str, **extra): + payload = { + "where": where, + "error_type": error.__class__.__name__ if isinstance(error, Exception) else "Error", + "error": str(error), + "traceback": traceback.format_exc() if isinstance(error, Exception) else None, + **extra, + } + return log_event("errors", payload) + + +def get_logs_by_date(log_type: str, date_str: str = None) -> list: + if date_str is None: + date_str = bj_now().strftime("%Y%m%d") + log_file = LOG_DIR / f"{log_type}_{date_str}.jsonl" + if not log_file.exists(): + return [] + entries = [] + with open(log_file, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if not line: + continue + try: + entries.append(json.loads(line)) + except json.JSONDecodeError: + continue + return entries + + +def get_logs_last_n_hours(log_type: str, n_hours: int = 1) -> list: + now = 
bj_now() + cutoff = now - timedelta(hours=n_hours) + entries = [] + for offset in [0, -1]: + date_str = (now + timedelta(days=offset)).strftime("%Y%m%d") + for entry in get_logs_by_date(log_type, date_str): + try: + ts = datetime.fromisoformat(entry["timestamp"]) + except Exception: + continue + if ts >= cutoff: + entries.append(entry) + entries.sort(key=lambda x: x.get("timestamp", "")) + return entries diff --git a/src/coinhunter/market_probe.py b/src/coinhunter/market_probe.py new file mode 100755 index 0000000..2e41344 --- /dev/null +++ b/src/coinhunter/market_probe.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +import argparse +import json +import os +import sys +import urllib.parse +import urllib.request + +DEFAULT_TIMEOUT = 20 + + +def fetch_json(url, headers=None, timeout=DEFAULT_TIMEOUT): + merged_headers = { + "Accept": "application/json", + "User-Agent": "Mozilla/5.0 (compatible; OpenClaw Coin Hunter/1.0)", + } + if headers: + merged_headers.update(headers) + req = urllib.request.Request(url, headers=merged_headers) + with urllib.request.urlopen(req, timeout=timeout) as resp: + data = resp.read() + return json.loads(data.decode("utf-8")) + + +def print_json(data): + print(json.dumps(data, ensure_ascii=False, indent=2)) + + +def bybit_ticker(symbol: str): + url = ( + "https://api.bybit.com/v5/market/tickers?category=spot&symbol=" + + urllib.parse.quote(symbol.upper()) + ) + payload = fetch_json(url) + items = payload.get("result", {}).get("list", []) + if not items: + raise SystemExit(f"No Bybit spot ticker found for {symbol}") + item = items[0] + out = { + "provider": "bybit", + "symbol": symbol.upper(), + "lastPrice": item.get("lastPrice"), + "price24hPcnt": item.get("price24hPcnt"), + "highPrice24h": item.get("highPrice24h"), + "lowPrice24h": item.get("lowPrice24h"), + "turnover24h": item.get("turnover24h"), + "volume24h": item.get("volume24h"), + "bid1Price": item.get("bid1Price"), + "ask1Price": item.get("ask1Price"), + } + print_json(out) + + +def 
bybit_klines(symbol: str, interval: str, limit: int): + params = urllib.parse.urlencode({ + "category": "spot", + "symbol": symbol.upper(), + "interval": interval, + "limit": str(limit), + }) + url = f"https://api.bybit.com/v5/market/kline?{params}" + payload = fetch_json(url) + rows = payload.get("result", {}).get("list", []) + out = { + "provider": "bybit", + "symbol": symbol.upper(), + "interval": interval, + "candles": [ + { + "startTime": r[0], + "open": r[1], + "high": r[2], + "low": r[3], + "close": r[4], + "volume": r[5], + "turnover": r[6], + } + for r in rows + ], + } + print_json(out) + + +def dexscreener_search(query: str): + url = "https://api.dexscreener.com/latest/dex/search/?q=" + urllib.parse.quote(query) + payload = fetch_json(url) + pairs = payload.get("pairs") or [] + out = [] + for p in pairs[:10]: + out.append({ + "chainId": p.get("chainId"), + "dexId": p.get("dexId"), + "pairAddress": p.get("pairAddress"), + "url": p.get("url"), + "baseToken": p.get("baseToken"), + "quoteToken": p.get("quoteToken"), + "priceUsd": p.get("priceUsd"), + "liquidityUsd": (p.get("liquidity") or {}).get("usd"), + "fdv": p.get("fdv"), + "marketCap": p.get("marketCap"), + "volume24h": (p.get("volume") or {}).get("h24"), + "buys24h": ((p.get("txns") or {}).get("h24") or {}).get("buys"), + "sells24h": ((p.get("txns") or {}).get("h24") or {}).get("sells"), + }) + print_json({"provider": "dexscreener", "query": query, "pairs": out}) + + +def dexscreener_token(chain: str, address: str): + url = f"https://api.dexscreener.com/tokens/v1/{urllib.parse.quote(chain)}/{urllib.parse.quote(address)}" + payload = fetch_json(url) + pairs = payload if isinstance(payload, list) else payload.get("pairs") or [] + out = [] + for p in pairs[:10]: + out.append({ + "chainId": p.get("chainId"), + "dexId": p.get("dexId"), + "pairAddress": p.get("pairAddress"), + "baseToken": p.get("baseToken"), + "quoteToken": p.get("quoteToken"), + "priceUsd": p.get("priceUsd"), + "liquidityUsd": 
(p.get("liquidity") or {}).get("usd"), + "fdv": p.get("fdv"), + "marketCap": p.get("marketCap"), + "volume24h": (p.get("volume") or {}).get("h24"), + }) + print_json({"provider": "dexscreener", "chain": chain, "address": address, "pairs": out}) + + +def coingecko_search(query: str): + url = "https://api.coingecko.com/api/v3/search?query=" + urllib.parse.quote(query) + payload = fetch_json(url) + coins = payload.get("coins") or [] + out = [] + for c in coins[:10]: + out.append({ + "id": c.get("id"), + "name": c.get("name"), + "symbol": c.get("symbol"), + "marketCapRank": c.get("market_cap_rank"), + "thumb": c.get("thumb"), + }) + print_json({"provider": "coingecko", "query": query, "coins": out}) + + +def coingecko_coin(coin_id: str): + params = urllib.parse.urlencode({ + "localization": "false", + "tickers": "false", + "market_data": "true", + "community_data": "false", + "developer_data": "false", + "sparkline": "false", + }) + url = f"https://api.coingecko.com/api/v3/coins/{urllib.parse.quote(coin_id)}?{params}" + payload = fetch_json(url) + md = payload.get("market_data") or {} + out = { + "provider": "coingecko", + "id": payload.get("id"), + "symbol": payload.get("symbol"), + "name": payload.get("name"), + "marketCapRank": payload.get("market_cap_rank"), + "currentPriceUsd": (md.get("current_price") or {}).get("usd"), + "marketCapUsd": (md.get("market_cap") or {}).get("usd"), + "fullyDilutedValuationUsd": (md.get("fully_diluted_valuation") or {}).get("usd"), + "totalVolumeUsd": (md.get("total_volume") or {}).get("usd"), + "priceChangePercentage24h": md.get("price_change_percentage_24h"), + "priceChangePercentage7d": md.get("price_change_percentage_7d"), + "priceChangePercentage30d": md.get("price_change_percentage_30d"), + "circulatingSupply": md.get("circulating_supply"), + "totalSupply": md.get("total_supply"), + "maxSupply": md.get("max_supply"), + "homepage": (payload.get("links") or {}).get("homepage", [None])[0], + } + print_json(out) + + +def 
birdeye_token(address: str): + api_key = os.getenv("BIRDEYE_API_KEY") or os.getenv("BIRDEYE_APIKEY") + if not api_key: + raise SystemExit("Birdeye requires BIRDEYE_API_KEY in the environment") + url = "https://public-api.birdeye.so/defi/token_overview?address=" + urllib.parse.quote(address) + payload = fetch_json(url, headers={ + "x-api-key": api_key, + "x-chain": "solana", + }) + print_json({"provider": "birdeye", "address": address, "data": payload.get("data")}) + + +def build_parser(): + parser = argparse.ArgumentParser(description="Coin Hunter market data probe") + sub = parser.add_subparsers(dest="command", required=True) + + p = sub.add_parser("bybit-ticker", help="Fetch Bybit spot ticker") + p.add_argument("symbol") + + p = sub.add_parser("bybit-klines", help="Fetch Bybit spot klines") + p.add_argument("symbol") + p.add_argument("--interval", default="60", help="Bybit interval, e.g. 1, 5, 15, 60, 240, D") + p.add_argument("--limit", type=int, default=10) + + p = sub.add_parser("dex-search", help="Search DexScreener by query") + p.add_argument("query") + + p = sub.add_parser("dex-token", help="Fetch DexScreener token pairs by chain/address") + p.add_argument("chain") + p.add_argument("address") + + p = sub.add_parser("gecko-search", help="Search CoinGecko") + p.add_argument("query") + + p = sub.add_parser("gecko-coin", help="Fetch CoinGecko coin by id") + p.add_argument("coin_id") + + p = sub.add_parser("birdeye-token", help="Fetch Birdeye token overview (Solana)") + p.add_argument("address") + + return parser + + +def main(): + parser = build_parser() + args = parser.parse_args() + if args.command == "bybit-ticker": + bybit_ticker(args.symbol) + elif args.command == "bybit-klines": + bybit_klines(args.symbol, args.interval, args.limit) + elif args.command == "dex-search": + dexscreener_search(args.query) + elif args.command == "dex-token": + dexscreener_token(args.chain, args.address) + elif args.command == "gecko-search": + coingecko_search(args.query) + 
elif args.command == "gecko-coin": + coingecko_coin(args.coin_id) + elif args.command == "birdeye-token": + birdeye_token(args.address) + else: + parser.error("Unknown command") + + +if __name__ == "__main__": + main() diff --git a/src/coinhunter/precheck.py b/src/coinhunter/precheck.py new file mode 100755 index 0000000..bdf661d --- /dev/null +++ b/src/coinhunter/precheck.py @@ -0,0 +1,962 @@ +#!/usr/bin/env python3 +import json +import os +import re +import sys +import hashlib +from datetime import datetime, timezone, timedelta +from pathlib import Path +from zoneinfo import ZoneInfo + +import ccxt + +BASE_DIR = Path.home() / ".coinhunter" +STATE_DIR = BASE_DIR / "state" +STATE_FILE = STATE_DIR / "precheck_state.json" +POSITIONS_FILE = BASE_DIR / "positions.json" +CONFIG_FILE = BASE_DIR / "config.json" +ENV_FILE = Path.home() / ".hermes" / ".env" + +BASE_PRICE_MOVE_TRIGGER_PCT = 0.025 +BASE_PNL_TRIGGER_PCT = 0.03 +BASE_PORTFOLIO_MOVE_TRIGGER_PCT = 0.03 +BASE_CANDIDATE_SCORE_TRIGGER_RATIO = 1.15 +BASE_FORCE_ANALYSIS_AFTER_MINUTES = 180 +BASE_COOLDOWN_MINUTES = 45 +TOP_CANDIDATES = 10 +MIN_ACTIONABLE_USDT = 12.0 +MIN_REAL_POSITION_VALUE_USDT = 8.0 +BLACKLIST = {"USDC", "BUSD", "TUSD", "FDUSD", "USTC", "PAXG"} +HARD_STOP_PCT = -0.08 +HARD_MOON_PCT = 0.25 +MIN_CHANGE_PCT = 1.0 +MAX_PRICE_CAP = None +HARD_REASON_DEDUP_MINUTES = 15 +MAX_PENDING_TRIGGER_MINUTES = 30 +MAX_RUN_REQUEST_MINUTES = 20 + + +def utc_now(): + return datetime.now(timezone.utc) + + +def utc_iso(): + return utc_now().isoformat() + + +def parse_ts(value: str | None): + if not value: + return None + try: + ts = datetime.fromisoformat(value) + if ts.tzinfo is None: + ts = ts.replace(tzinfo=timezone.utc) + return ts + except Exception: + return None + + +def load_json(path: Path, default): + if not path.exists(): + return default + try: + return json.loads(path.read_text(encoding="utf-8")) + except Exception: + return default + + +def load_env(): + if not ENV_FILE.exists(): + return + for line in 
ENV_FILE.read_text(encoding="utf-8").splitlines(): + line = line.strip() + if line and not line.startswith("#") and "=" in line: + key, val = line.split("=", 1) + os.environ.setdefault(key.strip(), val.strip()) + + +def load_positions(): + return load_json(POSITIONS_FILE, {}).get("positions", []) + + +def load_state(): + return load_json(STATE_FILE, {}) + + +def load_config(): + return load_json(CONFIG_FILE, {}) + + +def clear_run_request_fields(state: dict): + state.pop("run_requested_at", None) + state.pop("run_request_note", None) + + +def sanitize_state_for_stale_triggers(state: dict): + sanitized = dict(state) + notes = [] + now = utc_now() + run_requested_at = parse_ts(sanitized.get("run_requested_at")) + last_deep_analysis_at = parse_ts(sanitized.get("last_deep_analysis_at")) + last_triggered_at = parse_ts(sanitized.get("last_triggered_at")) + pending_trigger = bool(sanitized.get("pending_trigger")) + + if run_requested_at and last_deep_analysis_at and last_deep_analysis_at >= run_requested_at: + clear_run_request_fields(sanitized) + if pending_trigger and (not last_triggered_at or last_deep_analysis_at >= last_triggered_at): + sanitized["pending_trigger"] = False + sanitized["pending_reasons"] = [] + sanitized["last_ack_note"] = ( + f"auto-cleared completed trigger at {utc_iso()} because last_deep_analysis_at >= run_requested_at" + ) + pending_trigger = False + notes.append( + f"自动清理已完成的 run_requested 标记:最近深度分析时间 {last_deep_analysis_at.isoformat()} >= 请求时间 {run_requested_at.isoformat()}" + ) + run_requested_at = None + + if run_requested_at and now - run_requested_at > timedelta(minutes=MAX_RUN_REQUEST_MINUTES): + clear_run_request_fields(sanitized) + notes.append( + f"自动清理超时 run_requested 标记:已等待 {(now - run_requested_at).total_seconds() / 60:.1f} 分钟,超过 {MAX_RUN_REQUEST_MINUTES} 分钟" + ) + run_requested_at = None + + pending_anchor = run_requested_at or last_triggered_at or last_deep_analysis_at + if pending_trigger and pending_anchor and now - 
pending_anchor > timedelta(minutes=MAX_PENDING_TRIGGER_MINUTES): + sanitized["pending_trigger"] = False + sanitized["pending_reasons"] = [] + sanitized["last_ack_note"] = ( + f"auto-recovered stale pending trigger at {utc_iso()} after waiting " + f"{(now - pending_anchor).total_seconds() / 60:.1f} minutes" + ) + notes.append( + f"自动解除 pending_trigger:触发状态已悬挂 {(now - pending_anchor).total_seconds() / 60:.1f} 分钟,超过 {MAX_PENDING_TRIGGER_MINUTES} 分钟" + ) + + sanitized["_stale_recovery_notes"] = notes + return sanitized + + +def save_state(state: dict): + STATE_DIR.mkdir(parents=True, exist_ok=True) + state_to_save = dict(state) + state_to_save.pop("_stale_recovery_notes", None) + STATE_FILE.write_text(json.dumps(state_to_save, indent=2, ensure_ascii=False), encoding="utf-8") + + +def stable_hash(data) -> str: + payload = json.dumps(data, sort_keys=True, ensure_ascii=False, separators=(",", ":")) + return hashlib.sha1(payload.encode("utf-8")).hexdigest() + + +def get_exchange(): + load_env() + api_key = os.getenv("BINANCE_API_KEY") + secret = os.getenv("BINANCE_API_SECRET") + if not api_key or not secret: + raise RuntimeError("Missing BINANCE_API_KEY or BINANCE_API_SECRET in ~/.hermes/.env") + ex = ccxt.binance({ + "apiKey": api_key, + "secret": secret, + "options": {"defaultType": "spot"}, + "enableRateLimit": True, + }) + ex.load_markets() + return ex + + +def fetch_ohlcv_batch(ex, symbols: set, timeframe: str, limit: int): + results = {} + for sym in sorted(symbols): + try: + ohlcv = ex.fetch_ohlcv(sym, timeframe=timeframe, limit=limit) + if ohlcv and len(ohlcv) >= 2: + results[sym] = ohlcv + except Exception: + pass + return results + + +def compute_ohlcv_metrics(ohlcv_1h, ohlcv_4h, current_price, volume_24h=None): + metrics = {} + if ohlcv_1h and len(ohlcv_1h) >= 2: + closes = [c[4] for c in ohlcv_1h] + volumes = [c[5] for c in ohlcv_1h] + metrics["change_1h_pct"] = round((closes[-1] - closes[-2]) / closes[-2] * 100, 2) if closes[-2] != 0 else None + if len(closes) 
>= 5: + metrics["change_4h_pct"] = round((closes[-1] - closes[-5]) / closes[-5] * 100, 2) if closes[-5] != 0 else None + recent_vol = sum(volumes[-4:]) / 4 if len(volumes) >= 4 else None + metrics["volume_1h_avg"] = round(recent_vol, 2) if recent_vol else None + highs = [c[2] for c in ohlcv_1h[-4:]] + lows = [c[3] for c in ohlcv_1h[-4:]] + metrics["high_4h"] = round(max(highs), 8) if highs else None + metrics["low_4h"] = round(min(lows), 8) if lows else None + + if ohlcv_4h and len(ohlcv_4h) >= 2: + closes_4h = [c[4] for c in ohlcv_4h] + volumes_4h = [c[5] for c in ohlcv_4h] + metrics["change_4h_pct_from_4h"] = round((closes_4h[-1] - closes_4h[-2]) / closes_4h[-2] * 100, 2) if closes_4h[-2] != 0 else None + recent_vol_4h = sum(volumes_4h[-2:]) / 2 if len(volumes_4h) >= 2 else None + metrics["volume_4h_avg"] = round(recent_vol_4h, 2) if recent_vol_4h else None + highs_4h = [c[2] for c in ohlcv_4h] + lows_4h = [c[3] for c in ohlcv_4h] + metrics["high_24h_calc"] = round(max(highs_4h), 8) if highs_4h else None + metrics["low_24h_calc"] = round(min(lows_4h), 8) if lows_4h else None + if highs_4h and lows_4h: + avg_price = sum(closes_4h) / len(closes_4h) + metrics["volatility_4h_pct"] = round((max(highs_4h) - min(lows_4h)) / avg_price * 100, 2) + + if current_price: + if metrics.get("high_4h"): + metrics["distance_from_4h_high_pct"] = round((metrics["high_4h"] - current_price) / metrics["high_4h"] * 100, 2) + if metrics.get("low_4h"): + metrics["distance_from_4h_low_pct"] = round((current_price - metrics["low_4h"]) / metrics["low_4h"] * 100, 2) + if metrics.get("high_24h_calc"): + metrics["distance_from_24h_high_pct"] = round((metrics["high_24h_calc"] - current_price) / metrics["high_24h_calc"] * 100, 2) + if metrics.get("low_24h_calc"): + metrics["distance_from_24h_low_pct"] = round((current_price - metrics["low_24h_calc"]) / metrics["low_24h_calc"] * 100, 2) + + if volume_24h and volume_24h > 0 and metrics.get("volume_1h_avg"): + daily_avg_1h = volume_24h / 24 + 
metrics["volume_1h_multiple"] = round(metrics["volume_1h_avg"] / daily_avg_1h, 2) + if volume_24h and volume_24h > 0 and metrics.get("volume_4h_avg"): + daily_avg_4h = volume_24h / 6 + metrics["volume_4h_multiple"] = round(metrics["volume_4h_avg"] / daily_avg_4h, 2) + + return metrics + + +def enrich_candidates_and_positions(global_candidates, candidate_layers, positions_view, tickers, ex): + symbols = set() + for c in global_candidates: + symbols.add(c["symbol"]) + for p in positions_view: + sym = p.get("symbol") + if sym: + sym_ccxt = norm_symbol(sym) + symbols.add(sym_ccxt) + + ohlcv_1h = fetch_ohlcv_batch(ex, symbols, "1h", 24) + ohlcv_4h = fetch_ohlcv_batch(ex, symbols, "4h", 12) + + def _apply(target_list): + for item in target_list: + sym = item.get("symbol") + if not sym: + continue + sym_ccxt = norm_symbol(sym) + v24h = to_float(tickers.get(sym_ccxt, {}).get("quoteVolume")) + metrics = compute_ohlcv_metrics( + ohlcv_1h.get(sym_ccxt), + ohlcv_4h.get(sym_ccxt), + item.get("price") or item.get("last_price"), + volume_24h=v24h, + ) + item["metrics"] = metrics + + _apply(global_candidates) + for band_list in candidate_layers.values(): + _apply(band_list) + _apply(positions_view) + return global_candidates, candidate_layers, positions_view + + +def regime_from_pct(pct: float | None) -> str: + if pct is None: + return "unknown" + if pct >= 2.0: + return "bullish" + if pct <= -2.0: + return "bearish" + return "neutral" + + +def to_float(value, default=0.0): + try: + if value is None: + return default + return float(value) + except Exception: + return default + + +def norm_symbol(symbol: str) -> str: + s = symbol.upper().replace("-", "").replace("_", "") + if "/" in s: + return s + if s.endswith("USDT"): + return s[:-4] + "/USDT" + return s + + +def get_local_now(config: dict): + tz_name = config.get("timezone") or "Asia/Shanghai" + try: + tz = ZoneInfo(tz_name) + except Exception: + tz = ZoneInfo("Asia/Shanghai") + tz_name = "Asia/Shanghai" + return 
utc_now().astimezone(tz), tz_name + + +def session_label(local_dt: datetime) -> str: + hour = local_dt.hour + if 0 <= hour < 7: + return "overnight" + if 7 <= hour < 12: + return "asia-morning" + if 12 <= hour < 17: + return "asia-afternoon" + if 17 <= hour < 21: + return "europe-open" + return "us-session" + + +def _liquidity_score(volume: float) -> float: + return min(1.0, max(0.0, volume / 50_000_000)) + + +def _breakout_score(price: float, avg_price: float | None) -> float: + if not avg_price or avg_price <= 0: + return 0.0 + return (price - avg_price) / avg_price + + +def top_candidates_from_tickers(tickers: dict): + candidates = [] + for symbol, ticker in tickers.items(): + if not symbol.endswith("/USDT"): + continue + base = symbol.replace("/USDT", "") + if base in BLACKLIST: + continue + if not re.fullmatch(r"[A-Z0-9]{2,20}", base): + continue + price = to_float(ticker.get("last")) + change_pct = to_float(ticker.get("percentage")) + volume = to_float(ticker.get("quoteVolume")) + high = to_float(ticker.get("high")) + low = to_float(ticker.get("low")) + avg_price = to_float(ticker.get("average"), None) + if price <= 0: + continue + if MAX_PRICE_CAP is not None and price > MAX_PRICE_CAP: + continue + if volume < 500_000: + continue + if change_pct < MIN_CHANGE_PCT: + continue + momentum = change_pct / 10.0 + liquidity = _liquidity_score(volume) + breakout = _breakout_score(price, avg_price) + score = round(momentum * 0.5 + liquidity * 0.3 + breakout * 0.2, 4) + band = "major" if price >= 10 else "mid" if price >= 1 else "meme" + distance_from_high = (high - price) / max(high, 1e-9) if high else None + candidates.append({ + "symbol": symbol, + "base": base, + "price": round(price, 8), + "change_24h_pct": round(change_pct, 2), + "volume_24h": round(volume, 2), + "breakout_pct": round(breakout * 100, 2), + "high_24h": round(high, 8) if high else None, + "low_24h": round(low, 8) if low else None, + "distance_from_high_pct": round(distance_from_high * 100, 2) if 
distance_from_high is not None else None, + "score": score, + "band": band, + }) + candidates.sort(key=lambda x: x["score"], reverse=True) + global_top = candidates[:TOP_CANDIDATES] + layers = {"major": [], "mid": [], "meme": []} + for c in candidates: + layers[c["band"]].append(c) + for k in layers: + layers[k] = layers[k][:5] + return global_top, layers + + +def build_snapshot(): + config = load_config() + local_dt, tz_name = get_local_now(config) + ex = get_exchange() + positions = load_positions() + tickers = ex.fetch_tickers() + balances = ex.fetch_balance()["free"] + free_usdt = to_float(balances.get("USDT")) + + positions_view = [] + total_position_value = 0.0 + largest_position_value = 0.0 + actionable_positions = 0 + for pos in positions: + symbol = pos.get("symbol") or "" + sym_ccxt = norm_symbol(symbol) + ticker = tickers.get(sym_ccxt, {}) + last = to_float(ticker.get("last"), None) + qty = to_float(pos.get("quantity")) + avg_cost = to_float(pos.get("avg_cost"), None) + value = round(qty * last, 4) if last is not None else None + pnl_pct = round((last - avg_cost) / avg_cost, 4) if last is not None and avg_cost else None + high = to_float(ticker.get("high")) + low = to_float(ticker.get("low")) + distance_from_high = (high - last) / max(high, 1e-9) if high and last else None + if value is not None: + total_position_value += value + largest_position_value = max(largest_position_value, value) + if value >= MIN_REAL_POSITION_VALUE_USDT: + actionable_positions += 1 + positions_view.append({ + "symbol": symbol, + "base_asset": pos.get("base_asset"), + "quantity": qty, + "avg_cost": avg_cost, + "last_price": last, + "market_value_usdt": value, + "pnl_pct": pnl_pct, + "high_24h": round(high, 8) if high else None, + "low_24h": round(low, 8) if low else None, + "distance_from_high_pct": round(distance_from_high * 100, 2) if distance_from_high is not None else None, + }) + + btc_pct = to_float((tickers.get("BTC/USDT") or {}).get("percentage"), None) + eth_pct = 
to_float((tickers.get("ETH/USDT") or {}).get("percentage"), None) + global_candidates, candidate_layers = top_candidates_from_tickers(tickers) + global_candidates, candidate_layers, positions_view = enrich_candidates_and_positions( + global_candidates, candidate_layers, positions_view, tickers, ex + ) + leader_score = global_candidates[0]["score"] if global_candidates else 0.0 + portfolio_value = round(free_usdt + total_position_value, 4) + volatility_score = round(max(abs(to_float(btc_pct, 0)), abs(to_float(eth_pct, 0))), 2) + + position_structure = [ + { + "symbol": p.get("symbol"), + "base_asset": p.get("base_asset"), + "quantity": round(to_float(p.get("quantity"), 0), 10), + "avg_cost": to_float(p.get("avg_cost"), None), + } + for p in positions_view + ] + + snapshot = { + "generated_at": utc_iso(), + "timezone": tz_name, + "local_time": local_dt.isoformat(), + "session": session_label(local_dt), + "free_usdt": round(free_usdt, 4), + "portfolio_value_usdt": portfolio_value, + "largest_position_value_usdt": round(largest_position_value, 4), + "actionable_positions": actionable_positions, + "positions": positions_view, + "positions_hash": stable_hash(position_structure), + "top_candidates": global_candidates, + "top_candidates_layers": candidate_layers, + "candidates_hash": stable_hash({"global": global_candidates, "layers": candidate_layers}), + "market_regime": { + "btc_24h_pct": round(btc_pct, 2) if btc_pct is not None else None, + "btc_regime": regime_from_pct(btc_pct), + "eth_24h_pct": round(eth_pct, 2) if eth_pct is not None else None, + "eth_regime": regime_from_pct(eth_pct), + "volatility_score": volatility_score, + "leader_score": round(leader_score, 4), + }, + } + snapshot["snapshot_hash"] = stable_hash({ + "portfolio_value_usdt": snapshot["portfolio_value_usdt"], + "positions_hash": snapshot["positions_hash"], + "candidates_hash": snapshot["candidates_hash"], + "market_regime": snapshot["market_regime"], + "session": snapshot["session"], + }) + return 
snapshot + + +def build_adaptive_profile(snapshot: dict): + portfolio_value = snapshot.get("portfolio_value_usdt", 0) + free_usdt = snapshot.get("free_usdt", 0) + session = snapshot.get("session") + market = snapshot.get("market_regime", {}) + volatility_score = to_float(market.get("volatility_score"), 0) + leader_score = to_float(market.get("leader_score"), 0) + actionable_positions = int(snapshot.get("actionable_positions") or 0) + largest_position_value = to_float(snapshot.get("largest_position_value_usdt"), 0) + + capital_band = "micro" if portfolio_value < 25 else "small" if portfolio_value < 100 else "normal" + session_mode = "quiet" if session in {"overnight", "asia-morning"} else "active" + volatility_mode = "high" if volatility_score >= 2.5 or leader_score >= 120 else "normal" + dust_mode = free_usdt < MIN_ACTIONABLE_USDT and largest_position_value < MIN_REAL_POSITION_VALUE_USDT + + price_trigger = BASE_PRICE_MOVE_TRIGGER_PCT + pnl_trigger = BASE_PNL_TRIGGER_PCT + portfolio_trigger = BASE_PORTFOLIO_MOVE_TRIGGER_PCT + candidate_ratio = BASE_CANDIDATE_SCORE_TRIGGER_RATIO + force_minutes = BASE_FORCE_ANALYSIS_AFTER_MINUTES + cooldown_minutes = BASE_COOLDOWN_MINUTES + soft_score_threshold = 2.0 + + if capital_band == "micro": + price_trigger += 0.02 + pnl_trigger += 0.03 + portfolio_trigger += 0.04 + candidate_ratio += 0.25 + force_minutes += 180 + cooldown_minutes += 30 + soft_score_threshold += 1.0 + elif capital_band == "small": + price_trigger += 0.01 + pnl_trigger += 0.01 + portfolio_trigger += 0.01 + candidate_ratio += 0.1 + force_minutes += 60 + cooldown_minutes += 10 + soft_score_threshold += 0.5 + + if session_mode == "quiet": + price_trigger += 0.01 + pnl_trigger += 0.01 + portfolio_trigger += 0.01 + candidate_ratio += 0.05 + soft_score_threshold += 0.5 + else: + force_minutes = max(120, force_minutes - 30) + + if volatility_mode == "high": + price_trigger = max(0.02, price_trigger - 0.01) + pnl_trigger = max(0.025, pnl_trigger - 0.005) + 
portfolio_trigger = max(0.025, portfolio_trigger - 0.005) + candidate_ratio = max(1.1, candidate_ratio - 0.1) + cooldown_minutes = max(20, cooldown_minutes - 10) + soft_score_threshold = max(1.0, soft_score_threshold - 0.5) + + if dust_mode: + candidate_ratio += 0.3 + force_minutes += 180 + cooldown_minutes += 30 + soft_score_threshold += 1.5 + + return { + "capital_band": capital_band, + "session_mode": session_mode, + "volatility_mode": volatility_mode, + "dust_mode": dust_mode, + "price_move_trigger_pct": round(price_trigger, 4), + "pnl_trigger_pct": round(pnl_trigger, 4), + "portfolio_move_trigger_pct": round(portfolio_trigger, 4), + "candidate_score_trigger_ratio": round(candidate_ratio, 4), + "force_analysis_after_minutes": int(force_minutes), + "cooldown_minutes": int(cooldown_minutes), + "soft_score_threshold": round(soft_score_threshold, 2), + "new_entries_allowed": free_usdt >= MIN_ACTIONABLE_USDT and not dust_mode, + "switching_allowed": actionable_positions > 0 or portfolio_value >= 25, + } + + +def _candidate_weight(snapshot: dict, profile: dict) -> float: + if not profile.get("new_entries_allowed"): + return 0.5 + if profile.get("volatility_mode") == "high": + return 1.5 + if snapshot.get("session") in {"europe-open", "us-session"}: + return 1.25 + return 1.0 + + +def analyze_trigger(snapshot: dict, state: dict): + reasons = [] + details = list(state.get("_stale_recovery_notes", [])) + hard_reasons = [] + soft_reasons = [] + soft_score = 0.0 + + profile = build_adaptive_profile(snapshot) + market = snapshot.get("market_regime", {}) + now = utc_now() + + last_positions_hash = state.get("last_positions_hash") + last_portfolio_value = state.get("last_portfolio_value_usdt") + last_market_regime = state.get("last_market_regime", {}) + last_positions_map = state.get("last_positions_map", {}) + last_top_candidate = state.get("last_top_candidate") + pending_trigger = bool(state.get("pending_trigger")) + run_requested_at = 
parse_ts(state.get("run_requested_at")) + last_deep_analysis_at = parse_ts(state.get("last_deep_analysis_at")) + last_triggered_at = parse_ts(state.get("last_triggered_at")) + last_trigger_snapshot_hash = state.get("last_trigger_snapshot_hash") + last_hard_reasons_at = state.get("last_hard_reasons_at", {}) + + price_trigger = profile["price_move_trigger_pct"] + pnl_trigger = profile["pnl_trigger_pct"] + portfolio_trigger = profile["portfolio_move_trigger_pct"] + candidate_ratio_trigger = profile["candidate_score_trigger_ratio"] + force_minutes = profile["force_analysis_after_minutes"] + cooldown_minutes = profile["cooldown_minutes"] + soft_score_threshold = profile["soft_score_threshold"] + + if pending_trigger: + reasons.append("pending-trigger-unacked") + hard_reasons.append("pending-trigger-unacked") + details.append("上次已触发深度分析但尚未确认完成") + if run_requested_at: + details.append(f"外部门控已在 {run_requested_at.isoformat()} 请求运行分析任务") + + if not last_deep_analysis_at: + reasons.append("first-analysis") + hard_reasons.append("first-analysis") + details.append("尚未记录过深度分析") + elif now - last_deep_analysis_at >= timedelta(minutes=force_minutes): + reasons.append("stale-analysis") + hard_reasons.append("stale-analysis") + details.append(f"距离上次深度分析已超过 {force_minutes} 分钟") + + if last_positions_hash and snapshot["positions_hash"] != last_positions_hash: + reasons.append("positions-changed") + hard_reasons.append("positions-changed") + details.append("持仓结构发生变化") + + if last_portfolio_value not in (None, 0): + portfolio_delta = abs(snapshot["portfolio_value_usdt"] - last_portfolio_value) / max(last_portfolio_value, 1e-9) + if portfolio_delta >= portfolio_trigger: + if portfolio_delta >= 1.0: + reasons.append("portfolio-extreme-move") + hard_reasons.append("portfolio-extreme-move") + details.append(f"组合净值剧烈变化 {portfolio_delta:.1%},超过 100%,视为硬触发") + else: + reasons.append("portfolio-move") + soft_reasons.append("portfolio-move") + soft_score += 1.0 + details.append(f"组合净值变化 
{portfolio_delta:.1%},阈值 {portfolio_trigger:.1%}") + + for pos in snapshot["positions"]: + symbol = pos["symbol"] + prev = last_positions_map.get(symbol, {}) + cur_price = pos.get("last_price") + prev_price = prev.get("last_price") + cur_pnl = pos.get("pnl_pct") + prev_pnl = prev.get("pnl_pct") + market_value = to_float(pos.get("market_value_usdt"), 0) + actionable_position = market_value >= MIN_REAL_POSITION_VALUE_USDT + + if cur_price and prev_price: + price_move = abs(cur_price - prev_price) / max(prev_price, 1e-9) + if price_move >= price_trigger: + reasons.append(f"price-move:{symbol}") + soft_reasons.append(f"price-move:{symbol}") + soft_score += 1.0 if actionable_position else 0.4 + details.append(f"{symbol} 价格变化 {price_move:.1%},阈值 {price_trigger:.1%}") + if cur_pnl is not None and prev_pnl is not None: + pnl_move = abs(cur_pnl - prev_pnl) + if pnl_move >= pnl_trigger: + reasons.append(f"pnl-move:{symbol}") + soft_reasons.append(f"pnl-move:{symbol}") + soft_score += 1.0 if actionable_position else 0.4 + details.append(f"{symbol} 盈亏变化 {pnl_move:.1%},阈值 {pnl_trigger:.1%}") + if cur_pnl is not None: + stop_band = -0.06 if actionable_position else -0.12 + take_band = 0.14 if actionable_position else 0.25 + if cur_pnl <= stop_band or cur_pnl >= take_band: + reasons.append(f"risk-band:{symbol}") + hard_reasons.append(f"risk-band:{symbol}") + details.append(f"{symbol} 接近执行阈值,当前盈亏 {cur_pnl:.1%}") + if cur_pnl <= HARD_STOP_PCT: + reasons.append(f"hard-stop:{symbol}") + hard_reasons.append(f"hard-stop:{symbol}") + details.append(f"{symbol} 盈亏超过 {HARD_STOP_PCT:.1%},触发紧急硬触发") + + current_market = snapshot.get("market_regime", {}) + if last_market_regime: + if current_market.get("btc_regime") != last_market_regime.get("btc_regime"): + reasons.append("btc-regime-change") + hard_reasons.append("btc-regime-change") + details.append(f"BTC 由 {last_market_regime.get('btc_regime')} 切换为 {current_market.get('btc_regime')}") + if current_market.get("eth_regime") != 
last_market_regime.get("eth_regime"): + reasons.append("eth-regime-change") + hard_reasons.append("eth-regime-change") + details.append(f"ETH 由 {last_market_regime.get('eth_regime')} 切换为 {current_market.get('eth_regime')}") + + # Candidate hard moon trigger + for cand in snapshot.get("top_candidates", []): + if cand.get("change_24h_pct", 0) >= HARD_MOON_PCT * 100: + reasons.append(f"hard-moon:{cand['symbol']}") + hard_reasons.append(f"hard-moon:{cand['symbol']}") + details.append(f"候选币 {cand['symbol']} 24h 涨幅 {cand['change_24h_pct']:.1f}%,触发强势硬触发") + + current_leader = snapshot.get("top_candidates", [{}])[0] if snapshot.get("top_candidates") else None + candidate_weight = _candidate_weight(snapshot, profile) + + # Layer leader changes + last_layers = state.get("last_candidates_layers", {}) + current_layers = snapshot.get("top_candidates_layers", {}) + for band in ("major", "mid", "meme"): + cur_band = current_layers.get(band, []) + prev_band = last_layers.get(band, []) + cur_leader = cur_band[0] if cur_band else None + prev_leader = prev_band[0] if prev_band else None + if cur_leader and prev_leader and cur_leader["symbol"] != prev_leader["symbol"]: + score_ratio = cur_leader.get("score", 0) / max(prev_leader.get("score", 0.0001), 0.0001) + if score_ratio >= candidate_ratio_trigger: + reasons.append(f"new-leader-{band}:{cur_leader['symbol']}") + soft_reasons.append(f"new-leader-{band}:{cur_leader['symbol']}") + soft_score += candidate_weight * 0.7 + details.append( + f"{band} 层新榜首 {cur_leader['symbol']} 替代 {prev_leader['symbol']},score 比例 {score_ratio:.2f}" + ) + + current_leader = snapshot.get("top_candidates", [{}])[0] if snapshot.get("top_candidates") else None + if last_top_candidate and current_leader: + if current_leader.get("symbol") != last_top_candidate.get("symbol"): + score_ratio = current_leader.get("score", 0) / max(last_top_candidate.get("score", 0.0001), 0.0001) + if score_ratio >= candidate_ratio_trigger: + reasons.append("new-leader") + 
soft_reasons.append("new-leader") + soft_score += candidate_weight + details.append( + f"新候选币 {current_leader.get('symbol')} 领先上次榜首,score 比例 {score_ratio:.2f},阈值 {candidate_ratio_trigger:.2f}" + ) + elif current_leader and not last_top_candidate: + reasons.append("candidate-leader-init") + soft_reasons.append("candidate-leader-init") + soft_score += candidate_weight + details.append(f"首次记录候选榜首 {current_leader.get('symbol')}") + + # --- adaptive cooldown based on signal change magnitude --- + def _signal_delta() -> float: + delta = 0.0 + if last_trigger_snapshot_hash and snapshot.get("snapshot_hash") != last_trigger_snapshot_hash: + delta += 0.5 + if snapshot["positions_hash"] != last_positions_hash: + delta += 1.5 + for pos in snapshot["positions"]: + symbol = pos["symbol"] + prev = last_positions_map.get(symbol, {}) + cur_price = pos.get("last_price") + prev_price = prev.get("last_price") + cur_pnl = pos.get("pnl_pct") + prev_pnl = prev.get("pnl_pct") + if cur_price and prev_price: + if abs(cur_price - prev_price) / max(prev_price, 1e-9) >= 0.02: + delta += 0.5 + if cur_pnl is not None and prev_pnl is not None: + if abs(cur_pnl - prev_pnl) >= 0.03: + delta += 0.5 + current_leader = snapshot.get("top_candidates", [{}])[0] if snapshot.get("top_candidates") else None + last_leader = state.get("last_top_candidate") + if current_leader and last_leader and current_leader.get("symbol") != last_leader.get("symbol"): + delta += 1.0 + current_layers = snapshot.get("top_candidates_layers", {}) + last_layers = state.get("last_candidates_layers", {}) + for band in ("major", "mid", "meme"): + cur_band = current_layers.get(band, []) + prev_band = last_layers.get(band, []) + cur_l = cur_band[0] if cur_band else None + prev_l = prev_band[0] if prev_band else None + if cur_l and prev_l and cur_l.get("symbol") != prev_l.get("symbol"): + delta += 0.5 + if last_market_regime: + if current_market.get("btc_regime") != last_market_regime.get("btc_regime"): + delta += 1.5 + if 
current_market.get("eth_regime") != last_market_regime.get("eth_regime"): + delta += 1.5 + if last_portfolio_value not in (None, 0): + portfolio_delta = abs(snapshot["portfolio_value_usdt"] - last_portfolio_value) / max(last_portfolio_value, 1e-9) + if portfolio_delta >= 0.05: + delta += 1.0 + # fresh hard reason type not seen in last trigger + last_trigger_hard_types = {r.split(":")[0] for r in (state.get("last_trigger_hard_reasons") or [])} + current_hard_types = {r.split(":")[0] for r in hard_reasons} + if current_hard_types - last_trigger_hard_types: + delta += 2.0 + return delta + + signal_delta = _signal_delta() + effective_cooldown = cooldown_minutes + if signal_delta < 1.0: + effective_cooldown = max(cooldown_minutes, 90) + elif signal_delta >= 2.5: + effective_cooldown = max(0, cooldown_minutes - 15) + + cooldown_active = bool(last_triggered_at and now - last_triggered_at < timedelta(minutes=effective_cooldown)) + + # Dedup hard reasons within window to avoid repeated model wakeups for the same event + dedup_window = timedelta(minutes=HARD_REASON_DEDUP_MINUTES) + for hr in list(hard_reasons): + last_at = parse_ts(last_hard_reasons_at.get(hr)) + if last_at and now - last_at < dedup_window: + hard_reasons.remove(hr) + details.append(f"{hr} 近期已触发,{HARD_REASON_DEDUP_MINUTES}分钟内去重") + + hard_trigger = bool(hard_reasons) + if profile.get("dust_mode") and not hard_trigger and soft_score < soft_score_threshold + 1.0: + details.append("微型资金/粉尘仓位模式:抬高软触发门槛,避免无意义分析") + + if profile.get("dust_mode") and not profile.get("new_entries_allowed") and any(r in {"new-leader", "candidate-leader-init"} for r in soft_reasons): + details.append("当前可用资金低于可执行阈值,新候选币仅做观察,不单独触发深度分析") + soft_score = max(0.0, soft_score - 0.75) + + should_analyze = hard_trigger or soft_score >= soft_score_threshold + + if cooldown_active and not hard_trigger and should_analyze: + should_analyze = False + details.append(f"处于 {cooldown_minutes} 分钟冷却窗口,软触发先记录不升级") + + if cooldown_active and not 
hard_trigger and reasons and soft_score < soft_score_threshold: + details.append(f"处于 {cooldown_minutes} 分钟冷却窗口,且软信号强度不足 ({soft_score:.2f} < {soft_score_threshold:.2f})") + + status = "deep_analysis_required" if should_analyze else "stable" + + compact_lines = [ + f"状态: {status}", + f"组合净值: ${snapshot['portfolio_value_usdt']:.4f} | 可用USDT: ${snapshot['free_usdt']:.4f}", + f"本地时段: {snapshot['session']} | 时区: {snapshot['timezone']}", + f"BTC/ETH: {market.get('btc_regime')} ({market.get('btc_24h_pct')}%), {market.get('eth_regime')} ({market.get('eth_24h_pct')}%) | 波动分数 {market.get('volatility_score')}", + f"门控画像: capital={profile['capital_band']}, session={profile['session_mode']}, volatility={profile['volatility_mode']}, dust={profile['dust_mode']}", + f"阈值: price={price_trigger:.1%}, pnl={pnl_trigger:.1%}, portfolio={portfolio_trigger:.1%}, candidate={candidate_ratio_trigger:.2f}, cooldown={effective_cooldown}m({cooldown_minutes}m基础), force={force_minutes}m", + f"软信号分: {soft_score:.2f} / {soft_score_threshold:.2f}", + f"信号变化度: {signal_delta:.1f}", + ] + if snapshot["positions"]: + compact_lines.append("持仓:") + for pos in snapshot["positions"][:4]: + pnl = pos.get("pnl_pct") + pnl_text = f"{pnl:+.1%}" if pnl is not None else "n/a" + compact_lines.append( + f"- {pos['symbol']}: qty={pos['quantity']}, px={pos.get('last_price')}, pnl={pnl_text}, value=${pos.get('market_value_usdt')}" + ) + else: + compact_lines.append("持仓: 当前无现货仓位") + if snapshot["top_candidates"]: + compact_lines.append("候选榜:") + for cand in snapshot["top_candidates"]: + compact_lines.append( + f"- {cand['symbol']}: score={cand['score']}, 24h={cand['change_24h_pct']}%, vol=${cand['volume_24h']}" + ) + layers = snapshot.get("top_candidates_layers", {}) + for band, band_cands in layers.items(): + if band_cands: + compact_lines.append(f"{band} 层:") + for cand in band_cands: + compact_lines.append( + f"- {cand['symbol']}: score={cand['score']}, 24h={cand['change_24h_pct']}%, vol=${cand['volume_24h']}" + ) 
+ if details: + compact_lines.append("触发说明:") + for item in details: + compact_lines.append(f"- {item}") + + return { + "generated_at": snapshot["generated_at"], + "status": status, + "should_analyze": should_analyze, + "pending_trigger": pending_trigger, + "run_requested": bool(run_requested_at), + "run_requested_at": run_requested_at.isoformat() if run_requested_at else None, + "cooldown_active": cooldown_active, + "effective_cooldown_minutes": effective_cooldown, + "signal_delta": round(signal_delta, 2), + "reasons": reasons, + "hard_reasons": hard_reasons, + "soft_reasons": soft_reasons, + "soft_score": round(soft_score, 3), + "adaptive_profile": profile, + "portfolio_value_usdt": snapshot["portfolio_value_usdt"], + "free_usdt": snapshot["free_usdt"], + "market_regime": snapshot["market_regime"], + "session": snapshot["session"], + "positions": snapshot["positions"], + "top_candidates": snapshot["top_candidates"], + "top_candidates_layers": layers, + "snapshot_hash": snapshot["snapshot_hash"], + "compact_summary": "\n".join(compact_lines), + "details": details, + } + + +def update_state_after_observation(state: dict, snapshot: dict, analysis: dict): + new_state = dict(state) + new_state.update({ + "last_observed_at": snapshot["generated_at"], + "last_snapshot_hash": snapshot["snapshot_hash"], + "last_positions_hash": snapshot["positions_hash"], + "last_candidates_hash": snapshot["candidates_hash"], + "last_portfolio_value_usdt": snapshot["portfolio_value_usdt"], + "last_market_regime": snapshot["market_regime"], + "last_positions_map": {p["symbol"]: {"last_price": p.get("last_price"), "pnl_pct": p.get("pnl_pct")} for p in snapshot["positions"]}, + "last_top_candidate": snapshot["top_candidates"][0] if snapshot["top_candidates"] else None, + "last_candidates_layers": snapshot.get("top_candidates_layers", {}), + "last_adaptive_profile": analysis.get("adaptive_profile", {}), + }) + if analysis["should_analyze"]: + new_state["pending_trigger"] = True + 
new_state["pending_reasons"] = analysis["details"] + new_state["last_triggered_at"] = snapshot["generated_at"] + new_state["last_trigger_snapshot_hash"] = snapshot["snapshot_hash"] + new_state["last_trigger_hard_reasons"] = analysis.get("hard_reasons", []) + new_state["last_trigger_signal_delta"] = analysis.get("signal_delta", 0.0) + + # Update hard-reason dedup timestamps and prune old entries + last_hard_reasons_at = dict(state.get("last_hard_reasons_at", {})) + for hr in analysis.get("hard_reasons", []): + last_hard_reasons_at[hr] = snapshot["generated_at"] + cutoff = utc_now() - timedelta(hours=24) + pruned = { + k: v for k, v in last_hard_reasons_at.items() + if parse_ts(v) and parse_ts(v) > cutoff + } + new_state["last_hard_reasons_at"] = pruned + return new_state + + +def mark_run_requested(note: str = ""): + state = load_state() + state["run_requested_at"] = utc_iso() + state["run_request_note"] = note + save_state(state) + print(json.dumps({"ok": True, "run_requested_at": state["run_requested_at"], "note": note}, ensure_ascii=False)) + + +def ack_analysis(note: str = ""): + state = load_state() + state["last_deep_analysis_at"] = utc_iso() + state["pending_trigger"] = False + state["pending_reasons"] = [] + state["last_ack_note"] = note + state.pop("run_requested_at", None) + state.pop("run_request_note", None) + save_state(state) + print(json.dumps({"ok": True, "acked_at": state["last_deep_analysis_at"], "note": note}, ensure_ascii=False)) + + +def main(): + if len(sys.argv) > 1 and sys.argv[1] == "--ack": + ack_analysis(" ".join(sys.argv[2:]).strip()) + return + if len(sys.argv) > 1 and sys.argv[1] == "--mark-run-requested": + mark_run_requested(" ".join(sys.argv[2:]).strip()) + return + + try: + state = sanitize_state_for_stale_triggers(load_state()) + snapshot = build_snapshot() + analysis = analyze_trigger(snapshot, state) + save_state(update_state_after_observation(state, snapshot, analysis)) + print(json.dumps(analysis, ensure_ascii=False, indent=2)) 
+ except Exception as e: + failure = { + "generated_at": utc_iso(), + "status": "deep_analysis_required", + "should_analyze": True, + "pending_trigger": True, + "cooldown_active": False, + "reasons": ["precheck-error"], + "hard_reasons": ["precheck-error"], + "soft_reasons": [], + "soft_score": 0, + "details": [str(e)], + "compact_summary": f"预检查失败,转入深度分析兜底: {e}", + } + print(json.dumps(failure, ensure_ascii=False, indent=2)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/coinhunter/review_context.py b/src/coinhunter/review_context.py new file mode 100755 index 0000000..8a605be --- /dev/null +++ b/src/coinhunter/review_context.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import json +import sys + +from . import review_engine + + +def main(): + hours = int(sys.argv[1]) if len(sys.argv) > 1 else 12 + review = review_engine.generate_review(hours) + compact = { + "review_period_hours": review.get("review_period_hours", hours), + "review_timestamp": review.get("review_timestamp"), + "total_decisions": review.get("total_decisions", 0), + "total_trades": review.get("total_trades", 0), + "total_errors": review.get("total_errors", 0), + "stats": review.get("stats", {}), + "insights": review.get("insights", []), + "recommendations": review.get("recommendations", []), + "decision_quality_top": review.get("decision_quality", [])[:5], + "should_report": bool( + review.get("total_decisions", 0) + or review.get("total_trades", 0) + or review.get("total_errors", 0) + or review.get("insights") + ), + } + print(json.dumps(compact, ensure_ascii=False, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/src/coinhunter/review_engine.py b/src/coinhunter/review_engine.py new file mode 100755 index 0000000..a79261d --- /dev/null +++ b/src/coinhunter/review_engine.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python3 +"""Coin Hunter hourly review engine.""" +import json +import os +import sys +from datetime import datetime, timezone, timedelta +from pathlib 
# Shared env file holding the exchange API credentials.
ENV_FILE = Path.home() / ".hermes" / ".env"
# Directory where hourly review JSON reports are persisted.
REVIEW_DIR = Path.home() / ".coinhunter" / "reviews"

# China Standard Time (UTC+8) — all review timestamps use this zone.
CST = timezone(timedelta(hours=8))


def load_env():
    """Load KEY=VALUE pairs from ENV_FILE into os.environ (existing vars win)."""
    if not ENV_FILE.exists():
        return
    for raw in ENV_FILE.read_text(encoding="utf-8").splitlines():
        entry = raw.strip()
        if not entry or entry.startswith("#") or "=" not in entry:
            continue
        name, _, value = entry.partition("=")
        os.environ.setdefault(name.strip(), value.strip())


def get_exchange():
    """Build an authenticated ccxt Binance spot client with markets loaded."""
    load_env()
    client = ccxt.binance({
        "apiKey": os.getenv("BINANCE_API_KEY"),
        "secret": os.getenv("BINANCE_API_SECRET"),
        "options": {"defaultType": "spot"},
        "enableRateLimit": True,
    })
    client.load_markets()
    return client


def ensure_review_dir():
    """Create the reviews directory (and parents) if it does not exist yet."""
    REVIEW_DIR.mkdir(parents=True, exist_ok=True)


def norm_symbol(symbol: str) -> str:
    """Normalize 'ethusdt' / 'ETH-USDT' style inputs to ccxt's 'ETH/USDT' form.

    Unlike smart_executor.norm_symbol, unsupported symbols are returned
    unchanged rather than raising (best-effort for review purposes).
    """
    compact = symbol.upper().replace("-", "").replace("_", "")
    if "/" in compact:
        return compact
    return f"{compact[:-4]}/USDT" if compact.endswith("USDT") else compact


def fetch_current_price(ex, symbol: str):
    """Return the last traded price for *symbol*, or None on any fetch error."""
    try:
        ticker = ex.fetch_ticker(norm_symbol(symbol))
        return float(ticker["last"])
    except Exception:
        return None


def analyze_trade(trade: dict, ex) -> dict:
    """Grade one logged trade against the current market price.

    outcome_assessment is 'good' / 'bad' / 'neutral' for BUY entries, and
    'good' / 'missed' / 'neutral' for SELL_ALL exits ('missed' = the coin
    kept rallying more than 2% after we sold).
    """
    symbol = trade.get("symbol")
    entry_price = trade.get("price")
    action = trade.get("action", "")
    live_price = fetch_current_price(ex, symbol) if symbol else None

    pnl_estimate = None
    outcome = "neutral"
    if symbol and entry_price and live_price:
        change_pct = (live_price - float(entry_price)) / float(entry_price) * 100
        if action == "BUY":
            pnl_estimate = round(change_pct, 2)
            if change_pct > 2:
                outcome = "good"
            elif change_pct < -2:
                outcome = "bad"
        elif action == "SELL_ALL":
            pnl_estimate = round(-change_pct, 2)
            # Lowered missed threshold: >2% is a missed opportunity in short-term trading
            if change_pct < -2:
                outcome = "good"
            elif change_pct > 2:
                outcome = "missed"

    return {
        "timestamp": trade.get("timestamp"),
        "symbol": symbol,
        "action": action,
        "decision_id": trade.get("decision_id"),
        "execution_price": entry_price,
        "current_price": live_price,
        "pnl_estimate_pct": pnl_estimate,
        "outcome_assessment": outcome,
    }
"decision_id": trade.get("decision_id"), + "execution_price": price, + "current_price": current_price, + "pnl_estimate_pct": pnl_estimate, + "outcome_assessment": outcome, + } + + +def analyze_hold_passes(decisions: list, ex) -> list: + """Check HOLD decisions where an opportunity was explicitly PASSed but later rallied.""" + misses = [] + for d in decisions: + if d.get("decision") != "HOLD": + continue + analysis = d.get("analysis") + if not isinstance(analysis, dict): + continue + opportunities = analysis.get("opportunities_evaluated", []) + market_snapshot = d.get("market_snapshot", {}) + if not opportunities or not market_snapshot: + continue + for op in opportunities: + verdict = op.get("verdict", "") + if "PASS" not in verdict and "pass" not in verdict: + continue + symbol = op.get("symbol", "") + # Try to extract decision-time price from market_snapshot + snap = market_snapshot.get(symbol) or market_snapshot.get(symbol.replace("/", "")) + if not snap: + continue + decision_price = None + if isinstance(snap, dict): + decision_price = float(snap.get("lastPrice", 0)) or float(snap.get("last", 0)) + elif isinstance(snap, (int, float, str)): + decision_price = float(snap) + if not decision_price: + continue + current_price = fetch_current_price(ex, symbol) + if not current_price: + continue + change_pct = (current_price - decision_price) / decision_price * 100 + if change_pct > 3: # >3% rally after being passed = missed watch + misses.append({ + "timestamp": d.get("timestamp"), + "symbol": symbol, + "decision_price": round(decision_price, 8), + "current_price": round(current_price, 8), + "change_pct": round(change_pct, 2), + "verdict_snippet": verdict[:80], + }) + return misses + + +def analyze_cash_misses(decisions: list, ex) -> list: + """If portfolio was mostly USDT but a watchlist coin rallied >5%, flag it.""" + misses = [] + watchlist = set() + for d in decisions: + snap = d.get("market_snapshot", {}) + if isinstance(snap, dict): + for k in snap.keys(): + if 
k.endswith("USDT"): + watchlist.add(k) + for d in decisions: + ts = d.get("timestamp") + balances = d.get("balances") or d.get("balances_before", {}) + if not balances: + continue + total = sum(float(v) if isinstance(v, (int, float, str)) else 0 for v in balances.values()) + usdt = float(balances.get("USDT", 0)) + if total == 0 or (usdt / total) < 0.9: + continue + # Portfolio mostly cash — check watchlist performance + snap = d.get("market_snapshot", {}) + if not isinstance(snap, dict): + continue + for symbol, data in snap.items(): + if not symbol.endswith("USDT"): + continue + decision_price = None + if isinstance(data, dict): + decision_price = float(data.get("lastPrice", 0)) or float(data.get("last", 0)) + elif isinstance(data, (int, float, str)): + decision_price = float(data) + if not decision_price: + continue + current_price = fetch_current_price(ex, symbol) + if not current_price: + continue + change_pct = (current_price - decision_price) / decision_price * 100 + if change_pct > 5: + misses.append({ + "timestamp": ts, + "symbol": symbol, + "decision_price": round(decision_price, 8), + "current_price": round(current_price, 8), + "change_pct": round(change_pct, 2), + }) + # Deduplicate by symbol keeping the worst miss + seen = {} + for m in misses: + sym = m["symbol"] + if sym not in seen or m["change_pct"] > seen[sym]["change_pct"]: + seen[sym] = m + return list(seen.values()) + + +def generate_review(hours: int = 1) -> dict: + decisions = get_logs_last_n_hours("decisions", hours) + trades = get_logs_last_n_hours("trades", hours) + errors = get_logs_last_n_hours("errors", hours) + + review = { + "review_period_hours": hours, + "review_timestamp": datetime.now(CST).isoformat(), + "total_decisions": len(decisions), + "total_trades": len(trades), + "total_errors": len(errors), + "decision_quality": [], + "stats": {}, + "insights": [], + "recommendations": [], + } + + if not decisions and not trades: + review["insights"].append("本周期无决策/交易记录") + return review + 
+ ex = get_exchange() + outcomes = {"good": 0, "neutral": 0, "bad": 0, "missed": 0} + pnl_samples = [] + + for trade in trades: + analysis = analyze_trade(trade, ex) + review["decision_quality"].append(analysis) + outcomes[analysis["outcome_assessment"]] += 1 + if analysis["pnl_estimate_pct"] is not None: + pnl_samples.append(analysis["pnl_estimate_pct"]) + + # New: analyze missed opportunities from HOLD / cash decisions + hold_pass_misses = analyze_hold_passes(decisions, ex) + cash_misses = analyze_cash_misses(decisions, ex) + total_missed = outcomes["missed"] + len(hold_pass_misses) + len(cash_misses) + + review["stats"] = { + "good_decisions": outcomes["good"], + "neutral_decisions": outcomes["neutral"], + "bad_decisions": outcomes["bad"], + "missed_opportunities": total_missed, + "missed_sell_all": outcomes["missed"], + "missed_hold_passes": len(hold_pass_misses), + "missed_cash_sits": len(cash_misses), + "avg_estimated_edge_pct": round(sum(pnl_samples) / len(pnl_samples), 2) if pnl_samples else None, + } + + if errors: + review["insights"].append(f"本周期出现 {len(errors)} 次执行/系统错误,健壮性需优先关注") + if outcomes["bad"] > outcomes["good"]: + review["insights"].append("最近交易质量偏弱,建议降低交易频率或提高入场门槛") + if total_missed > 0: + parts = [] + if outcomes["missed"]: + parts.append(f"卖出后继续上涨 {outcomes['missed']} 次") + if hold_pass_misses: + parts.append(f"PASS 后错失 {len(hold_pass_misses)} 次") + if cash_misses: + parts.append(f"空仓观望错失 {len(cash_misses)} 次") + review["insights"].append("存在错失机会: " + ",".join(parts) + ",建议放宽趋势跟随或入场条件") + if outcomes["good"] >= max(1, outcomes["bad"] + total_missed): + review["insights"].append("近期决策总体可接受") + if not trades and decisions: + review["insights"].append("有决策无成交,可能是观望、最小成交额限制或执行被拦截") + if len(trades) < len(decisions) * 0.1 and decisions: + review["insights"].append("大量决策未转化为交易,需检查执行门槛(最小成交额/精度/手续费缓冲)是否过高") + if hold_pass_misses: + for m in hold_pass_misses[:3]: + review["insights"].append(f"HOLD 时 PASS 了 {m['symbol']},之后上涨 {m['change_pct']}%") + 
if cash_misses: + for m in cash_misses[:3]: + review["insights"].append(f"持仓以 USDT 为主时 {m['symbol']} 上涨 {m['change_pct']}%") + + review["recommendations"] = [ + "优先检查最小成交额/精度拒单是否影响小资金执行", + "若连续两个复盘周期 edge 为负,下一小时减少换仓频率", + "若错误日志增加,优先进入防守模式(多持 USDT)", + ] + return review + + +def save_review(review: dict): + ensure_review_dir() + ts = datetime.now(CST).strftime("%Y%m%d_%H%M%S") + path = REVIEW_DIR / f"review_{ts}.json" + path.write_text(json.dumps(review, indent=2, ensure_ascii=False), encoding="utf-8") + return str(path) + + +def print_review(review: dict): + print("=" * 50) + print("📊 Coin Hunter 小时复盘报告") + print(f"复盘时间: {review['review_timestamp']}") + print(f"统计周期: 过去 {review['review_period_hours']} 小时") + print(f"总决策数: {review['total_decisions']} | 总交易数: {review['total_trades']} | 总错误数: {review['total_errors']}") + stats = review.get("stats", {}) + print("\n决策质量统计:") + print(f" ✓ 优秀: {stats.get('good_decisions', 0)}") + print(f" ○ 中性: {stats.get('neutral_decisions', 0)}") + print(f" ✗ 失误: {stats.get('bad_decisions', 0)}") + print(f" ↗ 错过机会: {stats.get('missed_opportunities', 0)}") + if stats.get("avg_estimated_edge_pct") is not None: + print(f" 平均估计 edge: {stats['avg_estimated_edge_pct']}%") + if review.get("insights"): + print("\n💡 见解:") + for item in review["insights"]: + print(f" • {item}") + if review.get("recommendations"): + print("\n🔧 优化建议:") + for item in review["recommendations"]: + print(f" • {item}") + print("=" * 50) + + +def main(): + try: + hours = int(sys.argv[1]) if len(sys.argv) > 1 else 1 + review = generate_review(hours) + path = save_review(review) + print_review(review) + print(f"复盘已保存至: {path}") + except Exception as e: + log_error("review_engine", e) + raise + + +if __name__ == "__main__": + main() diff --git a/src/coinhunter/rotate_external_gate_log.py b/src/coinhunter/rotate_external_gate_log.py new file mode 100755 index 0000000..42695f6 --- /dev/null +++ b/src/coinhunter/rotate_external_gate_log.py @@ -0,0 +1,26 @@ +#!/usr/bin/env 
#!/usr/bin/env python3
"""Rotate external gate log using the user's logrotate config/state."""
import subprocess
from pathlib import Path

BASE_DIR = Path.home() / ".coinhunter"
STATE_DIR = BASE_DIR / "state"
LOGROTATE_STATUS = STATE_DIR / "logrotate_external_gate.status"
LOGROTATE_CONF = BASE_DIR / "logrotate_external_gate.conf"
LOGS_DIR = BASE_DIR / "logs"


def main() -> int:
    """Run logrotate with the user-level state/config files.

    Returns logrotate's exit code, or 127 when the binary is missing, so the
    cron wrapper always gets a clean status instead of a traceback.
    """
    STATE_DIR.mkdir(parents=True, exist_ok=True)
    LOGS_DIR.mkdir(parents=True, exist_ok=True)
    cmd = ["/usr/sbin/logrotate", "-s", str(LOGROTATE_STATUS), str(LOGROTATE_CONF)]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
    except FileNotFoundError:
        # Previously an uncaught FileNotFoundError crashed the cron job.
        print(f"logrotate binary not found: {cmd[0]}")
        return 127
    if result.stdout.strip():
        print(result.stdout.strip())
    if result.stderr.strip():
        print(result.stderr.strip())
    return result.returncode


if __name__ == "__main__":
    raise SystemExit(main())
Backward-compatible legacy flag forms previously emitted by AI agents +""" +import argparse +import fcntl +import hashlib +import json +import math +import os +import sys +import time +from contextlib import contextmanager +from datetime import datetime, timezone, timedelta +from pathlib import Path + +import ccxt + +from .logger import log_decision, log_error, log_trade + +BASE_DIR = Path.home() / ".coinhunter" +POSITIONS_FILE = BASE_DIR / "positions.json" +POSITIONS_LOCK = BASE_DIR / "positions.lock" +EXECUTIONS_FILE = BASE_DIR / "executions.json" +EXECUTIONS_LOCK = BASE_DIR / "executions.lock" +ENV_FILE = Path.home() / ".hermes" / ".env" +DRY_RUN = os.getenv("DRY_RUN", "false").lower() == "true" +USDT_BUFFER_PCT = 0.03 +MIN_REMAINING_DUST_USDT = 1.0 + +CST = timezone(timedelta(hours=8)) + + +def log(msg: str): + print(f"[{datetime.now(CST).strftime('%Y-%m-%d %H:%M:%S')} CST] {msg}") + + +def bj_now_iso(): + return datetime.now(CST).isoformat() + + +def load_env(): + if ENV_FILE.exists(): + for line in ENV_FILE.read_text(encoding="utf-8").splitlines(): + line = line.strip() + if line and not line.startswith("#") and "=" in line: + key, val = line.split("=", 1) + os.environ.setdefault(key.strip(), val.strip()) + + +@contextmanager +def locked_file(path: Path): + path.parent.mkdir(parents=True, exist_ok=True) + with open(path, "a+", encoding="utf-8") as f: + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + f.seek(0) + yield f + f.flush() + os.fsync(f.fileno()) + fcntl.flock(f.fileno(), fcntl.LOCK_UN) + + +def atomic_write_json(path: Path, data: dict): + path.parent.mkdir(parents=True, exist_ok=True) + tmp = path.with_suffix(path.suffix + ".tmp") + tmp.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8") + os.replace(tmp, path) + + +def load_json_locked(path: Path, lock_path: Path, default): + with locked_file(lock_path): + if not path.exists(): + return default + try: + return json.loads(path.read_text(encoding="utf-8")) + except Exception: + return 
default + + +def save_json_locked(path: Path, lock_path: Path, data: dict): + with locked_file(lock_path): + atomic_write_json(path, data) + + +def load_positions() -> list: + return load_json_locked(POSITIONS_FILE, POSITIONS_LOCK, {"positions": []}).get("positions", []) + + +def save_positions(positions: list): + save_json_locked(POSITIONS_FILE, POSITIONS_LOCK, {"positions": positions}) + + +def load_executions() -> dict: + return load_json_locked(EXECUTIONS_FILE, EXECUTIONS_LOCK, {"executions": {}}).get("executions", {}) + + +def save_executions(executions: dict): + save_json_locked(EXECUTIONS_FILE, EXECUTIONS_LOCK, {"executions": executions}) + + +def default_decision_id(action: str, argv_tail: list[str]) -> str: + now = datetime.now(CST) + bucket_min = (now.minute // 15) * 15 + bucket = now.strftime(f"%Y%m%dT%H{bucket_min:02d}") + raw = f"{bucket}|{action}|{'|'.join(argv_tail)}" + return hashlib.sha1(raw.encode()).hexdigest()[:16] + + +def get_exchange(): + load_env() + api_key = os.getenv("BINANCE_API_KEY") + secret = os.getenv("BINANCE_API_SECRET") + if not api_key or not secret: + raise RuntimeError("缺少 BINANCE_API_KEY 或 BINANCE_API_SECRET") + ex = ccxt.binance({ + "apiKey": api_key, + "secret": secret, + "options": {"defaultType": "spot", "createMarketBuyOrderRequiresPrice": False}, + "enableRateLimit": True, + }) + ex.load_markets() + return ex + + +def norm_symbol(symbol: str) -> str: + s = symbol.upper().replace("-", "").replace("_", "") + if "/" in s: + return s + if s.endswith("USDT"): + return s[:-4] + "/USDT" + raise ValueError(f"不支持的 symbol: {symbol}") + + +def storage_symbol(symbol: str) -> str: + return norm_symbol(symbol).replace("/", "") + + +def fetch_balances(ex): + bal = ex.fetch_balance()["free"] + return {k: float(v) for k, v in bal.items() if float(v) > 0} + + +def build_market_snapshot(ex): + try: + tickers = ex.fetch_tickers() + except Exception: + return {} + snapshot = {} + for sym, t in tickers.items(): + if not sym.endswith("/USDT"): 
+ continue + price = t.get("last") + if price is None or float(price) <= 0: + continue + vol = float(t.get("quoteVolume") or 0) + if vol < 200_000: + continue + base = sym.replace("/", "") + snapshot[base] = { + "lastPrice": round(float(price), 8), + "price24hPcnt": round(float(t.get("percentage") or 0), 4), + "highPrice24h": round(float(t.get("high") or 0), 8) if t.get("high") else None, + "lowPrice24h": round(float(t.get("low") or 0), 8) if t.get("low") else None, + "turnover24h": round(float(vol), 2), + } + return snapshot + + +def market_and_ticker(ex, symbol: str): + sym = norm_symbol(symbol) + market = ex.market(sym) + ticker = ex.fetch_ticker(sym) + return sym, market, ticker + + +def floor_to_step(value: float, step: float) -> float: + if not step or step <= 0: + return value + return math.floor(value / step) * step + + +def prepare_buy_quantity(ex, symbol: str, amount_usdt: float): + sym, market, ticker = market_and_ticker(ex, symbol) + ask = float(ticker.get("ask") or ticker.get("last") or 0) + if ask <= 0: + raise RuntimeError(f"{sym} 无法获取有效 ask 价格") + budget = amount_usdt * (1 - USDT_BUFFER_PCT) + raw_qty = budget / ask + qty = float(ex.amount_to_precision(sym, raw_qty)) + min_amt = (market.get("limits", {}).get("amount", {}) or {}).get("min") or 0 + min_cost = (market.get("limits", {}).get("cost", {}) or {}).get("min") or 0 + if min_amt and qty < float(min_amt): + raise RuntimeError(f"{sym} 买入数量 {qty} 小于最小数量 {min_amt}") + est_cost = qty * ask + if min_cost and est_cost < float(min_cost): + raise RuntimeError(f"{sym} 买入金额 ${est_cost:.4f} 小于最小成交额 ${float(min_cost):.4f}") + return sym, qty, ask, est_cost + + +def prepare_sell_quantity(ex, symbol: str, free_qty: float): + sym, market, ticker = market_and_ticker(ex, symbol) + bid = float(ticker.get("bid") or ticker.get("last") or 0) + if bid <= 0: + raise RuntimeError(f"{sym} 无法获取有效 bid 价格") + qty = float(ex.amount_to_precision(sym, free_qty)) + min_amt = (market.get("limits", {}).get("amount", {}) or 
{}).get("min") or 0 + min_cost = (market.get("limits", {}).get("cost", {}) or {}).get("min") or 0 + if min_amt and qty < float(min_amt): + raise RuntimeError(f"{sym} 卖出数量 {qty} 小于最小数量 {min_amt}") + est_cost = qty * bid + if min_cost and est_cost < float(min_cost): + raise RuntimeError(f"{sym} 卖出金额 ${est_cost:.4f} 小于最小成交额 ${float(min_cost):.4f}") + return sym, qty, bid, est_cost + + +def upsert_position(positions: list, position: dict): + sym = position["symbol"] + for i, existing in enumerate(positions): + if existing.get("symbol") == sym: + positions[i] = position + return positions + positions.append(position) + return positions + + +def reconcile_positions_with_exchange(ex, positions: list): + balances = fetch_balances(ex) + existing_by_symbol = {p.get("symbol"): p for p in positions} + reconciled = [] + for asset, qty in balances.items(): + if asset == "USDT": + continue + if qty <= 0: + continue + sym = f"{asset}USDT" + old = existing_by_symbol.get(sym, {}) + reconciled.append({ + "account_id": old.get("account_id", "binance-main"), + "symbol": sym, + "base_asset": asset, + "quote_asset": "USDT", + "market_type": "spot", + "quantity": qty, + "avg_cost": old.get("avg_cost"), + "opened_at": old.get("opened_at", bj_now_iso()), + "updated_at": bj_now_iso(), + "note": old.get("note", "Reconciled from Binance balances"), + }) + save_positions(reconciled) + return reconciled, balances + + +def record_execution_state(decision_id: str, payload: dict): + executions = load_executions() + executions[decision_id] = payload + save_executions(executions) + + +def get_execution_state(decision_id: str): + return load_executions().get(decision_id) + + +def build_decision_context(ex, action: str, argv_tail: list[str], decision_id: str): + balances = fetch_balances(ex) + positions = load_positions() + return { + "decision_id": decision_id, + "balances_before": balances, + "positions_before": positions, + "decision": action.upper(), + "action_taken": f"{action} {' 
'.join(argv_tail)}".strip(), + "risk_level": "high" if len(positions) <= 1 else "medium", + "data_sources": ["binance"], + } + + +def market_sell(ex, symbol: str, qty: float, decision_id: str): + sym, qty, bid, est_cost = prepare_sell_quantity(ex, symbol, qty) + if DRY_RUN: + log(f"[DRY RUN] 卖出 {sym} 数量 {qty}") + return {"id": f"dry-sell-{decision_id}", "symbol": sym, "amount": qty, "price": bid, "cost": est_cost, "status": "closed"} + order = ex.create_market_sell_order(sym, qty, params={"newClientOrderId": f"ch-{decision_id}-sell"}) + return order + + +def market_buy(ex, symbol: str, amount_usdt: float, decision_id: str): + sym, qty, ask, est_cost = prepare_buy_quantity(ex, symbol, amount_usdt) + if DRY_RUN: + log(f"[DRY RUN] 买入 {sym} 金额 ${est_cost:.4f} 数量 {qty}") + return {"id": f"dry-buy-{decision_id}", "symbol": sym, "amount": qty, "price": ask, "cost": est_cost, "status": "closed"} + order = ex.create_market_buy_order(sym, qty, params={"newClientOrderId": f"ch-{decision_id}-buy"}) + return order + + +def action_sell_all(ex, symbol: str, decision_id: str, decision_context: dict): + balances_before = fetch_balances(ex) + base = norm_symbol(symbol).split("/")[0] + qty = float(balances_before.get(base, 0)) + if qty <= 0: + raise RuntimeError(f"{base} 余额为0,无法卖出") + order = market_sell(ex, symbol, qty, decision_id) + positions_after, balances_after = reconcile_positions_with_exchange(ex, load_positions()) if not DRY_RUN else (load_positions(), balances_before) + log_trade( + "SELL_ALL", norm_symbol(symbol), qty=order.get("amount"), price=order.get("price"), + amount_usdt=order.get("cost"), note="Smart executor sell_all", + decision_id=decision_id, order_id=order.get("id"), status=order.get("status"), + balances_before=balances_before, balances_after=balances_after, + ) + log_decision({ + **decision_context, + "balances_after": balances_after, + "positions_after": positions_after, + "execution_result": {"order": order}, + "analysis": decision_context.get("analysis", 
""), + "reasoning": decision_context.get("reasoning", "sell_all execution"), + }) + return order + + +def action_buy(ex, symbol: str, amount_usdt: float, decision_id: str, decision_context: dict): + balances_before = fetch_balances(ex) + usdt = float(balances_before.get("USDT", 0)) + if usdt < amount_usdt: + raise RuntimeError(f"USDT 余额不足(${usdt:.4f} < ${amount_usdt:.4f})") + order = market_buy(ex, symbol, amount_usdt, decision_id) + positions_existing = load_positions() + sym_store = storage_symbol(symbol) + price = float(order.get("price") or 0) + qty = float(order.get("amount") or 0) + position = { + "account_id": "binance-main", + "symbol": sym_store, + "base_asset": norm_symbol(symbol).split("/")[0], + "quote_asset": "USDT", + "market_type": "spot", + "quantity": qty, + "avg_cost": price, + "opened_at": bj_now_iso(), + "updated_at": bj_now_iso(), + "note": "Smart executor entry", + } + upsert_position(positions_existing, position) + if DRY_RUN: + save_positions(positions_existing) + balances_after = balances_before + positions_after = positions_existing + else: + positions_after, balances_after = reconcile_positions_with_exchange(ex, positions_existing) + for p in positions_after: + if p["symbol"] == sym_store and price: + p["avg_cost"] = price + p["updated_at"] = bj_now_iso() + save_positions(positions_after) + log_trade( + "BUY", norm_symbol(symbol), qty=qty, amount_usdt=order.get("cost"), price=price, + note="Smart executor buy", decision_id=decision_id, order_id=order.get("id"), + status=order.get("status"), balances_before=balances_before, balances_after=balances_after, + ) + log_decision({ + **decision_context, + "balances_after": balances_after, + "positions_after": positions_after, + "execution_result": {"order": order}, + "analysis": decision_context.get("analysis", ""), + "reasoning": decision_context.get("reasoning", "buy execution"), + }) + return order + + +def action_rebalance(ex, from_symbol: str, to_symbol: str, decision_id: str, 
def build_parser() -> argparse.ArgumentParser:
    """Construct the subcommand CLI (hold / balances / status / sell-all / buy / rebalance)."""
    parser = argparse.ArgumentParser(
        description="Coin Hunter Smart Executor",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=(
            "示例:\n"
            " python smart_executor.py hold\n"
            " python smart_executor.py sell-all ETHUSDT\n"
            " python smart_executor.py buy ENJUSDT 100\n"
            " python smart_executor.py rebalance PEPEUSDT ETHUSDT\n"
            " python smart_executor.py balances\n\n"
            "兼容旧调用:\n"
            " python smart_executor.py HOLD\n"
            " python smart_executor.py --decision HOLD --dry-run\n"
        ),
    )
    parser.add_argument("--decision-id", help="Override decision id (otherwise derived automatically)")
    parser.add_argument("--analysis", help="Decision analysis text to persist into logs")
    parser.add_argument("--reasoning", help="Decision reasoning text to persist into logs")
    parser.add_argument("--dry-run", action="store_true", help="Force dry-run mode for this invocation")

    subparsers = parser.add_subparsers(dest="command")

    subparsers.add_parser("hold", help="Log a HOLD decision without trading")
    subparsers.add_parser("balances", help="Print live balances as JSON")
    subparsers.add_parser("balance", help="Alias of balances")
    subparsers.add_parser("status", help="Print balances + positions + snapshot as JSON")

    sell_all = subparsers.add_parser("sell-all", help="Sell all of one symbol")
    sell_all.add_argument("symbol")
    # Hidden legacy spelling; folded into "sell-all" by parse_cli_args.
    sell_all_legacy = subparsers.add_parser("sell_all", help=argparse.SUPPRESS)
    sell_all_legacy.add_argument("symbol")

    buy = subparsers.add_parser("buy", help="Buy symbol with USDT amount")
    buy.add_argument("symbol")
    buy.add_argument("amount_usdt", type=float)

    rebalance = subparsers.add_parser("rebalance", help="Sell one symbol and rotate to another")
    rebalance.add_argument("from_symbol")
    rebalance.add_argument("to_symbol")

    return parser


def _rebuild_from_legacy_flags(argv: list[str]):
    """Translate legacy '--decision X ...' flag calls into subcommand argv.

    Returns the rebuilt argv, or None when argv is not the legacy flag shape
    (no --decision given), letting the caller fall through to other handling.
    """
    legacy = argparse.ArgumentParser(add_help=False)
    legacy.add_argument("--decision")
    legacy.add_argument("--symbol")
    legacy.add_argument("--from-symbol")
    legacy.add_argument("--to-symbol")
    legacy.add_argument("--amount-usdt", type=float)
    legacy.add_argument("--decision-id")
    legacy.add_argument("--analysis")
    legacy.add_argument("--reasoning")
    legacy.add_argument("--dry-run", action="store_true")
    ns, unknown = legacy.parse_known_args(argv)

    if not ns.decision:
        return None

    decision = ns.decision.strip().upper()
    rebuilt = []
    if ns.decision_id:
        rebuilt += ["--decision-id", ns.decision_id]
    if ns.analysis:
        rebuilt += ["--analysis", ns.analysis]
    if ns.reasoning:
        rebuilt += ["--reasoning", ns.reasoning]
    if ns.dry_run:
        rebuilt += ["--dry-run"]

    if decision == "HOLD":
        rebuilt += ["hold"]
    elif decision == "SELL_ALL":
        if not ns.symbol:
            raise RuntimeError("旧式 --decision SELL_ALL 需要搭配 --symbol")
        rebuilt += ["sell-all", ns.symbol]
    elif decision == "BUY":
        if not ns.symbol or ns.amount_usdt is None:
            raise RuntimeError("旧式 --decision BUY 需要 --symbol 和 --amount-usdt")
        rebuilt += ["buy", ns.symbol, str(ns.amount_usdt)]
    elif decision == "REBALANCE":
        if not ns.from_symbol or not ns.to_symbol:
            raise RuntimeError("旧式 --decision REBALANCE 需要 --from-symbol 和 --to-symbol")
        rebuilt += ["rebalance", ns.from_symbol, ns.to_symbol]
    else:
        raise RuntimeError(f"不支持的旧式 decision: {decision}")

    return rebuilt + unknown


def normalize_legacy_argv(argv: list[str]) -> list[str]:
    """Map legacy agent-emitted invocations onto the subcommand CLI.

    Two legacy shapes are supported:
    1. flag style: --decision BUY --symbol X --amount-usdt 100
    2. bare action tokens in any case: HOLD / SELL_ALL / buy / ...

    Bug fix: the flag style must be handled BEFORE the bare-token scan.
    Previously the token scan hijacked the *value* of --decision (e.g.
    "HOLD" -> "hold") and left the unknown --decision flag behind, so every
    legacy flag invocation failed argparse parsing.
    """
    if not argv:
        return argv

    if argv[0].startswith("-"):
        rebuilt = _rebuild_from_legacy_flags(argv)
        if rebuilt is not None:
            return rebuilt

    action_aliases = {
        "HOLD": ["hold"],
        "hold": ["hold"],
        "SELL_ALL": ["sell-all"],
        "sell_all": ["sell-all"],
        "sell-all": ["sell-all"],
        "BUY": ["buy"],
        "buy": ["buy"],
        "REBALANCE": ["rebalance"],
        "rebalance": ["rebalance"],
        "BALANCE": ["balances"],
        "balance": ["balances"],
        "BALANCES": ["balances"],
        "balances": ["balances"],
        "STATUS": ["status"],
        "status": ["status"],
    }

    for idx, token in enumerate(argv):
        if token in action_aliases:
            return argv[:idx] + action_aliases[token] + argv[idx + 1:]

    return argv


def parse_cli_args(argv: list[str]):
    """Normalize legacy forms, parse, and return (args, normalized_argv)."""
    parser = build_parser()
    normalized = normalize_legacy_argv(argv)
    args = parser.parse_args(normalized)
    if not args.command:
        parser.print_help()
        raise SystemExit(1)
    if args.command == "sell_all":
        # Fold the hidden legacy subcommand into the canonical spelling.
        args.command = "sell-all"
    return args, normalized


def command_status(ex):
    """Print and return balances + positions + market snapshot as JSON."""
    balances = fetch_balances(ex)
    positions = load_positions()
    market_snapshot = build_market_snapshot(ex)
    payload = {
        "balances": balances,
        "positions": positions,
        "market_snapshot": market_snapshot,
    }
    print(json.dumps(payload, ensure_ascii=False, indent=2))
    return payload


def command_balances(ex):
    """Print and return live balances as JSON."""
    balances = fetch_balances(ex)
    print(json.dumps({"balances": balances}, ensure_ascii=False, indent=2))
    return balances


def cli_action_args(args, action: str) -> list[str]:
    """Positional args of *action* as strings, for logging/idempotency keys."""
    if action == "sell_all":
        return [args.symbol]
    if action == "buy":
        return [args.symbol, str(args.amount_usdt)]
    if action == "rebalance":
        return [args.from_symbol, args.to_symbol]
    return []


def main():
    """Entry point: parse CLI, enforce idempotency by decision id, dispatch.

    A decision id that already recorded status "success" is skipped for
    trading actions; read-only actions (balances/status) always run.
    """
    global DRY_RUN
    args, normalized_argv = parse_cli_args(sys.argv[1:])
    action = args.command.replace("-", "_")
    argv_tail = cli_action_args(args, action)
    decision_id = args.decision_id or os.getenv("DECISION_ID") or default_decision_id(action, normalized_argv)
    if args.dry_run:
        DRY_RUN = True

    previous = get_execution_state(decision_id)
    read_only_action = action in {"balance", "balances", "status"}
    if previous and previous.get("status") == "success" and not read_only_action:
        log(f"⚠️ decision_id={decision_id} 已执行成功,跳过重复执行")
        return

    try:
        ex = get_exchange()
        if read_only_action:
            if action in {"balance", "balances"}:
                command_balances(ex)
            else:
                command_status(ex)
            return

        decision_context = build_decision_context(ex, action, argv_tail, decision_id)
        if args.analysis:
            decision_context["analysis"] = args.analysis
        elif os.getenv("DECISION_ANALYSIS"):
            decision_context["analysis"] = os.getenv("DECISION_ANALYSIS")
        if args.reasoning:
            decision_context["reasoning"] = args.reasoning
        elif os.getenv("DECISION_REASONING"):
            decision_context["reasoning"] = os.getenv("DECISION_REASONING")

        record_execution_state(decision_id, {"status": "pending", "started_at": bj_now_iso(), "action": action, "args": argv_tail})

        if action == "sell_all":
            result = action_sell_all(ex, args.symbol, decision_id, decision_context)
        elif action == "buy":
            result = action_buy(ex, args.symbol, float(args.amount_usdt), decision_id, decision_context)
        elif action == "rebalance":
            result = action_rebalance(ex, args.from_symbol, args.to_symbol, decision_id, decision_context)
        elif action == "hold":
            balances = fetch_balances(ex)
            positions = load_positions()
            market_snapshot = build_market_snapshot(ex)
            log_decision({
                **decision_context,
                "balances_after": balances,
                "positions_after": positions,
                "market_snapshot": market_snapshot,
                "analysis": decision_context.get("analysis", "hold"),
                "reasoning": decision_context.get("reasoning", "hold"),
                "execution_result": {"status": "hold"},
            })
            log("😴 决策: 持续持有,无操作")
            result = {"status": "hold"}
        else:
            raise RuntimeError(f"未知动作: {action};请运行 --help 查看正确 CLI 用法")

        record_execution_state(decision_id, {"status": "success", "finished_at": bj_now_iso(), "action": action, "args": argv_tail, "result": result})
        log(f"✅ 执行完成 decision_id={decision_id}")
    except Exception as e:
        failure = {"status": "failed", "finished_at": bj_now_iso(), "action": action, "args": argv_tail, "error": str(e)}
        # Best effort: a bookkeeping failure must never mask the root error
        # (previously a raise here skipped log_error and the clean exit).
        try:
            record_execution_state(decision_id, failure)
        except Exception:
            pass
        log_error("smart_executor", e, decision_id=decision_id, action=action, args=argv_tail)
        log(f"❌ 执行失败: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()