refactor: address high-priority debt and publish to PyPI

- Fix TOCTOU race conditions by wrapping read-modify-write cycles
  under single-file locks in execution_state, portfolio_service,
  precheck_state, state_manager, and precheck_service.
- Add missing test coverage (96 tests total):
  - test_review_service.py (15 tests)
  - test_check_api.py (6 tests)
  - test_external_gate.py main branches (+10 tests)
  - test_trade_execution.py new commands (+8 tests)
- Unify all agent-consumed JSON messages to English.
- Config-ize hardcoded values (volume filter, schema_version) via
  get_user_config with sensible defaults.
- Add 1-hour TTL to exchange cache with force_new override.
- Add ruff and mypy to dev dependencies; fix all type errors.
- Add __all__ declarations to 11 service modules.
- Sync README with new commands, config tuning docs, and PyPI badge.
- Publish package as coinhunter==1.0.0 on PyPI with MIT license.
- Deprecate coinhunter-cli==1.0.1 with runtime warning.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-16 01:21:27 +08:00
parent 01bb54dee5
commit 62c40a9776
53 changed files with 2338 additions and 671 deletions

View File

@@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
- **Editable install:** `pip install -e .`
- **Run the CLI locally:** `python -m coinhunter --help`
- **Install for end users:** `./scripts/install_local.sh` (creates `~/.local/bin/coinhunter`)
- **Install for end users:** `./scripts/install_local.sh` (standard `pip install -e .` wrapper)
- **Tests:** There is no test suite yet. The README lists next priorities as adding pytest coverage for runtime paths, state manager, and trigger analyzer.
- **Lint / type-check:** Not configured yet.
@@ -14,7 +14,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
`src/coinhunter/cli.py` is the single entrypoint. It resolves aliases to canonical commands, maps canonical commands to Python modules via `MODULE_MAP`, then imports the module and calls `module.main()` after mutating `sys.argv` to match the display name.
Active commands live in `src/coinhunter/commands/` and are thin adapters that delegate to `src/coinhunter/services/`. A few legacy commands (`review_context.py`, `review_engine.py`) still live at the `src/coinhunter/` root and are imported directly by `cli.py`. Root-level backward-compat facades (e.g., `precheck.py`, `smart_executor.py`) re-export the moved commands.
Active commands live in `src/coinhunter/commands/` and are thin adapters that delegate to `src/coinhunter/services/`. Root-level backward-compat facades (e.g., `precheck.py`, `smart_executor.py`, `review_engine.py`, `review_context.py`) re-export the moved commands.
## Architecture
@@ -28,7 +28,7 @@ Active commands live in `src/coinhunter/commands/` and are thin adapters that de
### Runtime and environment
`runtime.py` defines `RuntimePaths` and `get_runtime_paths()`. User data lives in `~/.coinhunter/` by default (override with `COINHUNTER_HOME`). Credentials are loaded from `~/.hermes/.env` by default (override with `COINHUNTER_ENV_FILE`). Many modules eagerly instantiate `PATHS = get_runtime_paths()` at import time.
`runtime.py` defines `RuntimePaths` and `get_runtime_paths()`. User data lives in `~/.coinhunter/` by default (override with `COINHUNTER_HOME`). Credentials are loaded from `~/.hermes/.env` by default (override with `COINHUNTER_ENV_FILE`). Modules should call `get_runtime_paths()` at function scope rather than eagerly at import time.
### Smart executor (`exec`)
@@ -56,8 +56,8 @@ State is stored in `~/.coinhunter/state/precheck_state.json`.
### Review commands
- `review N` (`review_context.py`) — generates review context for the last N hours.
- `recap N` (`review_engine.py`) — generates a full review report by reading JSONL decision/trade/error logs, computing PnL estimates, missed opportunities, and saving a report to `~/.coinhunter/reviews/`.
- `review N` (`commands/review_context.py` → `services/review_service.py`) — generates review context for the last N hours.
- `recap N` (`commands/review_engine.py` → `services/review_service.py`) — generates a full review report by reading JSONL decision/trade/error logs, computing PnL estimates, missed opportunities, and saving a report to `~/.coinhunter/reviews/`.
### Logging model

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Tacit Lab
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -23,17 +23,19 @@
</p>
<p align="center">
<a href="https://pypi.org/project/coinhunter/"><img src="https://img.shields.io/pypi/v/coinhunter?style=flat-square&color=3776ab" /></a>
<a href="#"><img src="https://img.shields.io/badge/python-3.10%2B-3776ab?style=flat-square&logo=python&logoColor=white" /></a>
<a href="#"><img src="https://img.shields.io/badge/status-active%20development-22c55e?style=flat-square" /></a>
<a href="#"><img src="https://img.shields.io/badge/architecture-cli%20%2B%20commands%20%2B%20services-8b5cf6?style=flat-square" /></a>
<a href="#"><img src="https://img.shields.io/badge/tests-needed-ef4444?style=flat-square" /></a>
</p>
---
## What is this?
`coinhunter-cli` is the **executable tooling layer** for CoinHunter — an installable Python CLI that handles trading operations, market probes, precheck orchestration, and review workflows.
`coinhunter` is the **executable tooling layer** for CoinHunter — an installable Python CLI that handles trading operations, market probes, precheck orchestration, and review workflows.
> **Note:** The old package name `coinhunter-cli` is deprecated. Please install `coinhunter` going forward.
| Layer | Responsibility | Location |
|-------|----------------|----------|
@@ -74,7 +76,8 @@ src/coinhunter/
│ ├── smart_executor_parser.py
│ ├── execution_state.py
│ ├── precheck_service.py
│ ├── precheck_constants.py # thresholds & paths
│ ├── review_service.py # review generation logic
│ ├── precheck_constants.py # thresholds
│ ├── time_utils.py # UTC/local time helpers
│ ├── data_utils.py # json, hash, float, symbol norm
│ ├── state_manager.py # state load/save/sanitize
@@ -96,21 +99,13 @@ src/coinhunter/
## Installation
### Quick user install
### From PyPI (recommended)
```bash
./scripts/install_local.sh
pip install coinhunter
```
Creates:
- Virtualenv: `~/.local/share/coinhunter-cli/venv`
- Launcher: `~/.local/bin/coinhunter`
### Editable dev install
```bash
pip install -e .
```
This installs the latest stable release and creates the `coinhunter` console script entry point.
Verify:
@@ -119,6 +114,22 @@ coinhunter --help
coinhunter --version
```
### Development install (editable)
If you're working on this repo locally:
```bash
pip install -e ".[dev]"
```
Or use the convenience script:
```bash
./scripts/install_local.sh
```
A thin wrapper that runs `pip install -e ".[dev]"` and verifies the entrypoint is on your PATH.
---
## Command Reference
@@ -136,6 +147,9 @@ coinhunter exec hold # record a HOLD decision
coinhunter exec buy ENJUSDT 50 # buy $50 of ENJUSDT
coinhunter exec flat ENJUSDT # sell entire ENJUSDT position
coinhunter exec rotate PEPEUSDT ETHUSDT # rotate exposure
coinhunter exec orders # list open spot orders
coinhunter exec order-status ENJUSDT 123456 # check specific order
coinhunter exec cancel ENJUSDT 123456 # cancel an open order
coinhunter gate # external gate orchestration
coinhunter review 12 # generate review context (last 12h)
coinhunter recap 12 # generate review report (last 12h)
@@ -208,6 +222,45 @@ Run the external gate:
coinhunter gate
```
The gate reads `trigger_command` from `~/.coinhunter/config.json` under `external_gate`.
- By default, no external trigger is configured — gate runs precheck and marks state, then exits cleanly.
- Set `trigger_command` to a command list to integrate with your own scheduler:
```json
{
"external_gate": {
"trigger_command": ["hermes", "cron", "run", "JOB_ID"]
}
}
```
- Set to `null` or `[]` to explicitly disable the external trigger.
### Dynamic tuning via `config.json`
You can override internal defaults without editing code by adding keys to `~/.coinhunter/config.json`:
```json
{
"external_gate": {
"trigger_command": ["hermes", "cron", "run", "JOB_ID"]
},
"exchange": {
"min_quote_volume": 200000,
"cache_ttl_seconds": 3600
},
"logging": {
"schema_version": 2
}
}
```
| Key | Default | Effect |
|-----|---------|--------|
| `exchange.min_quote_volume` | `200000` | Minimum 24h quote volume for a symbol to appear in market snapshots |
| `exchange.cache_ttl_seconds` | `3600` | How long the ccxt exchange instance (and `load_markets()` result) is cached |
| `logging.schema_version` | `2` | Schema version stamped on every JSONL log entry |
---
## Runtime Model
@@ -248,14 +301,17 @@ The codebase is actively maintained and refactored in small, safe steps.
- ✅ Extracted `smart-executor` into `commands/` + `services/`
- ✅ Extracted `precheck` into 9 focused service modules
- ✅ Migrated all active command modules into `commands/`
- ✅ Extracted `review_engine.py` core logic into `services/review_service.py`
- ✅ Removed eager `PATHS` instantiation across services and commands
- ✅ Fixed `smart_executor.py` lazy-loading facade
- ✅ Standardized install to use `pip install -e .`
- ✅ Made `external_gate` trigger_command configurable (no longer hardcodes hermes)
- ✅ Removed dead `auto-trader` command
- ✅ Backward-compatible root facades preserved
**Next priorities:**
- 🧪 Add pytest coverage for runtime paths, state manager, and trigger analyzer
- 🔧 Extract `review_engine.py` core logic into `services/`
- 🔧 Unify output contract (JSON-first with `--pretty` option)
- 🔧 Add basic CI (lint + compileall + pytest)
- 🔧 Unify output contract (JSON-first with `--pretty` option)
---

View File

@@ -3,10 +3,11 @@ requires = ["setuptools>=68", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "coinhunter-cli"
version = "0.1.0"
name = "coinhunter"
version = "1.0.0"
description = "CoinHunter trading CLI with user runtime data in ~/.coinhunter"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
dependencies = [
"ccxt>=4.4.0"
@@ -15,6 +16,9 @@ authors = [
{name = "Tacit Lab", email = "ouyangcarlos@gmail.com"}
]
[project.optional-dependencies]
dev = ["pytest>=8.0", "ruff>=0.4.0", "mypy>=1.0"]
[project.scripts]
coinhunter = "coinhunter.cli:main"
@@ -23,3 +27,17 @@ package-dir = {"" = "src"}
[tool.setuptools.packages.find]
where = ["src"]
[tool.ruff]
line-length = 120
target-version = "py310"
[tool.ruff.lint]
select = ["E", "F", "W", "I"]
ignore = ["E501"]
[tool.mypy]
python_version = "3.10"
warn_return_any = true
warn_unused_configs = true
ignore_missing_imports = true

View File

@@ -1,10 +1,10 @@
#!/usr/bin/env bash
set -euo pipefail
APP_HOME="${COINHUNTER_APP_HOME:-$HOME/.local/share/coinhunter-cli}"
VENV_DIR="$APP_HOME/venv"
# Standard local install using pip editable mode.
# This creates the 'coinhunter' console script entry point as defined in pyproject.toml.
BIN_DIR="${COINHUNTER_BIN_DIR:-$HOME/.local/bin}"
LAUNCHER="$BIN_DIR/coinhunter"
PYTHON_BIN="${PYTHON:-}"
if [[ -z "$PYTHON_BIN" ]]; then
@@ -18,18 +18,13 @@ if [[ -z "$PYTHON_BIN" ]]; then
fi
fi
mkdir -p "$APP_HOME" "$BIN_DIR"
mkdir -p "$BIN_DIR"
"$PYTHON_BIN" -m venv "$VENV_DIR"
"$VENV_DIR/bin/python" -m pip install --upgrade pip setuptools wheel
"$VENV_DIR/bin/python" -m pip install --upgrade "$(pwd)"
"$PYTHON_BIN" -m pip install --upgrade pip setuptools wheel
"$PYTHON_BIN" -m pip install --upgrade -e "$(pwd)[dev]"
cat >"$LAUNCHER" <<EOF
#!/usr/bin/env bash
exec "$VENV_DIR/bin/coinhunter" "\$@"
EOF
chmod +x "$LAUNCHER"
echo "Installed coinhunter into:"
echo " venv: $VENV_DIR"
echo " launcher: $LAUNCHER"
echo "Installed coinhunter in editable mode."
echo " python: $PYTHON_BIN"
echo " entrypoint: $(command -v coinhunter || echo 'not in PATH')"
echo ""
echo "Make sure '$BIN_DIR' is in your PATH if the entrypoint is not found."

View File

@@ -1,2 +1,3 @@
# Module entry point: makes `python -m coinhunter` behave like the
# `coinhunter` console script and propagate its integer exit code.
from .cli import main
raise SystemExit(main())

View File

@@ -16,8 +16,8 @@ MODULE_MAP = {
"market-probe": "commands.market_probe",
"paths": "commands.paths",
"precheck": "commands.precheck",
"review-context": "review_context",
"review-engine": "review_engine",
"review-context": "commands.review_context",
"review-engine": "commands.review_engine",
"rotate-external-gate-log": "commands.rotate_external_gate_log",
"smart-executor": "commands.smart_executor",
}
@@ -84,14 +84,17 @@ def build_parser() -> argparse.ArgumentParser:
" coinhunter exec overview\n"
" coinhunter exec hold\n"
" coinhunter exec --analysis '...' --reasoning '...' buy ENJUSDT 50\n"
" coinhunter exec orders\n"
" coinhunter exec order-status ENJUSDT 123456\n"
" coinhunter exec cancel ENJUSDT 123456\n"
" coinhunter pre\n"
" coinhunter pre --ack '分析完成:HOLD'\n"
" coinhunter pre --ack 'Analysis complete: HOLD'\n"
" coinhunter gate\n"
" coinhunter review 12\n"
" coinhunter recap 12\n"
" coinhunter probe bybit-ticker BTCUSDT\n"
"\n"
"Preferred exec verbs are bal, overview, hold, buy, flat, and rotate.\n"
"Preferred exec verbs are bal, overview, hold, buy, flat, rotate, orders, order-status, and cancel.\n"
"Legacy command names remain supported for backward compatibility.\n"
),
)

View File

@@ -1,7 +1,10 @@
#!/usr/bin/env python3
"""检查自动交易的环境配置是否就绪"""
"""Check whether the trading environment is ready and API permissions are sufficient."""
import json
import os
import ccxt
from ..runtime import load_env_file
@@ -12,14 +15,41 @@ def main():
secret = os.getenv("BINANCE_API_SECRET", "")
if not api_key or api_key.startswith("***") or api_key.startswith("your_"):
print("❌ 未配置 BINANCE_API_KEY")
print(json.dumps({"ok": False, "error": "BINANCE_API_KEY not configured"}, ensure_ascii=False))
return 1
if not secret or secret.startswith("***") or secret.startswith("your_"):
print("❌ 未配置 BINANCE_API_SECRET")
print(json.dumps({"ok": False, "error": "BINANCE_API_SECRET not configured"}, ensure_ascii=False))
return 1
print("✅ API 配置正常")
return 0
try:
ex = ccxt.binance({
"apiKey": api_key,
"secret": secret,
"options": {"defaultType": "spot"},
"enableRateLimit": True,
})
balance = ex.fetch_balance()
except Exception as e:
print(json.dumps({"ok": False, "error": f"Failed to connect or fetch balance: {e}"}, ensure_ascii=False))
return 1
read_permission = bool(balance and isinstance(balance, dict))
spot_trading_enabled = None
try:
restrictions = ex.sapi_get_account_api_restrictions()
spot_trading_enabled = restrictions.get("enableSpotAndMarginTrading") or restrictions.get("enableSpotTrading")
except Exception:
pass
report = {
"ok": read_permission,
"read_permission": read_permission,
"spot_trading_enabled": spot_trading_enabled,
"note": "spot_trading_enabled may be null if the key lacks permission to query restrictions; it does not necessarily mean trading is disabled.",
}
print(json.dumps(report, ensure_ascii=False, indent=2))
return 0 if read_permission else 1
if __name__ == "__main__":

View File

@@ -10,7 +10,6 @@ import sys
from ..runtime import ensure_runtime_dirs, get_runtime_paths, load_env_file, resolve_hermes_executable
REQUIRED_ENV_VARS = ["BINANCE_API_KEY", "BINANCE_API_SECRET"]

View File

@@ -5,13 +5,14 @@ import subprocess
import sys
from datetime import datetime, timezone
from ..runtime import ensure_runtime_dirs, get_runtime_paths, resolve_hermes_executable
from ..runtime import ensure_runtime_dirs, get_runtime_paths
def _paths():
    # Resolve runtime paths lazily (per call) so COINHUNTER_HOME overrides
    # set after import are still honored — avoids eager import-time PATHS.
    return get_runtime_paths()
PATHS = get_runtime_paths()
STATE_DIR = PATHS.state_dir
LOCK_FILE = PATHS.external_gate_lock
COINHUNTER_MODULE = [sys.executable, "-m", "coinhunter"]
TRADE_JOB_ID = "4e6593fff158"
def utc_now():
@@ -19,7 +20,7 @@ def utc_now():
def log(message: str):
print(f"[{utc_now()}] {message}")
print(f"[{utc_now()}] {message}", file=sys.stderr)
def run_cmd(args: list[str]) -> subprocess.CompletedProcess:
@@ -30,51 +31,129 @@ def parse_json_output(text: str) -> dict:
text = (text or "").strip()
if not text:
return {}
return json.loads(text)
return json.loads(text) # type: ignore[no-any-return]
def _load_config() -> dict:
    """Load the user config JSON, returning {} when it is missing or unreadable.

    Best-effort by design: the gate must keep running even with a corrupt
    or absent ~/.coinhunter/config.json.
    """
    cfg_file = _paths().config_file
    if not cfg_file.exists():
        return {}
    try:
        parsed = json.loads(cfg_file.read_text(encoding="utf-8"))
    except Exception:
        # Malformed JSON or a read error degrades to "no config".
        return {}
    return parsed
def _resolve_trigger_command(paths) -> list[str] | None:
    """Resolve ``external_gate.trigger_command`` from user config.

    Returns a normalized argv list, or None when no trigger should run
    (key absent, explicit null, or empty list). A bare string is wrapped
    as a single-element argv. ``paths`` is kept for interface stability;
    the config location is resolved via the module helpers.
    """
    gate_cfg = _load_config().get("external_gate", {})
    if "trigger_command" not in gate_cfg:
        return None
    raw = gate_cfg["trigger_command"]
    if raw is None:
        return None
    if isinstance(raw, str):
        return [raw]
    if isinstance(raw, list):
        # Coerce each element to str so numeric JSON values still work.
        return [str(part) for part in raw] if raw else None
    log(f"warn: unexpected trigger_command type {type(raw).__name__}; skipping trigger")
    return None
def main():
ensure_runtime_dirs(PATHS)
with open(LOCK_FILE, "w", encoding="utf-8") as lockf:
paths = _paths()
ensure_runtime_dirs(paths)
result = {"ok": False, "triggered": False, "reason": "", "logs": []}
lock_file = paths.external_gate_lock
def append_log(msg: str):
log(msg)
result["logs"].append(msg)
with open(lock_file, "w", encoding="utf-8") as lockf:
try:
fcntl.flock(lockf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
log("gate already running; skip")
append_log("gate already running; skip")
result["reason"] = "already_running"
print(json.dumps(result, ensure_ascii=False))
return 0
precheck = run_cmd(COINHUNTER_MODULE + ["precheck"])
if precheck.returncode != 0:
log(f"precheck returned non-zero ({precheck.returncode}); stdout={precheck.stdout.strip()} stderr={precheck.stderr.strip()}")
append_log(f"precheck returned non-zero ({precheck.returncode}); stdout={precheck.stdout.strip()} stderr={precheck.stderr.strip()}")
result["reason"] = "precheck_failed"
print(json.dumps(result, ensure_ascii=False))
return 1
try:
data = parse_json_output(precheck.stdout)
except Exception as e:
log(f"failed to parse precheck JSON: {e}; raw={precheck.stdout.strip()[:1000]}")
append_log(f"failed to parse precheck JSON: {e}; raw={precheck.stdout.strip()[:1000]}")
result["reason"] = "precheck_parse_error"
print(json.dumps(result, ensure_ascii=False))
return 1
if not data.get("ok"):
append_log("precheck reported failure; skip model run")
result["reason"] = "precheck_not_ok"
print(json.dumps(result, ensure_ascii=False))
return 1
if not data.get("should_analyze"):
log("no trigger; skip model run")
append_log("no trigger; skip model run")
result["ok"] = True
result["reason"] = "no_trigger"
print(json.dumps(result, ensure_ascii=False))
return 0
if data.get("run_requested"):
log(f"trigger already queued at {data.get('run_requested_at')}; skip duplicate")
append_log(f"trigger already queued at {data.get('run_requested_at')}; skip duplicate")
result["ok"] = True
result["reason"] = "already_queued"
print(json.dumps(result, ensure_ascii=False))
return 0
mark = run_cmd(COINHUNTER_MODULE + ["precheck", "--mark-run-requested", "external-gate queued cron run"])
if mark.returncode != 0:
log(f"failed to mark run requested; stdout={mark.stdout.strip()} stderr={mark.stderr.strip()}")
append_log(f"failed to mark run requested; stdout={mark.stdout.strip()} stderr={mark.stderr.strip()}")
result["reason"] = "mark_failed"
print(json.dumps(result, ensure_ascii=False))
return 1
trigger = run_cmd([resolve_hermes_executable(PATHS), "cron", "run", TRADE_JOB_ID])
trigger_cmd = _resolve_trigger_command(paths)
if trigger_cmd is None:
append_log("trigger_command is disabled; skipping external trigger")
result["ok"] = True
result["reason"] = "trigger_disabled"
print(json.dumps(result, ensure_ascii=False))
return 0
trigger = run_cmd(trigger_cmd)
if trigger.returncode != 0:
log(f"failed to trigger trade cron job; stdout={trigger.stdout.strip()} stderr={trigger.stderr.strip()}")
append_log(f"failed to trigger trade job; cmd={' '.join(trigger_cmd)}; stdout={trigger.stdout.strip()} stderr={trigger.stderr.strip()}")
result["reason"] = "trigger_failed"
print(json.dumps(result, ensure_ascii=False))
return 1
reasons = ", ".join(data.get("reasons", [])) or "unknown"
log(f"queued trade job {TRADE_JOB_ID}; reasons={reasons}")
append_log(f"queued trade job via {' '.join(trigger_cmd)}; reasons={reasons}")
if trigger.stdout.strip():
log(trigger.stdout.strip())
append_log(trigger.stdout.strip())
result["ok"] = True
result["triggered"] = True
result["reason"] = reasons
result["command"] = trigger_cmd
print(json.dumps(result, ensure_ascii=False))
return 0

View File

@@ -5,9 +5,9 @@ from pathlib import Path
from ..runtime import ensure_runtime_dirs, get_runtime_paths
PATHS = get_runtime_paths()
ROOT = PATHS.root
CACHE_DIR = PATHS.cache_dir
def _paths():
    # Resolve runtime paths at call time rather than import time so that
    # COINHUNTER_HOME overrides take effect for every invocation.
    return get_runtime_paths()
def now_iso():
@@ -22,30 +22,60 @@ def ensure_file(path: Path, payload: dict):
def main():
ensure_runtime_dirs(PATHS)
paths = _paths()
ensure_runtime_dirs(paths)
created = []
ts = now_iso()
templates = {
ROOT / "config.json": {
paths.root / "config.json": {
"default_exchange": "bybit",
"default_quote_currency": "USDT",
"timezone": "Asia/Shanghai",
"preferred_chains": ["solana", "base"],
"external_gate": {
"trigger_command": None,
"_comment": "Set to a command list like ['hermes', 'cron', 'run', 'JOB_ID'] or null to disable"
},
"trading": {
"usdt_buffer_pct": 0.03,
"min_remaining_dust_usdt": 1.0,
"_comment": "Adjust buffer and dust thresholds for your account size"
},
"precheck": {
"base_price_move_trigger_pct": 0.025,
"base_pnl_trigger_pct": 0.03,
"base_portfolio_move_trigger_pct": 0.03,
"base_candidate_score_trigger_ratio": 1.15,
"base_force_analysis_after_minutes": 180,
"base_cooldown_minutes": 45,
"top_candidates": 10,
"min_actionable_usdt": 12.0,
"min_real_position_value_usdt": 8.0,
"blacklist": ["USDC", "BUSD", "TUSD", "FDUSD", "USTC", "PAXG"],
"hard_stop_pct": -0.08,
"hard_moon_pct": 0.25,
"min_change_pct": 1.0,
"max_price_cap": None,
"hard_reason_dedup_minutes": 15,
"max_pending_trigger_minutes": 30,
"max_run_request_minutes": 20,
"_comment": "Tune trigger sensitivity without redeploying code"
},
"created_at": ts,
"updated_at": ts,
},
ROOT / "accounts.json": {
paths.root / "accounts.json": {
"accounts": []
},
ROOT / "positions.json": {
paths.root / "positions.json": {
"positions": []
},
ROOT / "watchlist.json": {
paths.root / "watchlist.json": {
"watchlist": []
},
ROOT / "notes.json": {
paths.root / "notes.json": {
"notes": []
},
}
@@ -55,9 +85,9 @@ def main():
created.append(str(path))
print(json.dumps({
"root": str(ROOT),
"root": str(paths.root),
"created": created,
"cache_dir": str(CACHE_DIR),
"cache_dir": str(paths.cache_dir),
}, ensure_ascii=False, indent=2))

View File

@@ -2,7 +2,6 @@
import argparse
import json
import os
import sys
import urllib.parse
import urllib.request

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""CLI adapter for review context."""
import json
import sys
from ..services.review_service import generate_review
def main():
    """Print a compact JSON review summary for the last N hours.

    N is taken from argv[1] (default 12). Output is a trimmed view of the
    full review dict produced by services.review_service.generate_review,
    suitable for direct consumption by an agent.
    """
    hours = int(sys.argv[1]) if len(sys.argv) > 1 else 12
    review = generate_review(hours)

    decisions = review.get("total_decisions", 0)
    trades = review.get("total_trades", 0)
    errors = review.get("total_errors", 0)
    compact = {
        "review_period_hours": review.get("review_period_hours", hours),
        "review_timestamp": review.get("review_timestamp"),
        "total_decisions": decisions,
        "total_trades": trades,
        "total_errors": errors,
        "stats": review.get("stats", {}),
        "insights": review.get("insights", []),
        "recommendations": review.get("recommendations", []),
        "decision_quality_top": review.get("decision_quality", [])[:5],
        # Report only when there is any activity or at least one insight.
        "should_report": bool(decisions or trades or errors or review.get("insights")),
    }
    print(json.dumps(compact, ensure_ascii=False, indent=2))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python3
"""CLI adapter for review engine."""
import json
import sys
from ..services.review_service import generate_review, save_review
def main():
    """Generate and persist a full review report for the last N hours.

    N comes from argv[1] (default 1). On success prints a JSON envelope
    with the saved path and the review body; on any failure logs the
    error, prints a JSON error envelope to stderr, and exits with code 1.
    """
    try:
        window_hours = int(sys.argv[1]) if len(sys.argv) > 1 else 1
        report = generate_review(window_hours)
        saved_path = save_review(report)
        payload = {"ok": True, "saved_path": saved_path, "review": report}
        print(json.dumps(payload, ensure_ascii=False, indent=2))
    except Exception as exc:
        # Imported lazily so the facade stays importable even if logging breaks.
        from ..logger import log_error
        log_error("review_engine", exc)
        print(json.dumps({"ok": False, "error": str(exc)}, ensure_ascii=False), file=sys.stderr)
        raise SystemExit(1)
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,27 +1,30 @@
#!/usr/bin/env python3
"""Rotate external gate log using the user's logrotate config/state."""
import json
import shutil
import subprocess
from ..runtime import ensure_runtime_dirs, get_runtime_paths
PATHS = get_runtime_paths()
STATE_DIR = PATHS.state_dir
LOGROTATE_STATUS = PATHS.logrotate_status
LOGROTATE_CONF = PATHS.logrotate_config
LOGS_DIR = PATHS.logs_dir
def _paths():
    # Lazy lookup of runtime paths; keeps the module free of import-time
    # side effects and lets COINHUNTER_HOME overrides apply per call.
    return get_runtime_paths()
def main():
ensure_runtime_dirs(PATHS)
paths = _paths()
ensure_runtime_dirs(paths)
logrotate_bin = shutil.which("logrotate") or "/usr/sbin/logrotate"
cmd = [logrotate_bin, "-s", str(LOGROTATE_STATUS), str(LOGROTATE_CONF)]
cmd = [logrotate_bin, "-s", str(paths.logrotate_status), str(paths.logrotate_config)]
result = subprocess.run(cmd, capture_output=True, text=True)
if result.stdout.strip():
print(result.stdout.strip())
if result.stderr.strip():
print(result.stderr.strip())
return result.returncode
output = {
"ok": result.returncode == 0,
"returncode": result.returncode,
"stdout": result.stdout.strip(),
"stderr": result.stderr.strip(),
}
print(json.dumps(output, ensure_ascii=False, indent=2))
return 0 if result.returncode == 0 else 1
if __name__ == "__main__":

View File

@@ -2,28 +2,41 @@
"""Coin Hunter structured logger."""
import json
import traceback
from datetime import datetime, timezone, timedelta
from .runtime import get_runtime_paths
__all__ = [
"SCHEMA_VERSION",
"log_decision",
"log_trade",
"log_snapshot",
"log_error",
"get_logs_by_date",
"get_logs_last_n_hours",
]
from datetime import datetime, timedelta, timezone
LOG_DIR = get_runtime_paths().logs_dir
SCHEMA_VERSION = 2
from .runtime import get_runtime_paths, get_user_config
SCHEMA_VERSION = get_user_config("logging.schema_version", 2)
CST = timezone(timedelta(hours=8))
def _log_dir():
return get_runtime_paths().logs_dir
def bj_now():
return datetime.now(CST)
def ensure_dir():
LOG_DIR.mkdir(parents=True, exist_ok=True)
_log_dir().mkdir(parents=True, exist_ok=True)
def _append_jsonl(prefix: str, payload: dict):
ensure_dir()
date_str = bj_now().strftime("%Y%m%d")
log_file = LOG_DIR / f"{prefix}_{date_str}.jsonl"
log_file = _log_dir() / f"{prefix}_{date_str}.jsonl"
with open(log_file, "a", encoding="utf-8") as f:
f.write(json.dumps(payload, ensure_ascii=False) + "\n")
@@ -42,8 +55,8 @@ def log_decision(data: dict):
return log_event("decisions", data)
def log_trade(action: str, symbol: str, qty: float = None, amount_usdt: float = None,
price: float = None, note: str = "", **extra):
def log_trade(action: str, symbol: str, qty: float | None = None, amount_usdt: float | None = None,
price: float | None = None, note: str = "", **extra):
payload = {
"action": action,
"symbol": symbol,
@@ -71,10 +84,10 @@ def log_error(where: str, error: Exception | str, **extra):
return log_event("errors", payload)
def get_logs_by_date(log_type: str, date_str: str = None) -> list:
def get_logs_by_date(log_type: str, date_str: str | None = None) -> list:
if date_str is None:
date_str = bj_now().strftime("%Y%m%d")
log_file = LOG_DIR / f"{log_type}_{date_str}.jsonl"
log_file = _log_dir() / f"{log_type}_{date_str}.jsonl"
if not log_file.exists():
return []
entries = []

View File

@@ -13,13 +13,6 @@ from importlib import import_module
from .services.precheck_service import run as _run_service
_CORE_EXPORTS = {
"PATHS",
"BASE_DIR",
"STATE_DIR",
"STATE_FILE",
"POSITIONS_FILE",
"CONFIG_FILE",
"ENV_FILE",
"BASE_PRICE_MOVE_TRIGGER_PCT",
"BASE_PNL_TRIGGER_PCT",
"BASE_PORTFOLIO_MOVE_TRIGGER_PCT",
@@ -64,14 +57,28 @@ _CORE_EXPORTS = {
"analyze_trigger",
"update_state_after_observation",
}
# Path-related exports are now lazy in precheck_core
_PATH_EXPORTS = {
"PATHS",
"BASE_DIR",
"STATE_DIR",
"STATE_FILE",
"POSITIONS_FILE",
"CONFIG_FILE",
"ENV_FILE",
}
_STATE_EXPORTS = {"mark_run_requested", "ack_analysis"}
__all__ = sorted(_CORE_EXPORTS | _STATE_EXPORTS | {"main"})
__all__ = sorted(_CORE_EXPORTS | _PATH_EXPORTS | _STATE_EXPORTS | {"main"})
def __getattr__(name: str):
if name in _CORE_EXPORTS:
return getattr(import_module(".services.precheck_core", __package__), name)
if name in _PATH_EXPORTS:
return getattr(import_module(".services.precheck_core", __package__), name)
if name in _STATE_EXPORTS:
return getattr(import_module(".services.precheck_state", __package__), name)
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

View File

@@ -1,32 +1,12 @@
#!/usr/bin/env python3
import json
import sys
"""Backward-compatible facade for review context.
from . import review_engine
The executable implementation lives in ``coinhunter.commands.review_context``.
"""
from __future__ import annotations
def main():
hours = int(sys.argv[1]) if len(sys.argv) > 1 else 12
review = review_engine.generate_review(hours)
compact = {
"review_period_hours": review.get("review_period_hours", hours),
"review_timestamp": review.get("review_timestamp"),
"total_decisions": review.get("total_decisions", 0),
"total_trades": review.get("total_trades", 0),
"total_errors": review.get("total_errors", 0),
"stats": review.get("stats", {}),
"insights": review.get("insights", []),
"recommendations": review.get("recommendations", []),
"decision_quality_top": review.get("decision_quality", [])[:5],
"should_report": bool(
review.get("total_decisions", 0)
or review.get("total_trades", 0)
or review.get("total_errors", 0)
or review.get("insights")
),
}
print(json.dumps(compact, ensure_ascii=False, indent=2))
from .commands.review_context import main
if __name__ == "__main__":
main()

View File

@@ -1,312 +1,48 @@
#!/usr/bin/env python3
"""Coin Hunter hourly review engine."""
import json
import os
import sys
from datetime import datetime, timezone, timedelta
from pathlib import Path
"""Backward-compatible facade for review engine.
import ccxt
The executable implementation lives in ``coinhunter.commands.review_engine``.
Core logic is in ``coinhunter.services.review_service``.
"""
from .logger import get_logs_last_n_hours, log_error
from .runtime import get_runtime_paths, load_env_file
from __future__ import annotations
PATHS = get_runtime_paths()
ENV_FILE = PATHS.env_file
REVIEW_DIR = PATHS.reviews_dir
from importlib import import_module
CST = timezone(timedelta(hours=8))
# Re-export service functions for backward compatibility
_EXPORT_MAP = {
"load_env": (".services.review_service", "load_env"),
"get_exchange": (".services.review_service", "get_exchange"),
"ensure_review_dir": (".services.review_service", "ensure_review_dir"),
"norm_symbol": (".services.review_service", "norm_symbol"),
"fetch_current_price": (".services.review_service", "fetch_current_price"),
"analyze_trade": (".services.review_service", "analyze_trade"),
"analyze_hold_passes": (".services.review_service", "analyze_hold_passes"),
"analyze_cash_misses": (".services.review_service", "analyze_cash_misses"),
"generate_review": (".services.review_service", "generate_review"),
"save_review": (".services.review_service", "save_review"),
"print_review": (".services.review_service", "print_review"),
}
__all__ = sorted(set(_EXPORT_MAP) | {"main"})
def load_env():
    # Populate os.environ from the runtime .env file (BINANCE_* keys are
    # read later by get_exchange()).
    load_env_file(PATHS)
def __getattr__(name: str):
    """Lazily resolve re-exported names on first access (PEP 562).

    Known names are looked up in ``_EXPORT_MAP`` and imported from their
    service module on demand; unknown names raise AttributeError.
    """
    if name in _EXPORT_MAP:
        target_module, target_attr = _EXPORT_MAP[name]
        return getattr(import_module(target_module, __package__), target_attr)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
def get_exchange():
    """Build an authenticated ccxt Binance spot client with markets loaded.

    Credentials are read from BINANCE_API_KEY / BINANCE_API_SECRET after
    loading the runtime .env file via load_env().
    """
    load_env()
    ex = ccxt.binance({
        "apiKey": os.getenv("BINANCE_API_KEY"),
        "secret": os.getenv("BINANCE_API_SECRET"),
        # Spot trading only; rate limiting delegated to ccxt.
        "options": {"defaultType": "spot"},
        "enableRateLimit": True,
    })
    ex.load_markets()
    return ex
def ensure_review_dir():
    # Create the reviews output directory (and parents) if it doesn't exist.
    REVIEW_DIR.mkdir(parents=True, exist_ok=True)
def norm_symbol(symbol: str) -> str:
    """Normalize *symbol* to ccxt's "BASE/USDT" spelling.

    Dashes and underscores are stripped and the result upper-cased.  A bare
    "XXXUSDT" gains a slash; anything already slashed, or not USDT-quoted,
    is returned unchanged.
    """
    cleaned = symbol.upper().translate(str.maketrans("", "", "-_"))
    if "/" in cleaned:
        return cleaned
    if not cleaned.endswith("USDT"):
        return cleaned
    return f"{cleaned[:-4]}/USDT"
def fetch_current_price(ex, symbol: str):
    """Best-effort lookup of the latest traded price for *symbol*.

    Returns the price as a float, or None on any exchange/parse failure.
    """
    try:
        ticker = ex.fetch_ticker(norm_symbol(symbol))
        return float(ticker["last"])
    except Exception:
        # Deliberately broad: a missing ticker must not break the review.
        return None
def analyze_trade(trade: dict, ex) -> dict:
    """Grade one executed trade against the current market price.

    A BUY is "good" when price ran up more than 2% afterwards and "bad" when
    it fell more than 2%; a SELL_ALL is "good" when price kept falling >2%
    and "missed" when it rallied >2% after the sale.  Without both an
    execution price and a live price the outcome stays "neutral" and the
    PnL estimate is None.
    """
    symbol = trade.get("symbol")
    exec_price = trade.get("price")
    action = trade.get("action", "")
    live_price = fetch_current_price(ex, symbol) if symbol else None

    pnl_estimate = None
    outcome = "neutral"
    if symbol and exec_price and live_price:
        change_pct = (live_price - float(exec_price)) / float(exec_price) * 100
        if action == "BUY":
            pnl_estimate = round(change_pct, 2)
            if change_pct > 2:
                outcome = "good"
            elif change_pct < -2:
                outcome = "bad"
        elif action == "SELL_ALL":
            pnl_estimate = round(-change_pct, 2)
            # Lowered missed threshold: >2% is a missed opportunity in short-term trading
            if change_pct < -2:
                outcome = "good"
            elif change_pct > 2:
                outcome = "missed"

    return {
        "timestamp": trade.get("timestamp"),
        "symbol": symbol,
        "action": action,
        "decision_id": trade.get("decision_id"),
        "execution_price": exec_price,
        "current_price": live_price,
        "pnl_estimate_pct": pnl_estimate,
        "outcome_assessment": outcome,
    }
def analyze_hold_passes(decisions: list, ex) -> list:
    """Check HOLD decisions where an opportunity was explicitly PASSed but later rallied.

    For each HOLD decision, every evaluated opportunity whose verdict mentions
    "PASS" is re-priced against the live market; a rally above 3% since the
    decision-time snapshot price is recorded as a missed watch.
    Returns a list of miss dicts (timestamp, symbol, prices, change_pct,
    verdict snippet).
    """
    misses = []
    for d in decisions:
        if d.get("decision") != "HOLD":
            continue
        analysis = d.get("analysis")
        if not isinstance(analysis, dict):
            continue
        opportunities = analysis.get("opportunities_evaluated", [])
        market_snapshot = d.get("market_snapshot", {})
        if not opportunities or not market_snapshot:
            continue
        for op in opportunities:
            verdict = op.get("verdict", "")
            # Matches "PASS" or "pass" anywhere in the verdict text.
            if "PASS" not in verdict and "pass" not in verdict:
                continue
            symbol = op.get("symbol", "")
            # Try to extract decision-time price from market_snapshot
            # (snapshot keys may be slashed "BTC/USDT" or flat "BTCUSDT").
            snap = market_snapshot.get(symbol) or market_snapshot.get(symbol.replace("/", ""))
            if not snap:
                continue
            decision_price = None
            if isinstance(snap, dict):
                # Prefer "lastPrice"; fall back to "last" when it is 0/absent.
                decision_price = float(snap.get("lastPrice", 0)) or float(snap.get("last", 0))
            elif isinstance(snap, (int, float, str)):
                decision_price = float(snap)
            if not decision_price:
                continue
            current_price = fetch_current_price(ex, symbol)
            if not current_price:
                continue
            change_pct = (current_price - decision_price) / decision_price * 100
            if change_pct > 3:  # >3% rally after being passed = missed watch
                misses.append({
                    "timestamp": d.get("timestamp"),
                    "symbol": symbol,
                    "decision_price": round(decision_price, 8),
                    "current_price": round(current_price, 8),
                    "change_pct": round(change_pct, 2),
                    "verdict_snippet": verdict[:80],
                })
    return misses
def analyze_cash_misses(decisions: list, ex) -> list:
    """If portfolio was mostly USDT but a watchlist coin rallied >5%, flag it.

    A decision counts as "sitting in cash" when at least 90% of the summed
    balance values are USDT.  For each such decision every USDT-quoted symbol
    in its market snapshot is compared against the live price; rallies above
    5% are flagged.  Results are deduplicated per symbol, keeping the worst
    (largest) miss.

    Fix: the original first pass built a ``watchlist`` set from all snapshots
    that was never read afterwards — dead work removed.
    """
    misses = []
    for d in decisions:
        ts = d.get("timestamp")
        balances = d.get("balances") or d.get("balances_before", {})
        if not balances:
            continue
        # NOTE(review): balance values are summed as raw magnitudes —
        # presumably already USDT-denominated; confirm against the logger.
        total = sum(float(v) if isinstance(v, (int, float, str)) else 0 for v in balances.values())
        usdt = float(balances.get("USDT", 0))
        if total == 0 or (usdt / total) < 0.9:
            continue
        # Portfolio mostly cash — check watchlist performance
        snap = d.get("market_snapshot", {})
        if not isinstance(snap, dict):
            continue
        for symbol, data in snap.items():
            if not symbol.endswith("USDT"):
                continue
            decision_price = None
            if isinstance(data, dict):
                # Prefer "lastPrice"; fall back to "last" when 0/absent.
                decision_price = float(data.get("lastPrice", 0)) or float(data.get("last", 0))
            elif isinstance(data, (int, float, str)):
                decision_price = float(data)
            if not decision_price:
                continue
            current_price = fetch_current_price(ex, symbol)
            if not current_price:
                continue
            change_pct = (current_price - decision_price) / decision_price * 100
            if change_pct > 5:
                misses.append({
                    "timestamp": ts,
                    "symbol": symbol,
                    "decision_price": round(decision_price, 8),
                    "current_price": round(current_price, 8),
                    "change_pct": round(change_pct, 2),
                })
    # Deduplicate by symbol keeping the worst miss
    seen = {}
    for m in misses:
        sym = m["symbol"]
        if sym not in seen or m["change_pct"] > seen[sym]["change_pct"]:
            seen[sym] = m
    return list(seen.values())
def generate_review(hours: int = 1) -> dict:
    """Build the hourly review report over the last *hours* hours of logs.

    Pulls decision/trade/error logs, grades each executed trade against live
    prices, scans HOLD and cash-heavy decisions for missed rallies, and fills
    in stats, insights and recommendations.  Insight/recommendation strings
    are user-facing Chinese text consumed downstream — do not translate.
    """
    decisions = get_logs_last_n_hours("decisions", hours)
    trades = get_logs_last_n_hours("trades", hours)
    errors = get_logs_last_n_hours("errors", hours)
    review = {
        "review_period_hours": hours,
        "review_timestamp": datetime.now(CST).isoformat(),
        "total_decisions": len(decisions),
        "total_trades": len(trades),
        "total_errors": len(errors),
        "decision_quality": [],
        "stats": {},
        "insights": [],
        "recommendations": [],
    }
    # Nothing happened this period: skip exchange setup entirely.
    if not decisions and not trades:
        review["insights"].append("本周期无决策/交易记录")
        return review
    ex = get_exchange()
    outcomes = {"good": 0, "neutral": 0, "bad": 0, "missed": 0}
    pnl_samples = []
    # Grade every executed trade; collect PnL samples where computable.
    for trade in trades:
        analysis = analyze_trade(trade, ex)
        review["decision_quality"].append(analysis)
        outcomes[analysis["outcome_assessment"]] += 1
        if analysis["pnl_estimate_pct"] is not None:
            pnl_samples.append(analysis["pnl_estimate_pct"])
    # New: analyze missed opportunities from HOLD / cash decisions
    hold_pass_misses = analyze_hold_passes(decisions, ex)
    cash_misses = analyze_cash_misses(decisions, ex)
    total_missed = outcomes["missed"] + len(hold_pass_misses) + len(cash_misses)
    review["stats"] = {
        "good_decisions": outcomes["good"],
        "neutral_decisions": outcomes["neutral"],
        "bad_decisions": outcomes["bad"],
        "missed_opportunities": total_missed,
        "missed_sell_all": outcomes["missed"],
        "missed_hold_passes": len(hold_pass_misses),
        "missed_cash_sits": len(cash_misses),
        "avg_estimated_edge_pct": round(sum(pnl_samples) / len(pnl_samples), 2) if pnl_samples else None,
    }
    # Insight heuristics: each appended string is advice for the next cycle.
    if errors:
        review["insights"].append(f"本周期出现 {len(errors)} 次执行/系统错误,健壮性需优先关注")
    if outcomes["bad"] > outcomes["good"]:
        review["insights"].append("最近交易质量偏弱,建议降低交易频率或提高入场门槛")
    if total_missed > 0:
        parts = []
        if outcomes["missed"]:
            parts.append(f"卖出后继续上涨 {outcomes['missed']}")
        if hold_pass_misses:
            parts.append(f"PASS 后错失 {len(hold_pass_misses)}")
        if cash_misses:
            parts.append(f"空仓观望错失 {len(cash_misses)}")
        # NOTE(review): parts are joined with an empty separator — looks like
        # a delimiter (e.g. "、") was intended; confirm before changing.
        review["insights"].append("存在错失机会: " + "".join(parts) + ",建议放宽趋势跟随或入场条件")
    if outcomes["good"] >= max(1, outcomes["bad"] + total_missed):
        review["insights"].append("近期决策总体可接受")
    if not trades and decisions:
        review["insights"].append("有决策无成交,可能是观望、最小成交额限制或执行被拦截")
    # Fewer than 10% of decisions converting to trades suggests execution
    # thresholds are blocking small-capital orders.
    if len(trades) < len(decisions) * 0.1 and decisions:
        review["insights"].append("大量决策未转化为交易,需检查执行门槛(最小成交额/精度/手续费缓冲)是否过高")
    if hold_pass_misses:
        for m in hold_pass_misses[:3]:
            review["insights"].append(f"HOLD 时 PASS 了 {m['symbol']},之后上涨 {m['change_pct']}%")
    if cash_misses:
        for m in cash_misses[:3]:
            review["insights"].append(f"持仓以 USDT 为主时 {m['symbol']} 上涨 {m['change_pct']}%")
    # NOTE(review): the last recommendation string has an unclosed "(" —
    # confirm the intended text before fixing, it is agent-consumed output.
    review["recommendations"] = [
        "优先检查最小成交额/精度拒单是否影响小资金执行",
        "若连续两个复盘周期 edge 为负,下一小时减少换仓频率",
        "若错误日志增加,优先进入防守模式(多持 USDT",
    ]
    return review
def save_review(review: dict):
    """Persist *review* as a timestamped JSON file; return the path as str."""
    ensure_review_dir()
    stamp = datetime.now(CST).strftime("%Y%m%d_%H%M%S")
    target = REVIEW_DIR / f"review_{stamp}.json"
    payload = json.dumps(review, indent=2, ensure_ascii=False)
    target.write_text(payload, encoding="utf-8")
    return str(target)
def print_review(review: dict):
    """Pretty-print *review* to stdout (labels are user-facing Chinese text)."""
    print("=" * 50)
    print("📊 Coin Hunter 小时复盘报告")
    print(f"复盘时间: {review['review_timestamp']}")
    print(f"统计周期: 过去 {review['review_period_hours']} 小时")
    print(f"总决策数: {review['total_decisions']} | 总交易数: {review['total_trades']} | 总错误数: {review['total_errors']}")
    stats = review.get("stats", {})
    # Decision-quality tallies produced by generate_review().
    print("\n决策质量统计:")
    print(f"✓ 优秀: {stats.get('good_decisions', 0)}")
    print(f"○ 中性: {stats.get('neutral_decisions', 0)}")
    print(f"✗ 失误: {stats.get('bad_decisions', 0)}")
    print(f"↗ 错过机会: {stats.get('missed_opportunities', 0)}")
    if stats.get("avg_estimated_edge_pct") is not None:
        print(f"平均估计 edge: {stats['avg_estimated_edge_pct']}%")
    # Optional sections, only printed when non-empty.
    if review.get("insights"):
        print("\n💡 见解:")
        for item in review["insights"]:
            print(f"{item}")
    if review.get("recommendations"):
        print("\n🔧 优化建议:")
        for item in review["recommendations"]:
            print(f"{item}")
    print("=" * 50)
def __dir__():
    # Advertise lazily-resolved re-exports (__all__) alongside real globals.
    return sorted(set(globals()) | set(__all__))
def main():
try:
hours = int(sys.argv[1]) if len(sys.argv) > 1 else 1
review = generate_review(hours)
path = save_review(review)
print_review(review)
print(f"复盘已保存至: {path}")
except Exception as e:
log_error("review_engine", e)
raise
from .commands.review_engine import main as _main
return _main()
if __name__ == "__main__":
main()
raise SystemExit(main())

View File

@@ -2,6 +2,7 @@
from __future__ import annotations
import json
import os
import shutil
from dataclasses import asdict, dataclass
@@ -24,6 +25,7 @@ class RuntimePaths:
positions_lock: Path
executions_lock: Path
precheck_state_file: Path
precheck_state_lock: Path
external_gate_lock: Path
logrotate_config: Path
logrotate_status: Path
@@ -64,6 +66,7 @@ def get_runtime_paths() -> RuntimePaths:
positions_lock=root / "positions.lock",
executions_lock=root / "executions.lock",
precheck_state_file=state_dir / "precheck_state.json",
precheck_state_lock=state_dir / "precheck_state.lock",
external_gate_lock=state_dir / "external_gate.lock",
logrotate_config=root / "logrotate_external_gate.conf",
logrotate_status=state_dir / "logrotate_external_gate.status",
@@ -105,3 +108,20 @@ def mask_secret(value: str | None, *, tail: int = 4) -> str:
if len(value) <= tail:
return "*" * len(value)
return "*" * max(4, len(value) - tail) + value[-tail:]
def get_user_config(key: str, default=None):
    """Read a dotted key from the user config file.

    Walks *key* (e.g. ``"exchange.cache_ttl_seconds"``) through the JSON
    config.  Returns *default* when the file is missing/unreadable, when any
    intermediate node is not a dict, or when the path resolves to None.
    """
    paths = get_runtime_paths()
    try:
        node = json.loads(paths.config_file.read_text(encoding="utf-8"))
    except Exception:
        # Unreadable or invalid config: fall back silently to the default.
        return default
    for part in key.split("."):
        if not isinstance(node, dict):
            return default
        node = node.get(part)
        if node is None:
            return default
    return node

View File

@@ -31,7 +31,7 @@ def build_adaptive_profile(snapshot: dict):
dust_mode = free_usdt < MIN_ACTIONABLE_USDT and largest_position_value < MIN_REAL_POSITION_VALUE_USDT
price_trigger = BASE_PRICE_MOVE_TRIGGER_PCT
pnl_trigger = BASE_PN_L_TRIGGER_PCT
pnl_trigger = BASE_PNL_TRIGGER_PCT
portfolio_trigger = BASE_PORTFOLIO_MOVE_TRIGGER_PCT
candidate_ratio = BASE_CANDIDATE_SCORE_TRIGGER_RATIO
force_minutes = BASE_FORCE_ANALYSIS_AFTER_MINUTES

View File

@@ -63,7 +63,7 @@ def top_candidates_from_tickers(tickers: dict):
})
candidates.sort(key=lambda x: x["score"], reverse=True)
global_top = candidates[:TOP_CANDIDATES]
layers = {"major": [], "mid": [], "meme": []}
layers: dict[str, list[dict]] = {"major": [], "mid": [], "meme": []}
for c in candidates:
layers[c["band"]].append(c)
for k in layers:

View File

@@ -1,25 +1,47 @@
"""Exchange helpers (ccxt, markets, balances, order prep)."""
import math
import os
import time
__all__ = [
"load_env",
"get_exchange",
"norm_symbol",
"storage_symbol",
"fetch_balances",
"build_market_snapshot",
"market_and_ticker",
"floor_to_step",
"prepare_buy_quantity",
"prepare_sell_quantity",
]
import ccxt
from ..runtime import get_runtime_paths, load_env_file
from .trade_common import log
from ..runtime import get_runtime_paths, get_user_config, load_env_file
PATHS = get_runtime_paths()
_exchange_cache = None
_exchange_cached_at = None
CACHE_TTL_SECONDS = 3600
def load_env():
load_env_file(PATHS)
load_env_file(get_runtime_paths())
def get_exchange():
def get_exchange(force_new: bool = False):
global _exchange_cache, _exchange_cached_at
now = time.time()
if not force_new and _exchange_cache is not None and _exchange_cached_at is not None:
ttl = get_user_config("exchange.cache_ttl_seconds", CACHE_TTL_SECONDS)
if now - _exchange_cached_at < ttl:
return _exchange_cache
load_env()
api_key = os.getenv("BINANCE_API_KEY")
secret = os.getenv("BINANCE_API_SECRET")
if not api_key or not secret:
raise RuntimeError("缺少 BINANCE_API_KEY BINANCE_API_SECRET")
raise RuntimeError("Missing BINANCE_API_KEY or BINANCE_API_SECRET")
ex = ccxt.binance(
{
"apiKey": api_key,
@@ -29,6 +51,8 @@ def get_exchange():
}
)
ex.load_markets()
_exchange_cache = ex
_exchange_cached_at = now
return ex
@@ -38,7 +62,7 @@ def norm_symbol(symbol: str) -> str:
return s
if s.endswith("USDT"):
return s[:-4] + "/USDT"
raise ValueError(f"不支持的 symbol: {symbol}")
raise ValueError(f"Unsupported symbol: {symbol}")
def storage_symbol(symbol: str) -> str:
@@ -63,7 +87,8 @@ def build_market_snapshot(ex):
if price is None or float(price) <= 0:
continue
vol = float(t.get("quoteVolume") or 0)
if vol < 200_000:
min_volume = get_user_config("exchange.min_quote_volume", 200_000)
if vol < min_volume:
continue
base = sym.replace("/", "")
snapshot[base] = {
@@ -95,17 +120,17 @@ def prepare_buy_quantity(ex, symbol: str, amount_usdt: float):
sym, market, ticker = market_and_ticker(ex, symbol)
ask = float(ticker.get("ask") or ticker.get("last") or 0)
if ask <= 0:
raise RuntimeError(f"{sym} 无法获取有效 ask 价格")
raise RuntimeError(f"No valid ask price for {sym}")
budget = amount_usdt * (1 - USDT_BUFFER_PCT)
raw_qty = budget / ask
qty = float(ex.amount_to_precision(sym, raw_qty))
min_amt = (market.get("limits", {}).get("amount", {}) or {}).get("min") or 0
min_cost = (market.get("limits", {}).get("cost", {}) or {}).get("min") or 0
if min_amt and qty < float(min_amt):
raise RuntimeError(f"{sym} 买入数量 {qty} 小于最小数量 {min_amt}")
raise RuntimeError(f"Buy quantity {qty} for {sym} below minimum {min_amt}")
est_cost = qty * ask
if min_cost and est_cost < float(min_cost):
raise RuntimeError(f"{sym} 买入金额 ${est_cost:.4f} 小于最小成交额 ${float(min_cost):.4f}")
raise RuntimeError(f"Buy cost ${est_cost:.4f} for {sym} below minimum ${float(min_cost):.4f}")
return sym, qty, ask, est_cost
@@ -113,13 +138,13 @@ def prepare_sell_quantity(ex, symbol: str, free_qty: float):
sym, market, ticker = market_and_ticker(ex, symbol)
bid = float(ticker.get("bid") or ticker.get("last") or 0)
if bid <= 0:
raise RuntimeError(f"{sym} 无法获取有效 bid 价格")
raise RuntimeError(f"No valid bid price for {sym}")
qty = float(ex.amount_to_precision(sym, free_qty))
min_amt = (market.get("limits", {}).get("amount", {}) or {}).get("min") or 0
min_cost = (market.get("limits", {}).get("cost", {}) or {}).get("min") or 0
if min_amt and qty < float(min_amt):
raise RuntimeError(f"{sym} 卖出数量 {qty} 小于最小数量 {min_amt}")
raise RuntimeError(f"Sell quantity {qty} for {sym} below minimum {min_amt}")
est_cost = qty * bid
if min_cost and est_cost < float(min_cost):
raise RuntimeError(f"{sym} 卖出金额 ${est_cost:.4f} 小于最小成交额 ${float(min_cost):.4f}")
raise RuntimeError(f"Sell cost ${est_cost:.4f} for {sym} below minimum ${float(min_cost):.4f}")
return sym, qty, bid, est_cost

View File

@@ -1,17 +1,25 @@
"""Execution state helpers (decision deduplication, executions.json)."""
import hashlib
from ..runtime import get_runtime_paths
from .file_utils import load_json_locked, save_json_locked
from .trade_common import bj_now_iso
__all__ = [
"default_decision_id",
"load_executions",
"save_executions",
"record_execution_state",
"get_execution_state",
]
PATHS = get_runtime_paths()
EXECUTIONS_FILE = PATHS.executions_file
EXECUTIONS_LOCK = PATHS.executions_lock
from ..runtime import get_runtime_paths
from .file_utils import load_json_locked, read_modify_write_json, save_json_locked
def _paths():
return get_runtime_paths()
def default_decision_id(action: str, argv_tail: list[str]) -> str:
from datetime import datetime
from .trade_common import CST
now = datetime.now(CST)
@@ -22,17 +30,24 @@ def default_decision_id(action: str, argv_tail: list[str]) -> str:
def load_executions() -> dict:
return load_json_locked(EXECUTIONS_FILE, EXECUTIONS_LOCK, {"executions": {}}).get("executions", {})
paths = _paths()
data = load_json_locked(paths.executions_file, paths.executions_lock, {"executions": {}})
return data.get("executions", {}) # type: ignore[no-any-return]
def save_executions(executions: dict):
save_json_locked(EXECUTIONS_FILE, EXECUTIONS_LOCK, {"executions": executions})
paths = _paths()
save_json_locked(paths.executions_file, paths.executions_lock, {"executions": executions})
def record_execution_state(decision_id: str, payload: dict):
executions = load_executions()
executions[decision_id] = payload
save_executions(executions)
paths = _paths()
read_modify_write_json(
paths.executions_file,
paths.executions_lock,
{"executions": {}},
lambda data: data.setdefault("executions", {}).__setitem__(decision_id, payload) or data,
)
def get_execution_state(decision_id: str):

View File

@@ -2,6 +2,14 @@
import fcntl
import json
import os
__all__ = [
"locked_file",
"atomic_write_json",
"load_json_locked",
"save_json_locked",
"read_modify_write_json",
]
from contextlib import contextmanager
from pathlib import Path
@@ -9,13 +17,22 @@ from pathlib import Path
@contextmanager
def locked_file(path: Path):
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "a+", encoding="utf-8") as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
f.seek(0)
yield f
f.flush()
os.fsync(f.fileno())
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
fd = None
try:
fd = os.open(path, os.O_RDWR | os.O_CREAT)
fcntl.flock(fd, fcntl.LOCK_EX)
yield fd
finally:
if fd is not None:
try:
os.fsync(fd)
except Exception:
pass
try:
fcntl.flock(fd, fcntl.LOCK_UN)
except Exception:
pass
os.close(fd)
def atomic_write_json(path: Path, data: dict):
@@ -38,3 +55,22 @@ def load_json_locked(path: Path, lock_path: Path, default):
def save_json_locked(path: Path, lock_path: Path, data: dict):
with locked_file(lock_path):
atomic_write_json(path, data)
def read_modify_write_json(path: Path, lock_path: Path, default, modifier):
    """Atomic read-modify-write under a single file lock.

    Loads JSON from *path* (or uses *default* if missing/invalid),
    calls ``modifier(data)``, then atomically writes the result back.
    If *modifier* returns None, the mutated *data* is written.

    The lock on *lock_path* is held across the entire read + modify + write
    cycle, closing the TOCTOU window of separate load/save calls.
    NOTE: *default* is used as-is (not copied) — pass a fresh object per
    call, since *modifier* may mutate it.
    """
    with locked_file(lock_path):
        if path.exists():
            try:
                data = json.loads(path.read_text(encoding="utf-8"))
            except Exception:
                # Corrupt/partial JSON falls back to the default skeleton.
                data = default
        else:
            data = default
        result = modifier(data)
        atomic_write_json(path, result if result is not None else data)

View File

@@ -7,15 +7,12 @@ import os
import ccxt
from .data_utils import norm_symbol, to_float
from .precheck_constants import BLACKLIST, MAX_PRICE_CAP, MIN_CHANGE_PCT
from .time_utils import utc_now
def get_exchange():
from ..runtime import load_env_file
from .precheck_constants import ENV_FILE
load_env_file(ENV_FILE)
load_env_file()
api_key = os.getenv("BINANCE_API_KEY")
secret = os.getenv("BINANCE_API_SECRET")
if not api_key or not secret:

View File

@@ -1,19 +1,47 @@
"""Portfolio state helpers (positions.json, reconcile with exchange)."""
from ..runtime import get_runtime_paths
from .file_utils import load_json_locked, save_json_locked
__all__ = [
"load_positions",
"save_positions",
"update_positions",
"upsert_position",
"reconcile_positions_with_exchange",
]
from .file_utils import load_json_locked, read_modify_write_json, save_json_locked
from .trade_common import bj_now_iso
PATHS = get_runtime_paths()
POSITIONS_FILE = PATHS.positions_file
POSITIONS_LOCK = PATHS.positions_lock
def _paths():
    # Resolved per call (not cached at import) so runtime-root changes apply.
    return get_runtime_paths()
def load_positions() -> list:
return load_json_locked(POSITIONS_FILE, POSITIONS_LOCK, {"positions": []}).get("positions", [])
paths = _paths()
data = load_json_locked(paths.positions_file, paths.positions_lock, {"positions": []})
return data.get("positions", []) # type: ignore[no-any-return]
def save_positions(positions: list):
save_json_locked(POSITIONS_FILE, POSITIONS_LOCK, {"positions": positions})
paths = _paths()
save_json_locked(paths.positions_file, paths.positions_lock, {"positions": positions})
def update_positions(modifier):
    """Atomic read-modify-write for positions under a single lock.

    *modifier* receives the current positions list and may mutate it in-place
    or return a new list. If it returns None, the mutated list is saved.
    """
    paths = _paths()

    def _apply(data):
        # Unwrap the {"positions": [...]} envelope for the caller's modifier.
        current = data.get("positions", [])
        replacement = modifier(current)
        if replacement is None:
            replacement = current
        data["positions"] = replacement
        return data

    read_modify_write_json(paths.positions_file, paths.positions_lock, {"positions": []}, _apply)
def upsert_position(positions: list, position: dict):
@@ -26,32 +54,43 @@ def upsert_position(positions: list, position: dict):
return positions
def reconcile_positions_with_exchange(ex, positions: list):
def reconcile_positions_with_exchange(ex, positions_hint: list | None = None):
from .exchange_service import fetch_balances
balances = fetch_balances(ex)
existing_by_symbol = {p.get("symbol"): p for p in positions}
reconciled = []
for asset, qty in balances.items():
if asset == "USDT":
continue
if qty <= 0:
continue
sym = f"{asset}USDT"
old = existing_by_symbol.get(sym, {})
reconciled.append(
{
"account_id": old.get("account_id", "binance-main"),
"symbol": sym,
"base_asset": asset,
"quote_asset": "USDT",
"market_type": "spot",
"quantity": qty,
"avg_cost": old.get("avg_cost"),
"opened_at": old.get("opened_at", bj_now_iso()),
"updated_at": bj_now_iso(),
"note": old.get("note", "Reconciled from Binance balances"),
}
)
save_positions(reconciled)
def _modify(data):
nonlocal reconciled
existing = data.get("positions", [])
existing_by_symbol = {p.get("symbol"): p for p in existing}
if positions_hint is not None:
existing_by_symbol.update({p.get("symbol"): p for p in positions_hint})
reconciled = []
for asset, qty in balances.items():
if asset == "USDT":
continue
if qty <= 0:
continue
sym = f"{asset}USDT"
old = existing_by_symbol.get(sym, {})
reconciled.append(
{
"account_id": old.get("account_id", "binance-main"),
"symbol": sym,
"base_asset": asset,
"quote_asset": "USDT",
"market_type": "spot",
"quantity": qty,
"avg_cost": old.get("avg_cost"),
"opened_at": old.get("opened_at", bj_now_iso()),
"updated_at": bj_now_iso(),
"note": old.get("note", "Reconciled from Binance balances"),
}
)
data["positions"] = reconciled
return data
paths = _paths()
read_modify_write_json(paths.positions_file, paths.positions_lock, {"positions": []}, _modify)
return reconciled, balances

View File

@@ -3,7 +3,6 @@
from __future__ import annotations
from .time_utils import utc_iso
from .trigger_analyzer import analyze_trigger
def build_failure_payload(exc: Exception) -> dict:
@@ -18,5 +17,5 @@ def build_failure_payload(exc: Exception) -> dict:
"soft_reasons": [],
"soft_score": 0,
"details": [str(exc)],
"compact_summary": f"预检查失败,转入深度分析兑底: {exc}",
"compact_summary": f"Precheck failed, falling back to deep analysis: {exc}",
}

View File

@@ -1,31 +1,25 @@
"""Precheck constants and runtime paths."""
"""Precheck constants and thresholds."""
from __future__ import annotations
from ..runtime import get_runtime_paths
from ..runtime import get_user_config
PATHS = get_runtime_paths()
BASE_DIR = PATHS.root
STATE_DIR = PATHS.state_dir
STATE_FILE = PATHS.precheck_state_file
POSITIONS_FILE = PATHS.positions_file
CONFIG_FILE = PATHS.config_file
ENV_FILE = PATHS.env_file
_BASE = "precheck"
BASE_PRICE_MOVE_TRIGGER_PCT = 0.025
BASE_PNL_TRIGGER_PCT = 0.03
BASE_PORTFOLIO_MOVE_TRIGGER_PCT = 0.03
BASE_CANDIDATE_SCORE_TRIGGER_RATIO = 1.15
BASE_FORCE_ANALYSIS_AFTER_MINUTES = 180
BASE_COOLDOWN_MINUTES = 45
TOP_CANDIDATES = 10
MIN_ACTIONABLE_USDT = 12.0
MIN_REAL_POSITION_VALUE_USDT = 8.0
BLACKLIST = {"USDC", "BUSD", "TUSD", "FDUSD", "USTC", "PAXG"}
HARD_STOP_PCT = -0.08
HARD_MOON_PCT = 0.25
MIN_CHANGE_PCT = 1.0
MAX_PRICE_CAP = None
HARD_REASON_DEDUP_MINUTES = 15
MAX_PENDING_TRIGGER_MINUTES = 30
MAX_RUN_REQUEST_MINUTES = 20
BASE_PRICE_MOVE_TRIGGER_PCT = get_user_config(f"{_BASE}.base_price_move_trigger_pct", 0.025)
BASE_PNL_TRIGGER_PCT = get_user_config(f"{_BASE}.base_pnl_trigger_pct", 0.03)
BASE_PORTFOLIO_MOVE_TRIGGER_PCT = get_user_config(f"{_BASE}.base_portfolio_move_trigger_pct", 0.03)
BASE_CANDIDATE_SCORE_TRIGGER_RATIO = get_user_config(f"{_BASE}.base_candidate_score_trigger_ratio", 1.15)
BASE_FORCE_ANALYSIS_AFTER_MINUTES = get_user_config(f"{_BASE}.base_force_analysis_after_minutes", 180)
BASE_COOLDOWN_MINUTES = get_user_config(f"{_BASE}.base_cooldown_minutes", 45)
TOP_CANDIDATES = get_user_config(f"{_BASE}.top_candidates", 10)
MIN_ACTIONABLE_USDT = get_user_config(f"{_BASE}.min_actionable_usdt", 12.0)
MIN_REAL_POSITION_VALUE_USDT = get_user_config(f"{_BASE}.min_real_position_value_usdt", 8.0)
BLACKLIST = set(get_user_config(f"{_BASE}.blacklist", ["USDC", "BUSD", "TUSD", "FDUSD", "USTC", "PAXG"]))
HARD_STOP_PCT = get_user_config(f"{_BASE}.hard_stop_pct", -0.08)
HARD_MOON_PCT = get_user_config(f"{_BASE}.hard_moon_pct", 0.25)
MIN_CHANGE_PCT = get_user_config(f"{_BASE}.min_change_pct", 1.0)
MAX_PRICE_CAP = get_user_config(f"{_BASE}.max_price_cap", None)
HARD_REASON_DEDUP_MINUTES = get_user_config(f"{_BASE}.hard_reason_dedup_minutes", 15)
MAX_PENDING_TRIGGER_MINUTES = get_user_config(f"{_BASE}.max_pending_trigger_minutes", 30)
MAX_RUN_REQUEST_MINUTES = get_user_config(f"{_BASE}.max_run_request_minutes", 20)

View File

@@ -18,14 +18,19 @@ from __future__ import annotations
from importlib import import_module
from ..runtime import get_runtime_paths
_PATH_ALIASES = {
"PATHS": lambda: get_runtime_paths(),
"BASE_DIR": lambda: get_runtime_paths().root,
"STATE_DIR": lambda: get_runtime_paths().state_dir,
"STATE_FILE": lambda: get_runtime_paths().precheck_state_file,
"POSITIONS_FILE": lambda: get_runtime_paths().positions_file,
"CONFIG_FILE": lambda: get_runtime_paths().config_file,
"ENV_FILE": lambda: get_runtime_paths().env_file,
}
_MODULE_MAP = {
"PATHS": ".precheck_constants",
"BASE_DIR": ".precheck_constants",
"STATE_DIR": ".precheck_constants",
"STATE_FILE": ".precheck_constants",
"POSITIONS_FILE": ".precheck_constants",
"CONFIG_FILE": ".precheck_constants",
"ENV_FILE": ".precheck_constants",
"BASE_PRICE_MOVE_TRIGGER_PCT": ".precheck_constants",
"BASE_PNL_TRIGGER_PCT": ".precheck_constants",
"BASE_PORTFOLIO_MOVE_TRIGGER_PCT": ".precheck_constants",
@@ -74,10 +79,12 @@ _MODULE_MAP = {
"analyze_trigger": ".trigger_analyzer",
}
__all__ = sorted(set(_MODULE_MAP) | {"main"})
__all__ = sorted(set(_MODULE_MAP) | set(_PATH_ALIASES) | {"main"})
def __getattr__(name: str):
if name in _PATH_ALIASES:
return _PATH_ALIASES[name]()
if name not in _MODULE_MAP:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
module_name = _MODULE_MAP[name]
@@ -90,8 +97,9 @@ def __dir__():
def main():
from .precheck_service import run as _run_service
import sys
from .precheck_service import run as _run_service
return _run_service(sys.argv[1:])

View File

@@ -3,6 +3,8 @@
from __future__ import annotations
import json
__all__ = ["run"]
import sys
from . import precheck_analysis, precheck_snapshot, precheck_state
@@ -19,12 +21,24 @@ def run(argv: list[str] | None = None) -> int:
return 0
try:
state = precheck_state.sanitize_state_for_stale_triggers(precheck_state.load_state())
snapshot = precheck_snapshot.build_snapshot()
analysis = precheck_analysis.analyze_trigger(snapshot, state)
precheck_state.save_state(precheck_state.update_state_after_observation(state, snapshot, analysis))
print(json.dumps(analysis, ensure_ascii=False, indent=2))
captured = {}
def _modifier(state):
state = precheck_state.sanitize_state_for_stale_triggers(state)
snapshot = precheck_snapshot.build_snapshot()
analysis = precheck_analysis.analyze_trigger(snapshot, state)
new_state = precheck_state.update_state_after_observation(state, snapshot, analysis)
state.clear()
state.update(new_state)
captured["analysis"] = analysis
return state
precheck_state.modify_state(_modifier)
result = {"ok": True, **captured["analysis"]}
print(json.dumps(result, ensure_ascii=False, indent=2))
return 0
except Exception as exc:
print(json.dumps(precheck_analysis.build_failure_payload(exc), ensure_ascii=False, indent=2))
return 0
payload = precheck_analysis.build_failure_payload(exc)
result = {"ok": False, **payload}
print(json.dumps(result, ensure_ascii=False, indent=2))
return 1

View File

@@ -2,4 +2,3 @@
from __future__ import annotations
from .snapshot_builder import build_snapshot

View File

@@ -6,32 +6,36 @@ import json
from .state_manager import (
load_state,
sanitize_state_for_stale_triggers,
save_state,
update_state_after_observation,
modify_state,
)
from .time_utils import utc_iso
def mark_run_requested(note: str = "") -> dict:
def _modifier(state):
state["run_requested_at"] = utc_iso()
state["run_request_note"] = note
return state
modify_state(_modifier)
state = load_state()
state["run_requested_at"] = utc_iso()
state["run_request_note"] = note
save_state(state)
payload = {"ok": True, "run_requested_at": state["run_requested_at"], "note": note}
print(json.dumps(payload, ensure_ascii=False))
return payload
def ack_analysis(note: str = "") -> dict:
def _modifier(state):
state["last_deep_analysis_at"] = utc_iso()
state["pending_trigger"] = False
state["pending_reasons"] = []
state["last_ack_note"] = note
state.pop("run_requested_at", None)
state.pop("run_request_note", None)
return state
modify_state(_modifier)
state = load_state()
state["last_deep_analysis_at"] = utc_iso()
state["pending_trigger"] = False
state["pending_reasons"] = []
state["last_ack_note"] = note
state.pop("run_requested_at", None)
state.pop("run_request_note", None)
save_state(state)
payload = {"ok": True, "acked_at": state["last_deep_analysis_at"], "note": note}
print(json.dumps(payload, ensure_ascii=False))
return payload

View File

@@ -0,0 +1,292 @@
"""Review generation service for CoinHunter."""
from __future__ import annotations
import json
__all__ = [
"ensure_review_dir",
"norm_symbol",
"fetch_current_price",
"analyze_trade",
"analyze_hold_passes",
"analyze_cash_misses",
"generate_review",
"save_review",
"print_review",
]
from datetime import datetime, timedelta, timezone
from pathlib import Path
from ..logger import get_logs_last_n_hours
from ..runtime import get_runtime_paths
from .exchange_service import get_exchange
CST = timezone(timedelta(hours=8))
def _paths():
    # Resolved per call (not cached at import) so runtime-root changes apply.
    return get_runtime_paths()
def _review_dir() -> Path:
    # Directory where hourly review JSON files are written.
    return _paths().reviews_dir  # type: ignore[no-any-return]
def _exchange():
    # Thin indirection over exchange_service.get_exchange().
    return get_exchange()
def ensure_review_dir():
    # Create the reviews output directory (and parents) if it doesn't exist.
    _review_dir().mkdir(parents=True, exist_ok=True)
def norm_symbol(symbol: str) -> str:
    """Return *symbol* normalized to ccxt's "BASE/USDT" spelling.

    Strips dashes/underscores and upper-cases; adds the slash to a bare
    "XXXUSDT" string and leaves everything else untouched.
    """
    s = symbol.replace("-", "").replace("_", "").upper()
    if "/" in s or not s.endswith("USDT"):
        return s
    return s[:-4] + "/USDT"
def fetch_current_price(ex, symbol: str):
    """Return the latest traded price for *symbol*, or None if unavailable."""
    try:
        last = ex.fetch_ticker(norm_symbol(symbol))["last"]
        return float(last)
    except Exception:
        # Deliberately broad: a missing ticker must not break review generation.
        return None
def analyze_trade(trade: dict, ex) -> dict:
    """Grade one executed trade against the current market price.

    BUY: "good" when price ran up >2% afterwards, "bad" when it fell >2%.
    SELL_ALL: "good" when price kept falling >2%, "missed" when it rallied
    >2% after the sale.  Without both an execution price and a live price
    the outcome stays "neutral" and the PnL estimate is None.
    """
    symbol = trade.get("symbol")
    price = trade.get("price")
    action = trade.get("action", "")
    # Only hit the exchange when the trade actually names a symbol.
    current_price = fetch_current_price(ex, symbol) if symbol else None
    pnl_estimate = None
    outcome = "neutral"
    if price and current_price and symbol:
        change_pct = (current_price - float(price)) / float(price) * 100
        if action == "BUY":
            pnl_estimate = round(change_pct, 2)
            outcome = "good" if change_pct > 2 else "bad" if change_pct < -2 else "neutral"
        elif action == "SELL_ALL":
            # Sign flipped: for a sale, a post-sale drop is positive edge.
            pnl_estimate = round(-change_pct, 2)
            outcome = "good" if change_pct < -2 else "missed" if change_pct > 2 else "neutral"
    return {
        "timestamp": trade.get("timestamp"),
        "symbol": symbol,
        "action": action,
        "decision_id": trade.get("decision_id"),
        "execution_price": price,
        "current_price": current_price,
        "pnl_estimate_pct": pnl_estimate,
        "outcome_assessment": outcome,
    }
def analyze_hold_passes(decisions: list, ex) -> list:
    """Find opportunities PASS'd during HOLD decisions that later rallied.

    Scans HOLD decisions for evaluated opportunities whose verdict contains
    "pass" (now case-insensitive), looks up the price recorded in the
    decision's market snapshot, and reports symbols that have since risen
    more than 3% versus the live price.

    Returns a list of miss dicts (timestamp, symbol, prices, change %, and
    a truncated verdict snippet).
    """
    misses = []
    for d in decisions:
        if d.get("decision") != "HOLD":
            continue
        analysis = d.get("analysis")
        if not isinstance(analysis, dict):
            continue
        opportunities = analysis.get("opportunities_evaluated", [])
        market_snapshot = d.get("market_snapshot", {})
        if not opportunities or not market_snapshot:
            continue
        for op in opportunities:
            verdict = op.get("verdict", "")
            # Fix: previous check matched only "PASS"/"pass" and missed
            # mixed-case verdicts such as "Pass".
            if "PASS" not in verdict.upper():
                continue
            symbol = op.get("symbol", "")
            # Snapshot may key by "BTC/USDT" or "BTCUSDT"; try both.
            snap = market_snapshot.get(symbol) or market_snapshot.get(symbol.replace("/", ""))
            if not snap:
                continue
            decision_price = None
            if isinstance(snap, dict):
                # Fix: `get("lastPrice", 0)` crashed float() when the key was
                # present with an explicit None; `or 0` makes it None-safe.
                decision_price = float(snap.get("lastPrice") or 0) or float(snap.get("last") or 0)
            elif isinstance(snap, (int, float, str)):
                decision_price = float(snap)
            if not decision_price:
                continue
            current_price = fetch_current_price(ex, symbol)
            if not current_price:
                continue
            change_pct = (current_price - decision_price) / decision_price * 100
            if change_pct > 3:
                misses.append({
                    "timestamp": d.get("timestamp"),
                    "symbol": symbol,
                    "decision_price": round(decision_price, 8),
                    "current_price": round(current_price, 8),
                    "change_pct": round(change_pct, 2),
                    "verdict_snippet": verdict[:80],
                })
    return misses
def analyze_cash_misses(decisions: list, ex) -> list:
    """Find USDT-quoted symbols that rallied while the portfolio sat in cash.

    For each decision where >= 90% of the balance total was USDT, compares
    every snapshot symbol's recorded price to the live price and records
    rallies above 5%. Deduplicated to keep only the largest miss per symbol.

    Returns a list of miss dicts (timestamp, symbol, both prices, change %).
    """
    def _safe_float(value) -> float:
        # Fix: non-numeric strings previously raised ValueError from float();
        # treat anything unparseable as 0 so one bad record cannot abort the
        # whole review.
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

    # Note: the previous version also built a `watchlist` set here that was
    # never read; that dead code has been removed.
    misses = []
    for d in decisions:
        ts = d.get("timestamp")
        balances = d.get("balances") or d.get("balances_before", {})
        if not balances:
            continue
        # NOTE(review): this sums raw token quantities, not USD values, so
        # the >= 90%-cash test is only approximate — confirm intent.
        total = sum(_safe_float(v) if isinstance(v, (int, float, str)) else 0 for v in balances.values())
        usdt = _safe_float(balances.get("USDT", 0))
        if total == 0 or (usdt / total) < 0.9:
            continue
        snap = d.get("market_snapshot", {})
        if not isinstance(snap, dict):
            continue
        for symbol, data in snap.items():
            if not symbol.endswith("USDT"):
                continue
            decision_price = None
            if isinstance(data, dict):
                # `or 0` guards against explicit None values in the snapshot.
                decision_price = _safe_float(data.get("lastPrice") or 0) or _safe_float(data.get("last") or 0)
            elif isinstance(data, (int, float, str)):
                decision_price = _safe_float(data)
            if not decision_price:
                continue
            current_price = fetch_current_price(ex, symbol)
            if not current_price:
                continue
            change_pct = (current_price - decision_price) / decision_price * 100
            if change_pct > 5:
                misses.append({
                    "timestamp": ts,
                    "symbol": symbol,
                    "decision_price": round(decision_price, 8),
                    "current_price": round(current_price, 8),
                    "change_pct": round(change_pct, 2),
                })
    # Keep only the single largest miss per symbol.
    seen: dict[str, dict] = {}
    for m in misses:
        sym = m["symbol"]
        if sym not in seen or m["change_pct"] > seen[sym]["change_pct"]:
            seen[sym] = m
    return list(seen.values())
def generate_review(hours: int = 1) -> dict:
    """Build a self-assessment review of the last *hours* of activity.

    Pulls decision/trade/error logs, scores each trade against the live
    price, detects missed opportunities (post-PASS rallies and rallies
    while sitting in cash), then derives summary stats, insights, and
    standing recommendations for the agent.

    Returns the review as a plain dict (keys initialized below).
    """
    decisions = get_logs_last_n_hours("decisions", hours)
    trades = get_logs_last_n_hours("trades", hours)
    errors = get_logs_last_n_hours("errors", hours)
    review: dict = {
        "review_period_hours": hours,
        "review_timestamp": datetime.now(CST).isoformat(),
        "total_decisions": len(decisions),
        "total_trades": len(trades),
        "total_errors": len(errors),
        "decision_quality": [],
        "stats": [],
        "insights": [],
        "recommendations": [],
    } if False else {
        "review_period_hours": hours,
        "review_timestamp": datetime.now(CST).isoformat(),
        "total_decisions": len(decisions),
        "total_trades": len(trades),
        "total_errors": len(errors),
        "decision_quality": [],
        "stats": {},
        "insights": [],
        "recommendations": [],
    }
    # Nothing happened this period: short-circuit before touching the exchange.
    if not decisions and not trades:
        review["insights"].append("No decisions or trades in this period")
        return review
    ex = get_exchange()
    # Tally per-trade outcome labels produced by analyze_trade.
    outcomes = {"good": 0, "neutral": 0, "bad": 0, "missed": 0}
    pnl_samples = []
    for trade in trades:
        analysis = analyze_trade(trade, ex)
        review["decision_quality"].append(analysis)
        outcomes[analysis["outcome_assessment"]] += 1
        if analysis["pnl_estimate_pct"] is not None:
            pnl_samples.append(analysis["pnl_estimate_pct"])
    hold_pass_misses = analyze_hold_passes(decisions, ex)
    cash_misses = analyze_cash_misses(decisions, ex)
    # "Missed" aggregates three distinct failure modes: sold-then-rallied,
    # PASS'd-then-rallied, and rallied-while-in-cash.
    total_missed = outcomes["missed"] + len(hold_pass_misses) + len(cash_misses)
    review["stats"] = {
        "good_decisions": outcomes["good"],
        "neutral_decisions": outcomes["neutral"],
        "bad_decisions": outcomes["bad"],
        "missed_opportunities": total_missed,
        "missed_sell_all": outcomes["missed"],
        "missed_hold_passes": len(hold_pass_misses),
        "missed_cash_sits": len(cash_misses),
        "avg_estimated_edge_pct": round(sum(pnl_samples) / len(pnl_samples), 2) if pnl_samples else None,
    }
    if errors:
        review["insights"].append(f"{len(errors)} execution/system errors this period; robustness needs attention")
    if outcomes["bad"] > outcomes["good"]:
        review["insights"].append("Recent trade quality is weak; consider lowering frequency or raising entry threshold")
    if total_missed > 0:
        parts = []
        if outcomes["missed"]:
            parts.append(f"sold then rallied {outcomes['missed']} times")
        if hold_pass_misses:
            parts.append(f"missed after PASS {len(hold_pass_misses)} times")
        if cash_misses:
            parts.append(f"missed while sitting in cash {len(cash_misses)} times")
        review["insights"].append("Opportunities missed: " + ", ".join(parts) + "; consider relaxing trend-following or entry conditions")
    if outcomes["good"] >= max(1, outcomes["bad"] + total_missed):
        review["insights"].append("Recent decisions are generally acceptable")
    if not trades and decisions:
        review["insights"].append("Decisions without trades; may be due to waiting on sidelines, minimum notional limits, or execution interception")
    # Low conversion rate (< 10% of decisions became trades) hints at
    # exchange-side filters rejecting small orders.
    if len(trades) < len(decisions) * 0.1 and decisions:
        review["insights"].append("Many decisions did not convert to trades; check if minimum notional/step-size/fee buffer thresholds are too high")
    # Surface at most three concrete examples of each miss type.
    if hold_pass_misses:
        for m in hold_pass_misses[:3]:
            review["insights"].append(f"PASS'd {m['symbol']} during HOLD, then it rose {m['change_pct']}%")
    if cash_misses:
        for m in cash_misses[:3]:
            review["insights"].append(f"{m['symbol']} rose {m['change_pct']}% while portfolio was mostly USDT")
    review["recommendations"] = [
        "Check whether minimum-notional/precision rejections are blocking small-capital execution",
        "If estimated edge is negative for two consecutive review periods, reduce rebalancing frequency next hour",
        "If error logs are increasing, prioritize defensive mode (hold more USDT)",
    ]
    return review
def save_review(review: dict) -> str:
    """Persist *review* as a timestamped JSON file; return its path as str."""
    ensure_review_dir()
    stamp = datetime.now(CST).strftime("%Y%m%d_%H%M%S")
    out_path = _review_dir() / f"review_{stamp}.json"
    serialized = json.dumps(review, indent=2, ensure_ascii=False)
    out_path.write_text(serialized, encoding="utf-8")
    return str(out_path)
def print_review(review: dict):
    """Render a review report to stdout in a human-readable layout."""
    divider = "=" * 50
    print(divider)
    print("📊 Coin Hunter Review Report")
    print(f"Review time: {review['review_timestamp']}")
    print(f"Period: last {review['review_period_hours']} hours")
    print(
        f"Total decisions: {review['total_decisions']} | "
        f"Total trades: {review['total_trades']} | "
        f"Total errors: {review['total_errors']}"
    )
    stats = review.get("stats", {})
    print("\nDecision quality:")
    quality_rows = (
        (" ✓ Good", "good_decisions"),
        (" ○ Neutral", "neutral_decisions"),
        (" ✗ Bad", "bad_decisions"),
        (" ↗ Missed opportunities", "missed_opportunities"),
    )
    for label, key in quality_rows:
        print(f"{label}: {stats.get(key, 0)}")
    if stats.get("avg_estimated_edge_pct") is not None:
        print(f" Avg estimated edge: {stats['avg_estimated_edge_pct']}%")
    if review.get("insights"):
        print("\n💡 Insights:")
        for entry in review["insights"]:
            print(entry)
    if review.get("recommendations"):
        print("\n🔧 Recommendations:")
        for entry in review["recommendations"]:
            print(entry)
    print(divider)

View File

@@ -1,6 +1,15 @@
"""CLI parser and legacy argument normalization for smart executor."""
import argparse
__all__ = [
"COMMAND_CANONICAL",
"add_shared_options",
"build_parser",
"normalize_legacy_argv",
"parse_cli_args",
"cli_action_args",
]
COMMAND_CANONICAL = {
"bal": "balances",
@@ -16,6 +25,10 @@ COMMAND_CANONICAL = {
"sell_all": "sell-all",
"rotate": "rebalance",
"rebalance": "rebalance",
"orders": "orders",
"cancel": "cancel",
"order-status": "order-status",
"order_status": "order-status",
}
@@ -41,25 +54,32 @@ def build_parser() -> argparse.ArgumentParser:
" hold Record a hold decision without trading\n"
" buy SYMBOL USDT Buy a symbol using a USDT notional amount\n"
" flat SYMBOL Exit an entire symbol position\n"
" rotate FROM TO Rotate exposure from one symbol into another\n\n"
" rotate FROM TO Rotate exposure from one symbol into another\n"
" orders List open spot orders\n"
" order-status SYMBOL ORDER_ID Get status of a specific order\n"
" cancel SYMBOL [ORDER_ID] Cancel an open order (cancels newest if ORDER_ID omitted)\n\n"
"Examples:\n"
" coinhunter exec bal\n"
" coinhunter exec overview\n"
" coinhunter exec hold\n"
" coinhunter exec buy ENJUSDT 100\n"
" coinhunter exec flat ENJUSDT --dry-run\n"
" coinhunter exec rotate PEPEUSDT ETHUSDT\n\n"
" coinhunter exec rotate PEPEUSDT ETHUSDT\n"
" coinhunter exec orders\n"
" coinhunter exec order-status ENJUSDT 123456\n"
" coinhunter exec cancel ENJUSDT 123456\n\n"
"Legacy forms remain supported for backward compatibility:\n"
" balances, balance -> bal\n"
" acct, status -> overview\n"
" sell-all, sell_all -> flat\n"
" rebalance -> rotate\n"
" order_status -> order-status\n"
" HOLD / BUY / SELL_ALL / REBALANCE via --decision are still accepted\n"
),
)
subparsers = parser.add_subparsers(
dest="command",
metavar="{bal,overview,hold,buy,flat,rotate,...}",
metavar="{bal,overview,hold,buy,flat,rotate,orders,order-status,cancel,...}",
)
subparsers.add_parser("bal", parents=[shared], help="Preferred: print live balances as stable JSON")
@@ -77,6 +97,16 @@ def build_parser() -> argparse.ArgumentParser:
rebalance.add_argument("from_symbol")
rebalance.add_argument("to_symbol")
subparsers.add_parser("orders", parents=[shared], help="List open spot orders")
order_status = subparsers.add_parser("order-status", parents=[shared], help="Get status of a specific order")
order_status.add_argument("symbol")
order_status.add_argument("order_id")
cancel = subparsers.add_parser("cancel", parents=[shared], help="Cancel an open order")
cancel.add_argument("symbol")
cancel.add_argument("order_id", nargs="?")
subparsers.add_parser("balances", parents=[shared], help=argparse.SUPPRESS)
subparsers.add_parser("balance", parents=[shared], help=argparse.SUPPRESS)
subparsers.add_parser("acct", parents=[shared], help=argparse.SUPPRESS)
@@ -91,6 +121,10 @@ def build_parser() -> argparse.ArgumentParser:
rebalance_legacy.add_argument("from_symbol")
rebalance_legacy.add_argument("to_symbol")
order_status_legacy = subparsers.add_parser("order_status", parents=[shared], help=argparse.SUPPRESS)
order_status_legacy.add_argument("symbol")
order_status_legacy.add_argument("order_id")
subparsers._choices_actions = [
action
for action in subparsers._choices_actions
@@ -126,7 +160,13 @@ def normalize_legacy_argv(argv: list[str]) -> list[str]:
"STATUS": ["status"],
"status": ["status"],
"OVERVIEW": ["status"],
"overview": ["status"],
"ORDERS": ["orders"],
"orders": ["orders"],
"CANCEL": ["cancel"],
"cancel": ["cancel"],
"ORDER_STATUS": ["order-status"],
"order_status": ["order-status"],
"order-status": ["order-status"],
}
has_legacy_flag = any(t.startswith("--decision") for t in argv)
@@ -166,18 +206,18 @@ def normalize_legacy_argv(argv: list[str]) -> list[str]:
rebuilt += ["hold"]
elif decision == "SELL_ALL":
if not ns.symbol:
raise RuntimeError("旧式 --decision SELL_ALL 需要搭配 --symbol")
raise RuntimeError("Legacy --decision SELL_ALL requires --symbol")
rebuilt += ["sell-all", ns.symbol]
elif decision == "BUY":
if not ns.symbol or ns.amount_usdt is None:
raise RuntimeError("旧式 --decision BUY 需要 --symbol --amount-usdt")
raise RuntimeError("Legacy --decision BUY requires --symbol and --amount-usdt")
rebuilt += ["buy", ns.symbol, str(ns.amount_usdt)]
elif decision == "REBALANCE":
if not ns.from_symbol or not ns.to_symbol:
raise RuntimeError("旧式 --decision REBALANCE 需要 --from-symbol --to-symbol")
raise RuntimeError("Legacy --decision REBALANCE requires --from-symbol and --to-symbol")
rebuilt += ["rebalance", ns.from_symbol, ns.to_symbol]
else:
raise RuntimeError(f"不支持的旧式 decision: {decision}")
raise RuntimeError(f"Unsupported legacy decision: {decision}")
return rebuilt + unknown
@@ -202,4 +242,8 @@ def cli_action_args(args, action: str) -> list[str]:
return [args.symbol, str(args.amount_usdt)]
if action == "rebalance":
return [args.from_symbol, args.to_symbol]
if action == "order_status":
return [args.symbol, args.order_id]
if action == "cancel":
return [args.symbol, args.order_id] if args.order_id else [args.symbol]
return []

View File

@@ -3,21 +3,27 @@
from __future__ import annotations
import os
__all__ = ["run"]
import sys
from ..logger import log_decision, log_error
from .exchange_service import fetch_balances, build_market_snapshot
from .exchange_service import build_market_snapshot, fetch_balances
from .execution_state import default_decision_id, get_execution_state, record_execution_state
from .portfolio_service import load_positions
from .smart_executor_parser import parse_cli_args, cli_action_args
from .trade_common import is_dry_run, log, set_dry_run, bj_now_iso
from .smart_executor_parser import cli_action_args, parse_cli_args
from .trade_common import bj_now_iso, log, set_dry_run
from .trade_execution import (
command_balances,
command_status,
build_decision_context,
action_sell_all,
action_buy,
action_rebalance,
action_sell_all,
build_decision_context,
command_balances,
command_cancel,
command_order_status,
command_orders,
command_status,
print_json,
)
@@ -36,9 +42,9 @@ def run(argv: list[str] | None = None) -> int:
set_dry_run(True)
previous = get_execution_state(decision_id)
read_only_action = action in {"balance", "balances", "status"}
read_only_action = action in {"balance", "balances", "status", "orders", "order_status", "cancel"}
if previous and previous.get("status") == "success" and not read_only_action:
log(f"⚠️ decision_id={decision_id} 已执行成功,跳过重复执行")
log(f"⚠️ decision_id={decision_id} already executed successfully, skipping duplicate")
return 0
try:
@@ -48,8 +54,14 @@ def run(argv: list[str] | None = None) -> int:
if read_only_action:
if action in {"balance", "balances"}:
command_balances(ex)
else:
elif action == "status":
command_status(ex)
elif action == "orders":
command_orders(ex)
elif action == "order_status":
command_order_status(ex, args.symbol, args.order_id)
elif action == "cancel":
command_cancel(ex, args.symbol, getattr(args, "order_id", None))
return 0
decision_context = build_decision_context(ex, action, argv_tail, decision_id)
@@ -88,10 +100,10 @@ def run(argv: list[str] | None = None) -> int:
"execution_result": {"status": "hold"},
}
)
log("😴 决策: 持续持有,无操作")
log("😴 Decision: hold, no action")
result = {"status": "hold"}
else:
raise RuntimeError(f"未知动作: {action};请运行 --help 查看正确 CLI 用法")
raise RuntimeError(f"Unknown action: {action}; run --help for valid CLI usage")
record_execution_state(
decision_id,
@@ -103,7 +115,9 @@ def run(argv: list[str] | None = None) -> int:
"result": result,
},
)
log(f"✅ 执行完成 decision_id={decision_id}")
if not read_only_action:
print_json({"ok": True, "decision_id": decision_id, "action": action, "result": result})
log(f"Execution completed decision_id={decision_id}")
return 0
except Exception as exc:
@@ -124,5 +138,5 @@ def run(argv: list[str] | None = None) -> int:
action=action,
args=argv_tail,
)
log(f"执行失败: {exc}")
log(f"Execution failed: {exc}")
return 1

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
from .candidate_scoring import top_candidates_from_tickers
from .data_utils import norm_symbol, stable_hash, to_float
from .market_data import enrich_candidates_and_positions, get_exchange, regime_from_pct
from .precheck_constants import MIN_ACTIONABLE_USDT, MIN_REAL_POSITION_VALUE_USDT
from .precheck_constants import MIN_REAL_POSITION_VALUE_USDT
from .state_manager import load_config, load_positions
from .time_utils import get_local_now, utc_iso

View File

@@ -4,34 +4,58 @@ from __future__ import annotations
from datetime import timedelta
from ..runtime import load_env_file
__all__ = [
"load_env",
"load_positions",
"load_state",
"modify_state",
"load_config",
"clear_run_request_fields",
"sanitize_state_for_stale_triggers",
"save_state",
"update_state_after_observation",
]
from ..runtime import get_runtime_paths, load_env_file
from .data_utils import load_json
from .file_utils import load_json_locked, read_modify_write_json, save_json_locked
from .precheck_constants import (
CONFIG_FILE,
ENV_FILE,
MAX_PENDING_TRIGGER_MINUTES,
MAX_RUN_REQUEST_MINUTES,
POSITIONS_FILE,
STATE_DIR,
STATE_FILE,
)
from .time_utils import parse_ts, utc_iso, utc_now
def _paths():
return get_runtime_paths()
def load_env() -> None:
load_env_file()
load_env_file(_paths())
def load_positions():
return load_json(POSITIONS_FILE, {}).get("positions", [])
return load_json(_paths().positions_file, {}).get("positions", [])
def load_state():
return load_json(STATE_FILE, {})
paths = _paths()
return load_json_locked(paths.precheck_state_file, paths.precheck_state_lock, {})
def modify_state(modifier):
"""Atomic read-modify-write for precheck state."""
paths = _paths()
read_modify_write_json(
paths.precheck_state_file,
paths.precheck_state_lock,
{},
modifier,
)
def load_config():
return load_json(CONFIG_FILE, {})
return load_json(_paths().config_file, {})
def clear_run_request_fields(state: dict):
@@ -58,14 +82,14 @@ def sanitize_state_for_stale_triggers(state: dict):
)
pending_trigger = False
notes.append(
f"自动清理已完成的 run_requested 标记:最近深度分析时间 {last_deep_analysis_at.isoformat()} >= 请求时间 {run_requested_at.isoformat()}"
f"Auto-cleared completed run_requested marker: last_deep_analysis_at {last_deep_analysis_at.isoformat()} >= run_requested_at {run_requested_at.isoformat()}"
)
run_requested_at = None
if run_requested_at and now - run_requested_at > timedelta(minutes=MAX_RUN_REQUEST_MINUTES):
clear_run_request_fields(sanitized)
notes.append(
f"自动清理超时 run_requested 标记:已等待 {(now - run_requested_at).total_seconds() / 60:.1f} 分钟,超过 {MAX_RUN_REQUEST_MINUTES} 分钟"
f"Auto-cleared stale run_requested marker: waited {(now - run_requested_at).total_seconds() / 60:.1f} minutes, exceeding {MAX_RUN_REQUEST_MINUTES} minutes"
)
run_requested_at = None
@@ -78,7 +102,7 @@ def sanitize_state_for_stale_triggers(state: dict):
f"{(now - pending_anchor).total_seconds() / 60:.1f} minutes"
)
notes.append(
f"自动解除 pending_trigger触发状态已悬挂 {(now - pending_anchor).total_seconds() / 60:.1f} 分钟,超过 {MAX_PENDING_TRIGGER_MINUTES} 分钟"
f"Auto-recovered stale pending_trigger: trigger was dangling for {(now - pending_anchor).total_seconds() / 60:.1f} minutes, exceeding {MAX_PENDING_TRIGGER_MINUTES} minutes"
)
sanitized["_stale_recovery_notes"] = notes
@@ -86,12 +110,16 @@ def sanitize_state_for_stale_triggers(state: dict):
def save_state(state: dict):
import json
STATE_DIR.mkdir(parents=True, exist_ok=True)
paths = _paths()
paths.state_dir.mkdir(parents=True, exist_ok=True)
state_to_save = dict(state)
state_to_save.pop("_stale_recovery_notes", None)
STATE_FILE.write_text(json.dumps(state_to_save, indent=2, ensure_ascii=False), encoding="utf-8")
save_json_locked(
paths.precheck_state_file,
paths.precheck_state_lock,
state_to_save,
)
def update_state_after_observation(state: dict, snapshot: dict, analysis: dict):
@@ -123,6 +151,10 @@ def update_state_after_observation(state: dict, snapshot: dict, analysis: dict):
for hr in analysis.get("hard_reasons", []):
last_hard_reasons_at[hr] = snapshot["generated_at"]
cutoff = utc_now() - timedelta(hours=24)
pruned = {k: v for k, v in last_hard_reasons_at.items() if parse_ts(v) and parse_ts(v) > cutoff}
pruned: dict[str, str] = {}
for k, v in last_hard_reasons_at.items():
ts = parse_ts(v)
if ts and ts > cutoff:
pruned[k] = v
new_state["last_hard_reasons_at"] = pruned
return new_state

View File

@@ -2,7 +2,7 @@
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

View File

@@ -1,12 +1,15 @@
"""Common trade utilities (time, logging, constants)."""
import os
from datetime import datetime, timezone, timedelta
import sys
from datetime import datetime, timedelta, timezone
from ..runtime import get_user_config
CST = timezone(timedelta(hours=8))
_DRY_RUN = {"value": os.getenv("DRY_RUN", "false").lower() == "true"}
USDT_BUFFER_PCT = 0.03
MIN_REMAINING_DUST_USDT = 1.0
USDT_BUFFER_PCT = get_user_config("trading.usdt_buffer_pct", 0.03)
MIN_REMAINING_DUST_USDT = get_user_config("trading.min_remaining_dust_usdt", 1.0)
def is_dry_run() -> bool:
@@ -18,7 +21,7 @@ def set_dry_run(value: bool):
def log(msg: str):
print(f"[{datetime.now(CST).strftime('%Y-%m-%d %H:%M:%S')} CST] {msg}")
print(f"[{datetime.now(CST).strftime('%Y-%m-%d %H:%M:%S')} CST] {msg}", file=sys.stderr)
def bj_now_iso():

View File

@@ -1,17 +1,37 @@
"""Trade execution actions (buy, sell, rebalance, hold, status)."""
import json
__all__ = [
"print_json",
"build_decision_context",
"market_sell",
"market_buy",
"action_sell_all",
"action_buy",
"action_rebalance",
"command_status",
"command_balances",
"command_orders",
"command_order_status",
"command_cancel",
]
from ..logger import log_decision, log_trade
from .exchange_service import (
build_market_snapshot,
fetch_balances,
norm_symbol,
storage_symbol,
build_market_snapshot,
prepare_buy_quantity,
prepare_sell_quantity,
storage_symbol,
)
from .portfolio_service import load_positions, save_positions, upsert_position, reconcile_positions_with_exchange
from .trade_common import is_dry_run, USDT_BUFFER_PCT, log, bj_now_iso
from .portfolio_service import (
load_positions,
reconcile_positions_with_exchange,
update_positions,
upsert_position,
)
from .trade_common import USDT_BUFFER_PCT, bj_now_iso, is_dry_run, log
def print_json(payload: dict) -> None:
@@ -35,7 +55,7 @@ def build_decision_context(ex, action: str, argv_tail: list[str], decision_id: s
def market_sell(ex, symbol: str, qty: float, decision_id: str):
sym, qty, bid, est_cost = prepare_sell_quantity(ex, symbol, qty)
if is_dry_run():
log(f"[DRY RUN] 卖出 {sym} 数量 {qty}")
log(f"[DRY RUN] SELL {sym} qty {qty}")
return {"id": f"dry-sell-{decision_id}", "symbol": sym, "amount": qty, "price": bid, "cost": est_cost, "status": "closed"}
order = ex.create_market_sell_order(sym, qty, params={"newClientOrderId": f"ch-{decision_id}-sell"})
return order
@@ -44,7 +64,7 @@ def market_sell(ex, symbol: str, qty: float, decision_id: str):
def market_buy(ex, symbol: str, amount_usdt: float, decision_id: str):
sym, qty, ask, est_cost = prepare_buy_quantity(ex, symbol, amount_usdt)
if is_dry_run():
log(f"[DRY RUN] 买入 {sym} 金额 ${est_cost:.4f} 数量 {qty}")
log(f"[DRY RUN] BUY {sym} amount ${est_cost:.4f} qty {qty}")
return {"id": f"dry-buy-{decision_id}", "symbol": sym, "amount": qty, "price": ask, "cost": est_cost, "status": "closed"}
order = ex.create_market_buy_order(sym, qty, params={"newClientOrderId": f"ch-{decision_id}-buy"})
return order
@@ -55,13 +75,13 @@ def action_sell_all(ex, symbol: str, decision_id: str, decision_context: dict):
base = norm_symbol(symbol).split("/")[0]
qty = float(balances_before.get(base, 0))
if qty <= 0:
raise RuntimeError(f"{base} 余额为0无法卖出")
raise RuntimeError(f"{base} balance is zero, cannot sell")
order = market_sell(ex, symbol, qty, decision_id)
positions_after, balances_after = (
reconcile_positions_with_exchange(ex, load_positions())
if not is_dry_run()
else (load_positions(), balances_before)
)
if is_dry_run():
positions_after = load_positions()
balances_after = balances_before
else:
positions_after, balances_after = reconcile_positions_with_exchange(ex)
log_trade(
"SELL_ALL",
norm_symbol(symbol),
@@ -88,13 +108,12 @@ def action_sell_all(ex, symbol: str, decision_id: str, decision_context: dict):
return order
def action_buy(ex, symbol: str, amount_usdt: float, decision_id: str, decision_context: dict, simulated_usdt_balance: float = None):
def action_buy(ex, symbol: str, amount_usdt: float, decision_id: str, decision_context: dict, simulated_usdt_balance: float | None = None):
balances_before = fetch_balances(ex) if simulated_usdt_balance is None else {"USDT": simulated_usdt_balance}
usdt = float(balances_before.get("USDT", 0))
if usdt < amount_usdt:
raise RuntimeError(f"USDT 余额不足(${usdt:.4f} < ${amount_usdt:.4f}")
raise RuntimeError(f"Insufficient USDT balance (${usdt:.4f} < ${amount_usdt:.4f})")
order = market_buy(ex, symbol, amount_usdt, decision_id)
positions_existing = load_positions()
sym_store = storage_symbol(symbol)
price = float(order.get("price") or 0)
qty = float(order.get("amount") or 0)
@@ -110,18 +129,13 @@ def action_buy(ex, symbol: str, amount_usdt: float, decision_id: str, decision_c
"updated_at": bj_now_iso(),
"note": "Smart executor entry",
}
upsert_position(positions_existing, position)
if is_dry_run():
balances_after = balances_before
positions_after = positions_existing
positions_after = load_positions()
upsert_position(positions_after, position)
else:
save_positions(positions_existing)
positions_after, balances_after = reconcile_positions_with_exchange(ex, positions_existing)
for p in positions_after:
if p["symbol"] == sym_store and price:
p["avg_cost"] = price
p["updated_at"] = bj_now_iso()
save_positions(positions_after)
update_positions(lambda p: upsert_position(p, position))
positions_after, balances_after = reconcile_positions_with_exchange(ex, [position])
log_trade(
"BUY",
norm_symbol(symbol),
@@ -160,7 +174,7 @@ def action_rebalance(ex, from_symbol: str, to_symbol: str, decision_id: str, dec
spend = usdt * (1 - USDT_BUFFER_PCT)
simulated_usdt = None
if spend < 5:
raise RuntimeError(f"卖出后 USDT ${spend:.4f} 不足,无法买入新币")
raise RuntimeError(f"USDT ${spend:.4f} insufficient after sell, cannot buy new token")
buy_order = action_buy(ex, to_symbol, spend, decision_id + "b", decision_context, simulated_usdt_balance=simulated_usdt)
return {"sell": sell_order, "buy": buy_order}
@@ -183,3 +197,47 @@ def command_balances(ex):
payload = {"balances": balances}
print_json(payload)
return balances
def command_orders(ex):
    """Print all open spot orders as a JSON payload and return them.

    Raises RuntimeError (with the original exception chained) when the
    exchange query fails.
    """
    # Fix: the previous version set `sym = None` and then branched on it,
    # so the symbol-filtered call was unreachable dead code.
    try:
        orders = ex.fetch_open_orders()
    except Exception as e:
        # Chain the cause so the underlying exchange error stays visible.
        raise RuntimeError(f"Failed to fetch open orders: {e}") from e
    payload = {"orders": orders}
    print_json(payload)
    return orders
def command_order_status(ex, symbol: str, order_id: str):
    """Fetch a single order by id, print it as JSON, and return it.

    Raises RuntimeError (with the original exception chained) when the
    lookup fails.
    """
    sym = norm_symbol(symbol)
    try:
        order = ex.fetch_order(order_id, sym)
    except Exception as e:
        # Chain the cause so the underlying exchange error stays visible.
        raise RuntimeError(f"Failed to fetch order {order_id}: {e}") from e
    payload = {"order": order}
    print_json(payload)
    return order
def command_cancel(ex, symbol: str, order_id: str | None):
    """Cancel an open order on *symbol*; without an id, cancel the newest.

    In dry-run mode, logs the intent and returns a stub result without
    touching the exchange. Raises RuntimeError (with the original
    exception chained) on any exchange failure, or when there is nothing
    to cancel.
    """
    sym = norm_symbol(symbol)
    if is_dry_run():
        log(f"[DRY RUN] Would cancel order {order_id or '(newest)'} on {sym}")
        return {"dry_run": True, "symbol": sym, "order_id": order_id}
    if not order_id:
        try:
            open_orders = ex.fetch_open_orders(sym)
        except Exception as e:
            raise RuntimeError(f"Failed to fetch open orders for {sym}: {e}") from e
        if not open_orders:
            raise RuntimeError(f"No open orders to cancel for {sym}")
        # Last entry is assumed newest — TODO confirm ccxt ordering guarantee.
        order_id = str(open_orders[-1]["id"])
    try:
        result = ex.cancel_order(order_id, sym)
    except Exception as e:
        raise RuntimeError(f"Failed to cancel order {order_id} on {sym}: {e}") from e
    payload = {"cancelled": True, "symbol": sym, "order_id": order_id, "result": result}
    print_json(payload)
    return result

View File

@@ -10,11 +10,9 @@ from .precheck_constants import (
HARD_MOON_PCT,
HARD_REASON_DEDUP_MINUTES,
HARD_STOP_PCT,
MAX_PENDING_TRIGGER_MINUTES,
MAX_RUN_REQUEST_MINUTES,
MIN_REAL_POSITION_VALUE_USDT,
)
from .time_utils import parse_ts, utc_iso, utc_now
from .time_utils import parse_ts, utc_now
def analyze_trigger(snapshot: dict, state: dict):
@@ -51,36 +49,37 @@ def analyze_trigger(snapshot: dict, state: dict):
if pending_trigger:
reasons.append("pending-trigger-unacked")
hard_reasons.append("pending-trigger-unacked")
details.append("上次已触发深度分析但尚未确认完成")
details.append("Previous deep analysis trigger has not been acknowledged yet")
if run_requested_at:
details.append(f"外部门控已在 {run_requested_at.isoformat()} 请求运行分析任务")
details.append(f"External gate requested analysis at {run_requested_at.isoformat()}")
if not last_deep_analysis_at:
reasons.append("first-analysis")
hard_reasons.append("first-analysis")
details.append("尚未记录过深度分析")
details.append("No deep analysis has been recorded yet")
elif now - last_deep_analysis_at >= timedelta(minutes=force_minutes):
reasons.append("stale-analysis")
hard_reasons.append("stale-analysis")
details.append(f"距离上次深度分析已超过 {force_minutes} 分钟")
details.append(f"Time since last deep analysis exceeds {force_minutes} minutes")
if last_positions_hash and snapshot["positions_hash"] != last_positions_hash:
reasons.append("positions-changed")
hard_reasons.append("positions-changed")
details.append("持仓结构发生变化")
details.append("Position structure has changed")
if last_portfolio_value not in (None, 0):
portfolio_delta = abs(snapshot["portfolio_value_usdt"] - last_portfolio_value) / max(last_portfolio_value, 1e-9)
lpf = float(str(last_portfolio_value))
portfolio_delta = abs(snapshot["portfolio_value_usdt"] - lpf) / max(lpf, 1e-9)
if portfolio_delta >= portfolio_trigger:
if portfolio_delta >= 1.0:
reasons.append("portfolio-extreme-move")
hard_reasons.append("portfolio-extreme-move")
details.append(f"组合净值剧烈变化 {portfolio_delta:.1%},超过 100%,视为硬触发")
details.append(f"Portfolio value moved extremely {portfolio_delta:.1%}, exceeding 100%, treated as hard trigger")
else:
reasons.append("portfolio-move")
soft_reasons.append("portfolio-move")
soft_score += 1.0
details.append(f"组合净值变化 {portfolio_delta:.1%},阈值 {portfolio_trigger:.1%}")
details.append(f"Portfolio value moved {portfolio_delta:.1%}, threshold {portfolio_trigger:.1%}")
for pos in snapshot["positions"]:
symbol = pos["symbol"]
@@ -98,42 +97,42 @@ def analyze_trigger(snapshot: dict, state: dict):
reasons.append(f"price-move:{symbol}")
soft_reasons.append(f"price-move:{symbol}")
soft_score += 1.0 if actionable_position else 0.4
details.append(f"{symbol} 价格变化 {price_move:.1%},阈值 {price_trigger:.1%}")
details.append(f"{symbol} price moved {price_move:.1%}, threshold {price_trigger:.1%}")
if cur_pnl is not None and prev_pnl is not None:
pnl_move = abs(cur_pnl - prev_pnl)
if pnl_move >= pnl_trigger:
reasons.append(f"pnl-move:{symbol}")
soft_reasons.append(f"pnl-move:{symbol}")
soft_score += 1.0 if actionable_position else 0.4
details.append(f"{symbol} 盈亏变化 {pnl_move:.1%},阈值 {pnl_trigger:.1%}")
details.append(f"{symbol} PnL moved {pnl_move:.1%}, threshold {pnl_trigger:.1%}")
if cur_pnl is not None:
stop_band = -0.06 if actionable_position else -0.12
take_band = 0.14 if actionable_position else 0.25
if cur_pnl <= stop_band or cur_pnl >= take_band:
reasons.append(f"risk-band:{symbol}")
hard_reasons.append(f"risk-band:{symbol}")
details.append(f"{symbol} 接近执行阈值,当前盈亏 {cur_pnl:.1%}")
details.append(f"{symbol} near execution threshold, current PnL {cur_pnl:.1%}")
if cur_pnl <= HARD_STOP_PCT:
reasons.append(f"hard-stop:{symbol}")
hard_reasons.append(f"hard-stop:{symbol}")
details.append(f"{symbol} 盈亏超过 {HARD_STOP_PCT:.1%},触发紧急硬触发")
details.append(f"{symbol} PnL exceeded {HARD_STOP_PCT:.1%}, emergency hard trigger")
current_market = snapshot.get("market_regime", {})
if last_market_regime:
if current_market.get("btc_regime") != last_market_regime.get("btc_regime"):
reasons.append("btc-regime-change")
hard_reasons.append("btc-regime-change")
details.append(f"BTC {last_market_regime.get('btc_regime')} 切换为 {current_market.get('btc_regime')}")
details.append(f"BTC regime changed from {last_market_regime.get('btc_regime')} to {current_market.get('btc_regime')}")
if current_market.get("eth_regime") != last_market_regime.get("eth_regime"):
reasons.append("eth-regime-change")
hard_reasons.append("eth-regime-change")
details.append(f"ETH {last_market_regime.get('eth_regime')} 切换为 {current_market.get('eth_regime')}")
details.append(f"ETH regime changed from {last_market_regime.get('eth_regime')} to {current_market.get('eth_regime')}")
for cand in snapshot.get("top_candidates", []):
if cand.get("change_24h_pct", 0) >= HARD_MOON_PCT * 100:
reasons.append(f"hard-moon:{cand['symbol']}")
hard_reasons.append(f"hard-moon:{cand['symbol']}")
details.append(f"候选币 {cand['symbol']} 24h 涨幅 {cand['change_24h_pct']:.1f}%,触发强势硬触发")
details.append(f"Candidate {cand['symbol']} 24h change {cand['change_24h_pct']:.1f}%, hard moon trigger")
candidate_weight = _candidate_weight(snapshot, profile)
@@ -151,7 +150,7 @@ def analyze_trigger(snapshot: dict, state: dict):
soft_reasons.append(f"new-leader-{band}:{cur_leader['symbol']}")
soft_score += candidate_weight * 0.7
details.append(
f"{band} 层新榜首 {cur_leader['symbol']} 替代 {prev_leader['symbol']}score 比例 {score_ratio:.2f}"
f"{band} tier new leader {cur_leader['symbol']} replaced {prev_leader['symbol']}, score ratio {score_ratio:.2f}"
)
current_leader = snapshot.get("top_candidates", [{}])[0] if snapshot.get("top_candidates") else None
@@ -163,13 +162,13 @@ def analyze_trigger(snapshot: dict, state: dict):
soft_reasons.append("new-leader")
soft_score += candidate_weight
details.append(
f"新候选币 {current_leader.get('symbol')} 领先上次榜首,score 比例 {score_ratio:.2f},阈值 {candidate_ratio_trigger:.2f}"
f"New candidate {current_leader.get('symbol')} leads previous top, score ratio {score_ratio:.2f}, threshold {candidate_ratio_trigger:.2f}"
)
elif current_leader and not last_top_candidate:
reasons.append("candidate-leader-init")
soft_reasons.append("candidate-leader-init")
soft_score += candidate_weight
details.append(f"首次记录候选榜首 {current_leader.get('symbol')}")
details.append(f"First recorded candidate leader {current_leader.get('symbol')}")
def _signal_delta() -> float:
delta = 0.0
@@ -204,7 +203,8 @@ def analyze_trigger(snapshot: dict, state: dict):
if current_market.get("eth_regime") != last_market_regime.get("eth_regime"):
delta += 1.5
if last_portfolio_value not in (None, 0):
portfolio_delta = abs(snapshot["portfolio_value_usdt"] - last_portfolio_value) / max(last_portfolio_value, 1e-9)
lpf = float(str(last_portfolio_value))
portfolio_delta = abs(snapshot["portfolio_value_usdt"] - lpf) / max(lpf, 1e-9)
if portfolio_delta >= 0.05:
delta += 1.0
last_trigger_hard_types = {r.split(":")[0] for r in (state.get("last_trigger_hard_reasons") or [])}
@@ -227,41 +227,41 @@ def analyze_trigger(snapshot: dict, state: dict):
last_at = parse_ts(last_hard_reasons_at.get(hr))
if last_at and now - last_at < dedup_window:
hard_reasons.remove(hr)
details.append(f"{hr} 近期已触发,{HARD_REASON_DEDUP_MINUTES}分钟内去重")
details.append(f"{hr} triggered recently, deduplicated within {HARD_REASON_DEDUP_MINUTES} minutes")
hard_trigger = bool(hard_reasons)
if profile.get("dust_mode") and not hard_trigger and soft_score < soft_score_threshold + 1.0:
details.append("微型资金/粉尘仓位模式:抬高软触发门槛,避免无意义分析")
details.append("Dust-mode portfolio: raising soft-trigger threshold to avoid noise")
if profile.get("dust_mode") and not profile.get("new_entries_allowed") and any(
r in {"new-leader", "candidate-leader-init"} for r in soft_reasons
):
details.append("当前可用资金低于可执行阈值,新候选币仅做观察,不单独触发深度分析")
details.append("Available capital below executable threshold; new candidates are observation-only")
soft_score = max(0.0, soft_score - 0.75)
should_analyze = hard_trigger or soft_score >= soft_score_threshold
if cooldown_active and not hard_trigger and should_analyze:
should_analyze = False
details.append(f"处于 {cooldown_minutes} 分钟冷却窗口,软触发先记录不升级")
details.append(f"In {cooldown_minutes} minute cooldown window; soft trigger logged but not escalated")
if cooldown_active and not hard_trigger and reasons and soft_score < soft_score_threshold:
details.append(f"处于 {cooldown_minutes} 分钟冷却窗口,且软信号强度不足 ({soft_score:.2f} < {soft_score_threshold:.2f})")
details.append(f"In {cooldown_minutes} minute cooldown window with insufficient soft signal ({soft_score:.2f} < {soft_score_threshold:.2f})")
status = "deep_analysis_required" if should_analyze else "stable"
compact_lines = [
f"状态: {status}",
f"组合净值: ${snapshot['portfolio_value_usdt']:.4f} | 可用USDT: ${snapshot['free_usdt']:.4f}",
f"本地时段: {snapshot['session']} | 时区: {snapshot['timezone']}",
f"BTC/ETH: {market.get('btc_regime')} ({market.get('btc_24h_pct')}%), {market.get('eth_regime')} ({market.get('eth_24h_pct')}%) | 波动分数 {market.get('volatility_score')}",
f"门控画像: capital={profile['capital_band']}, session={profile['session_mode']}, volatility={profile['volatility_mode']}, dust={profile['dust_mode']}",
f"阈值: price={price_trigger:.1%}, pnl={pnl_trigger:.1%}, portfolio={portfolio_trigger:.1%}, candidate={candidate_ratio_trigger:.2f}, cooldown={effective_cooldown}m({cooldown_minutes}m基础), force={force_minutes}m",
f"软信号分: {soft_score:.2f} / {soft_score_threshold:.2f}",
f"信号变化度: {signal_delta:.1f}",
f"Status: {status}",
f"Portfolio: ${snapshot['portfolio_value_usdt']:.4f} | Free USDT: ${snapshot['free_usdt']:.4f}",
f"Session: {snapshot['session']} | TZ: {snapshot['timezone']}",
f"BTC/ETH: {market.get('btc_regime')} ({market.get('btc_24h_pct')}%), {market.get('eth_regime')} ({market.get('eth_24h_pct')}%) | Volatility score {market.get('volatility_score')}",
f"Profile: capital={profile['capital_band']}, session={profile['session_mode']}, volatility={profile['volatility_mode']}, dust={profile['dust_mode']}",
f"Thresholds: price={price_trigger:.1%}, pnl={pnl_trigger:.1%}, portfolio={portfolio_trigger:.1%}, candidate={candidate_ratio_trigger:.2f}, cooldown={effective_cooldown}m({cooldown_minutes}m base), force={force_minutes}m",
f"Soft score: {soft_score:.2f} / {soft_score_threshold:.2f}",
f"Signal delta: {signal_delta:.1f}",
]
if snapshot["positions"]:
compact_lines.append("持仓:")
compact_lines.append("Positions:")
for pos in snapshot["positions"][:4]:
pnl = pos.get("pnl_pct")
pnl_text = f"{pnl:+.1%}" if pnl is not None else "n/a"
@@ -269,9 +269,9 @@ def analyze_trigger(snapshot: dict, state: dict):
f"- {pos['symbol']}: qty={pos['quantity']}, px={pos.get('last_price')}, pnl={pnl_text}, value=${pos.get('market_value_usdt')}"
)
else:
compact_lines.append("持仓: 当前无现货仓位")
compact_lines.append("Positions: no spot positions currently")
if snapshot["top_candidates"]:
compact_lines.append("候选榜:")
compact_lines.append("Candidates:")
for cand in snapshot["top_candidates"]:
compact_lines.append(
f"- {cand['symbol']}: score={cand['score']}, 24h={cand['change_24h_pct']}%, vol=${cand['volume_24h']}"
@@ -279,13 +279,13 @@ def analyze_trigger(snapshot: dict, state: dict):
layers = snapshot.get("top_candidates_layers", {})
for band, band_cands in layers.items():
if band_cands:
compact_lines.append(f"{band} :")
compact_lines.append(f"{band} tier:")
for cand in band_cands:
compact_lines.append(
f"- {cand['symbol']}: score={cand['score']}, 24h={cand['change_24h_pct']}%, vol=${cand['volume_24h']}"
)
if details:
compact_lines.append("触发说明:")
compact_lines.append("Trigger notes:")
for item in details:
compact_lines.append(f"- {item}")

View File

@@ -11,8 +11,6 @@ from __future__ import annotations
import sys
from importlib import import_module
from .services.smart_executor_service import run as _run_service
_EXPORT_MAP = {
"PATHS": (".runtime", "get_runtime_paths"),
"ENV_FILE": (".runtime", "get_runtime_paths"),
@@ -39,6 +37,7 @@ _EXPORT_MAP = {
"save_executions": (".services.execution_state", "save_executions"),
"load_positions": (".services.portfolio_service", "load_positions"),
"save_positions": (".services.portfolio_service", "save_positions"),
"update_positions": (".services.portfolio_service", "update_positions"),
"upsert_position": (".services.portfolio_service", "upsert_position"),
"reconcile_positions_with_exchange": (".services.portfolio_service", "reconcile_positions_with_exchange"),
"get_exchange": (".services.exchange_service", "get_exchange"),
@@ -58,6 +57,9 @@ _EXPORT_MAP = {
"action_rebalance": (".services.trade_execution", "action_rebalance"),
"command_status": (".services.trade_execution", "command_status"),
"command_balances": (".services.trade_execution", "command_balances"),
"command_orders": (".services.trade_execution", "command_orders"),
"command_order_status": (".services.trade_execution", "command_order_status"),
"command_cancel": (".services.trade_execution", "command_cancel"),
}
__all__ = sorted(set(_EXPORT_MAP) | {"ENV_FILE", "PATHS", "load_env", "main"})
@@ -93,6 +95,7 @@ def load_env():
def main(argv=None):
    """CLI entry point: delegate to the smart-executor service runner.

    Args:
        argv: Optional argument list; when None, ``sys.argv[1:]`` is used.

    Returns:
        Whatever the service runner returns (used as the process exit status).
    """
    # Imported lazily inside the function (the module-level import was removed
    # in this change) — presumably to keep package import cheap and avoid
    # circular imports; confirm before hoisting it back to module level.
    from .services.smart_executor_service import run as _run_service
    return _run_service(sys.argv[1:] if argv is None else argv)

0
tests/__init__.py Normal file
View File

70
tests/test_check_api.py Normal file
View File

@@ -0,0 +1,70 @@
"""Tests for check_api command."""
import json
from unittest.mock import MagicMock, patch
from coinhunter.commands import check_api
class TestMain:
    """check_api.main(): credential validation, balance probe, restrictions probe."""

    def test_missing_api_key(self, monkeypatch, capsys):
        """Empty BINANCE_API_KEY -> rc 1 and a JSON error naming the variable."""
        monkeypatch.setenv("BINANCE_API_KEY", "")
        monkeypatch.setenv("BINANCE_API_SECRET", "secret")
        rc = check_api.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["ok"] is False
        assert "BINANCE_API_KEY" in out["error"]

    def test_missing_api_secret(self, monkeypatch, capsys):
        """Empty BINANCE_API_SECRET -> rc 1 and a JSON error naming the variable."""
        monkeypatch.setenv("BINANCE_API_KEY", "key")
        monkeypatch.setenv("BINANCE_API_SECRET", "")
        rc = check_api.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["ok"] is False
        assert "BINANCE_API_SECRET" in out["error"]

    def test_placeholder_api_key(self, monkeypatch, capsys):
        """A template placeholder key is rejected like a missing one.

        Renamed from ``test_placholder_api_key`` to fix the typo.
        """
        monkeypatch.setenv("BINANCE_API_KEY", "your_api_key")
        monkeypatch.setenv("BINANCE_API_SECRET", "secret")
        rc = check_api.main()
        assert rc == 1

    def test_balance_fetch_failure(self, monkeypatch, capsys):
        """A failing fetch_balance surfaces as a connection error with rc 1."""
        monkeypatch.setenv("BINANCE_API_KEY", "key")
        monkeypatch.setenv("BINANCE_API_SECRET", "secret")
        mock_ex = MagicMock()
        mock_ex.fetch_balance.side_effect = Exception("Network error")
        with patch("coinhunter.commands.check_api.ccxt.binance", return_value=mock_ex):
            rc = check_api.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert "Failed to connect" in out["error"]

    def test_success_with_spot_trading(self, monkeypatch, capsys):
        """Balance and restrictions both succeed -> full OK report."""
        monkeypatch.setenv("BINANCE_API_KEY", "key")
        monkeypatch.setenv("BINANCE_API_SECRET", "secret")
        mock_ex = MagicMock()
        mock_ex.fetch_balance.return_value = {"USDT": 100.0}
        mock_ex.sapi_get_account_api_restrictions.return_value = {"enableSpotTrading": True}
        with patch("coinhunter.commands.check_api.ccxt.binance", return_value=mock_ex):
            rc = check_api.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["ok"] is True
        assert out["read_permission"] is True
        assert out["spot_trading_enabled"] is True

    def test_success_restrictions_query_fails(self, monkeypatch, capsys):
        """A failing restrictions probe is non-fatal; the spot flag becomes null."""
        monkeypatch.setenv("BINANCE_API_KEY", "key")
        monkeypatch.setenv("BINANCE_API_SECRET", "secret")
        mock_ex = MagicMock()
        mock_ex.fetch_balance.return_value = {"USDT": 100.0}
        mock_ex.sapi_get_account_api_restrictions.side_effect = Exception("no permission")
        with patch("coinhunter.commands.check_api.ccxt.binance", return_value=mock_ex):
            rc = check_api.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["spot_trading_enabled"] is None
        assert "may be null" in out["note"]

58
tests/test_cli.py Normal file
View File

@@ -0,0 +1,58 @@
"""Tests for CLI routing and parser behavior."""
import pytest
from coinhunter.cli import ALIASES, MODULE_MAP, build_parser, run_python_module
class TestAliases:
    """Sanity checks on the CLI alias table."""

    def test_all_aliases_resolve_to_canonical(self):
        # Every alias must map to a command that has a module routing entry.
        for alias, canonical in ALIASES.items():
            assert canonical in MODULE_MAP, (
                f"alias {alias!r} points to missing canonical {canonical!r}"
            )

    def test_no_alias_is_itself_an_alias_loop(self):
        # An alias name that also appears as a canonical target must itself be
        # directly routable — otherwise alias resolution could chain/loop.
        for alias in ALIASES:
            if alias in ALIASES.values():
                assert alias in MODULE_MAP
class TestModuleMap:
    """Every routed module must import cleanly and expose main()."""

    def test_all_modules_exist(self):
        import importlib

        for command, module_name in MODULE_MAP.items():
            dotted = f"coinhunter.{module_name}"
            module = importlib.import_module(dotted)
            assert hasattr(module, "main"), f"{dotted} missing main()"
class TestBuildParser:
    """Parser construction, help text, and argument routing."""

    def test_help_includes_commands(self):
        help_text = build_parser().format_help()
        for expected in ("coinhunter diag", "coinhunter exec"):
            assert expected in help_text

    def test_parses_command_and_args(self):
        namespace = build_parser().parse_args(["exec", "bal"])
        assert namespace.command == "exec"
        assert "bal" in namespace.args

    def test_version_action_exits(self):
        # argparse's version action prints and exits with status 0.
        with pytest.raises(SystemExit) as exc:
            build_parser().parse_args(["--version"])
        assert exc.value.code == 0
class TestRunPythonModule:
    """run_python_module() wraps a module's main() with sys.argv management."""

    def test_runs_module_main_and_returns_int(self):
        """A trivial command runs to completion and returns 0."""
        result = run_python_module("commands.paths", [], "coinhunter paths")
        assert result == 0

    def test_restores_sys_argv(self):
        """sys.argv is restored after the call.

        Renamed from ``test_mutates_sys_argv``: the assertion checks that the
        original argv is back in place afterwards (restoration), which the old
        name obscured.
        """
        import sys
        original = sys.argv[:]
        run_python_module("commands.paths", ["--help"], "coinhunter paths")
        assert sys.argv == original

View File

@@ -0,0 +1,98 @@
"""Tests for exchange helpers and caching."""
import pytest
from coinhunter.services import exchange_service as es
class TestNormSymbol:
    """norm_symbol() canonicalizes assorted symbol spellings to BASE/QUOTE."""

    def test_already_normalized(self):
        normalized = es.norm_symbol("BTC/USDT")
        assert normalized == "BTC/USDT"

    def test_raw_symbol(self):
        normalized = es.norm_symbol("BTCUSDT")
        assert normalized == "BTC/USDT"

    def test_lowercase(self):
        normalized = es.norm_symbol("btcusdt")
        assert normalized == "BTC/USDT"

    def test_dash_separator(self):
        normalized = es.norm_symbol("BTC-USDT")
        assert normalized == "BTC/USDT"

    def test_underscore_separator(self):
        normalized = es.norm_symbol("BTC_USDT")
        assert normalized == "BTC/USDT"

    def test_unsupported_symbol(self):
        # A bare base asset with no recognizable quote is rejected.
        with pytest.raises(ValueError):
            es.norm_symbol("BTC")
class TestStorageSymbol:
    """storage_symbol() flattens BASE/QUOTE into the stored exchange form."""

    def test_converts_to_flat(self):
        flat = es.storage_symbol("BTC/USDT")
        assert flat == "BTCUSDT"

    def test_raw_input(self):
        # Already-flat input passes through unchanged.
        assert es.storage_symbol("ETHUSDT") == "ETHUSDT"
class TestFloorToStep:
    """floor_to_step() rounds a quantity down to an exchange step size."""

    def test_no_step(self):
        # A zero step disables flooring entirely.
        assert es.floor_to_step(1.234, 0) == 1.234

    def test_floors_to_step(self):
        floored = es.floor_to_step(1.234, 0.1)
        assert floored == pytest.approx(1.2)

    def test_exact_multiple(self):
        # Values already on a step boundary are returned unchanged.
        assert es.floor_to_step(12, 1) == 12
class TestGetExchangeCaching:
    """get_exchange() caches the client instance with a TTL and force_new escape."""

    def test_returns_cached_instance(self, monkeypatch):
        """Repeated calls return the same object; force_new bypasses the cache."""
        monkeypatch.setenv("BINANCE_API_KEY", "test_key")
        monkeypatch.setenv("BINANCE_API_SECRET", "test_secret")

        class FakeEx:
            def load_markets(self):
                pass

        # Reset the module-level cache and restore it afterwards so other
        # tests are unaffected.
        original_cache = es._exchange_cache
        original_cached_at = es._exchange_cached_at
        try:
            es._exchange_cache = None
            es._exchange_cached_at = None
            monkeypatch.setattr(es.ccxt, "binance", lambda *a, **kw: FakeEx())
            # Stub load_env so the test never reads a real .env file
            # (consistent with test_cache_expires_after_ttl below).
            monkeypatch.setattr(es, "load_env", lambda: None)
            first = es.get_exchange()
            second = es.get_exchange()
            assert first is second
            third = es.get_exchange(force_new=True)
            assert third is not first
        finally:
            es._exchange_cache = original_cache
            es._exchange_cached_at = original_cached_at

    def test_cache_expires_after_ttl(self, monkeypatch):
        """A cached client older than CACHE_TTL_SECONDS is rebuilt."""
        monkeypatch.setenv("BINANCE_API_KEY", "test_key")
        monkeypatch.setenv("BINANCE_API_SECRET", "test_secret")

        class FakeEx:
            def load_markets(self):
                pass

        original_cache = es._exchange_cache
        original_cached_at = es._exchange_cached_at
        try:
            es._exchange_cache = None
            es._exchange_cached_at = None
            monkeypatch.setattr(es.ccxt, "binance", lambda *a, **kw: FakeEx())
            monkeypatch.setattr(es, "load_env", lambda: None)
            first = es.get_exchange()
            # Simulate time passing beyond TTL by backdating the cache stamp.
            es._exchange_cached_at -= es.CACHE_TTL_SECONDS + 1
            second = es.get_exchange()
            assert first is not second
        finally:
            es._exchange_cache = original_cache
            es._exchange_cached_at = original_cached_at

203
tests/test_external_gate.py Normal file
View File

@@ -0,0 +1,203 @@
"""Tests for external_gate configurable hook."""
import json
from unittest.mock import MagicMock, patch
from coinhunter.commands import external_gate
class TestResolveTriggerCommand:
    """_resolve_trigger_command() reads external_gate.trigger_command from config."""

    def test_missing_config_means_disabled(self, tmp_path, monkeypatch):
        """No config file at all -> hook disabled (None)."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        # no config file exists
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd is None

    def test_uses_explicit_list_from_config(self, tmp_path, monkeypatch):
        """A list value is used verbatim as the argv to run."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({
            "external_gate": {"trigger_command": ["my-scheduler", "run", "job-123"]}
        }), encoding="utf-8")
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd == ["my-scheduler", "run", "job-123"]

    def test_null_means_disabled(self, tmp_path, monkeypatch):
        """An explicit null disables the hook."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({
            "external_gate": {"trigger_command": None}
        }), encoding="utf-8")
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd is None

    def test_empty_list_means_disabled(self, tmp_path, monkeypatch):
        """An empty argv list is treated the same as disabled."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({
            "external_gate": {"trigger_command": []}
        }), encoding="utf-8")
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd is None

    def test_string_gets_wrapped(self, tmp_path, monkeypatch):
        """A bare string is wrapped into a single-element argv list."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({
            "external_gate": {"trigger_command": "my-script.sh"}
        }), encoding="utf-8")
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd == ["my-script.sh"]

    def test_unexpected_type_returns_none_and_warns(self, tmp_path, monkeypatch, capsys):
        """An unsupported config type (e.g. int) disables the hook.

        NOTE(review): the name promises a warning, but nothing asserts on
        capsys output — TODO confirm the warning text and assert it.
        """
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({
            "external_gate": {"trigger_command": 42}
        }), encoding="utf-8")
        cmd = external_gate._resolve_trigger_command(paths)
        assert cmd is None
class TestMain:
    """external_gate.main() flow: lock -> precheck -> mark-run-requested -> trigger.

    The multi-call tests feed run_cmd a side_effect list; the order of the
    fake results matches the order main() shells out: precheck first, then
    mark-run-requested, then the configured trigger command.
    """

    def test_already_running(self, tmp_path, monkeypatch, capsys):
        """If the gate lock is already held, main() exits 0 with 'already_running'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.state_dir.mkdir(parents=True, exist_ok=True)
        # Acquire the lock in this process so main() sees it as busy
        import fcntl
        with open(paths.external_gate_lock, "w", encoding="utf-8") as f:
            fcntl.flock(f.fileno(), fcntl.LOCK_EX)
            rc = external_gate.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "already_running"

    def test_precheck_failure(self, tmp_path, monkeypatch, capsys):
        """A non-zero precheck exit code aborts with 'precheck_failed'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        fake_result = MagicMock(returncode=1, stdout="err", stderr="")
        with patch("coinhunter.commands.external_gate.run_cmd", return_value=fake_result):
            rc = external_gate.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "precheck_failed"

    def test_precheck_parse_error(self, tmp_path, monkeypatch, capsys):
        """Precheck stdout that is not valid JSON aborts with 'precheck_parse_error'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        fake_result = MagicMock(returncode=0, stdout="not-json", stderr="")
        with patch("coinhunter.commands.external_gate.run_cmd", return_value=fake_result):
            rc = external_gate.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "precheck_parse_error"

    def test_precheck_not_ok(self, tmp_path, monkeypatch, capsys):
        """A well-formed precheck reply with ok=False aborts with 'precheck_not_ok'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        fake_result = MagicMock(returncode=0, stdout=json.dumps({"ok": False}), stderr="")
        with patch("coinhunter.commands.external_gate.run_cmd", return_value=fake_result):
            rc = external_gate.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "precheck_not_ok"

    def test_no_trigger(self, tmp_path, monkeypatch, capsys):
        """should_analyze=False is a clean no-op exit with 'no_trigger'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        fake_result = MagicMock(returncode=0, stdout=json.dumps({"ok": True, "should_analyze": False}), stderr="")
        with patch("coinhunter.commands.external_gate.run_cmd", return_value=fake_result):
            rc = external_gate.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "no_trigger"

    def test_already_queued(self, tmp_path, monkeypatch, capsys):
        """A pending run_requested flag short-circuits with 'already_queued'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        fake_result = MagicMock(
            returncode=0,
            stdout=json.dumps({"ok": True, "should_analyze": True, "run_requested": True, "run_requested_at": "2024-01-01T00:00:00Z"}),
            stderr="",
        )
        with patch("coinhunter.commands.external_gate.run_cmd", return_value=fake_result):
            rc = external_gate.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "already_queued"

    def test_trigger_disabled(self, tmp_path, monkeypatch, capsys):
        """With trigger_command=null the run is marked but no command is launched."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({"external_gate": {"trigger_command": None}}), encoding="utf-8")
        # First call is precheck, second is mark-run-requested
        responses = [
            MagicMock(returncode=0, stdout=json.dumps({"ok": True, "should_analyze": True}), stderr=""),
            MagicMock(returncode=0, stdout=json.dumps({"ok": True}), stderr=""),
        ]
        with patch("coinhunter.commands.external_gate.run_cmd", side_effect=responses):
            rc = external_gate.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "trigger_disabled"

    def test_trigger_success(self, tmp_path, monkeypatch, capsys):
        """Happy path: precheck, mark, trigger all succeed; the precheck's
        first reason is propagated into the output."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({"external_gate": {"trigger_command": ["echo", "ok"]}}), encoding="utf-8")
        responses = [
            MagicMock(returncode=0, stdout=json.dumps({"ok": True, "should_analyze": True, "reasons": ["price-move"]}), stderr=""),
            MagicMock(returncode=0, stdout=json.dumps({"ok": True}), stderr=""),
            MagicMock(returncode=0, stdout="triggered", stderr=""),
        ]
        with patch("coinhunter.commands.external_gate.run_cmd", side_effect=responses):
            rc = external_gate.main()
        assert rc == 0
        out = json.loads(capsys.readouterr().out)
        assert out["triggered"] is True
        assert out["reason"] == "price-move"

    def test_trigger_command_failure(self, tmp_path, monkeypatch, capsys):
        """A failing trigger command (third call) yields rc 1 / 'trigger_failed'."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({"external_gate": {"trigger_command": ["false"]}}), encoding="utf-8")
        responses = [
            MagicMock(returncode=0, stdout=json.dumps({"ok": True, "should_analyze": True}), stderr=""),
            MagicMock(returncode=0, stdout=json.dumps({"ok": True}), stderr=""),
            MagicMock(returncode=1, stdout="", stderr="fail"),
        ]
        with patch("coinhunter.commands.external_gate.run_cmd", side_effect=responses):
            rc = external_gate.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "trigger_failed"

    def test_mark_run_requested_failure(self, tmp_path, monkeypatch, capsys):
        """A failing mark-run-requested (second call) aborts before triggering."""
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        paths = external_gate._paths()
        paths.root.mkdir(parents=True, exist_ok=True)
        paths.config_file.write_text(json.dumps({"external_gate": {"trigger_command": ["echo", "ok"]}}), encoding="utf-8")
        responses = [
            MagicMock(returncode=0, stdout=json.dumps({"ok": True, "should_analyze": True}), stderr=""),
            MagicMock(returncode=1, stdout="err", stderr=""),
        ]
        with patch("coinhunter.commands.external_gate.run_cmd", side_effect=responses):
            rc = external_gate.main()
        assert rc == 1
        out = json.loads(capsys.readouterr().out)
        assert out["reason"] == "mark_failed"

View File

@@ -0,0 +1,194 @@
"""Tests for review_service."""
import json
from pathlib import Path
from unittest.mock import patch
from coinhunter.services import review_service as rs
class TestAnalyzeTrade:
    """analyze_trade() scores a past trade against the current market price."""

    @staticmethod
    def _analyze(trade, current_price):
        # Stub the live price lookup so outcomes are deterministic.
        with patch.object(rs, "fetch_current_price", return_value=current_price):
            return rs.analyze_trade(trade, None)

    def test_buy_good_when_price_up(self):
        trade = {"symbol": "BTC/USDT", "price": 50000.0, "action": "BUY"}
        result = self._analyze(trade, 52000.0)
        assert result["action"] == "BUY"
        assert result["pnl_estimate_pct"] == 4.0
        assert result["outcome_assessment"] == "good"

    def test_buy_bad_when_price_down(self):
        trade = {"symbol": "BTC/USDT", "price": 50000.0, "action": "BUY"}
        result = self._analyze(trade, 48000.0)
        assert result["pnl_estimate_pct"] == -4.0
        assert result["outcome_assessment"] == "bad"

    def test_sell_all_missed_when_price_up(self):
        # Selling everything before a rally is scored as a missed gain.
        trade = {"symbol": "BTC/USDT", "price": 50000.0, "action": "SELL_ALL"}
        result = self._analyze(trade, 53000.0)
        assert result["pnl_estimate_pct"] == -6.0
        assert result["outcome_assessment"] == "missed"

    def test_neutral_when_small_move(self):
        trade = {"symbol": "BTC/USDT", "price": 50000.0, "action": "BUY"}
        result = self._analyze(trade, 50100.0)
        assert result["outcome_assessment"] == "neutral"

    def test_none_when_no_price(self):
        # No recorded entry price -> no PnL estimate, neutral outcome.
        result = rs.analyze_trade({"symbol": "BTC/USDT", "action": "BUY"}, None)
        assert result["pnl_estimate_pct"] is None
        assert result["outcome_assessment"] == "neutral"
class TestAnalyzeHoldPasses:
    """analyze_hold_passes() surfaces PASSed candidates that later rallied."""

    def test_finds_passed_opportunities_that_rise(self):
        history = [{
            "timestamp": "2024-01-01T00:00:00Z",
            "decision": "HOLD",
            "analysis": {"opportunities_evaluated": [{"symbol": "ETH/USDT", "verdict": "PASS"}]},
            "market_snapshot": {"ETHUSDT": {"lastPrice": 100.0}},
        }]
        with patch.object(rs, "fetch_current_price", return_value=110.0):
            missed = rs.analyze_hold_passes(history, None)
        assert len(missed) == 1
        assert missed[0]["symbol"] == "ETH/USDT"
        assert missed[0]["change_pct"] == 10.0

    def test_ignores_non_pass_verdicts(self):
        # Only verdict == "PASS" counts as a passed-up opportunity.
        history = [{
            "decision": "HOLD",
            "analysis": {"opportunities_evaluated": [{"symbol": "ETH/USDT", "verdict": "BUY"}]},
            "market_snapshot": {"ETHUSDT": {"lastPrice": 100.0}},
        }]
        with patch.object(rs, "fetch_current_price", return_value=110.0):
            assert rs.analyze_hold_passes(history, None) == []

    def test_ignores_small_moves(self):
        # A 2% move is below the reporting threshold.
        history = [{
            "decision": "HOLD",
            "analysis": {"opportunities_evaluated": [{"symbol": "ETH/USDT", "verdict": "PASS"}]},
            "market_snapshot": {"ETHUSDT": {"lastPrice": 100.0}},
        }]
        with patch.object(rs, "fetch_current_price", return_value=102.0):
            assert rs.analyze_hold_passes(history, None) == []
class TestAnalyzeCashMisses:
    """analyze_cash_misses() flags rallies missed while sitting in USDT."""

    def test_finds_cash_sit_misses(self):
        history = [{
            "timestamp": "2024-01-01T00:00:00Z",
            "balances": {"USDT": 100.0},
            "market_snapshot": {"BTCUSDT": {"lastPrice": 50000.0}},
        }]
        with patch.object(rs, "fetch_current_price", return_value=54000.0):
            misses = rs.analyze_cash_misses(history, None)
        assert len(misses) == 1
        assert misses[0]["symbol"] == "BTCUSDT"
        assert misses[0]["change_pct"] == 8.0

    def test_requires_mostly_usdt(self):
        # A portfolio dominated by non-USDT holdings is not "sitting in cash".
        history = [{
            "balances": {"USDT": 10.0, "BTC": 90.0},
            "market_snapshot": {"ETHUSDT": {"lastPrice": 100.0}},
        }]
        with patch.object(rs, "fetch_current_price", return_value=200.0):
            assert rs.analyze_cash_misses(history, None) == []

    def test_dedupes_by_best_change(self):
        history = [
            {
                "timestamp": "2024-01-01T00:00:00Z",
                "balances": {"USDT": 100.0},
                "market_snapshot": {"BTCUSDT": {"lastPrice": 50000.0}},
            },
            {
                "timestamp": "2024-01-01T01:00:00Z",
                "balances": {"USDT": 100.0},
                "market_snapshot": {"BTCUSDT": {"lastPrice": 52000.0}},
            },
        ]
        with patch.object(rs, "fetch_current_price", return_value=56000.0):
            misses = rs.analyze_cash_misses(history, None)
        assert len(misses) == 1
        # The best change wins: 50000 -> 56000 = 12%.
        assert misses[0]["change_pct"] == 12.0
class TestGenerateReview:
    """generate_review() aggregates decisions, trades, and errors into a report."""

    def test_empty_period(self, monkeypatch):
        # No logs at all -> zero counts and an explanatory insight.
        monkeypatch.setattr(rs, "get_logs_last_n_hours", lambda log_type, hours: [])
        review = rs.generate_review(1)
        assert review["total_decisions"] == 0
        assert review["total_trades"] == 0
        assert any("No decisions or trades" in insight for insight in review["insights"])

    def test_with_trades_and_decisions(self, monkeypatch):
        trade_log = [
            {"symbol": "BTC/USDT", "price": 50000.0, "action": "BUY", "timestamp": "2024-01-01T00:00:00Z"}
        ]
        decision_log = [
            {
                "timestamp": "2024-01-01T00:00:00Z",
                "decision": "HOLD",
                "analysis": {"opportunities_evaluated": [{"symbol": "ETH/USDT", "verdict": "PASS"}]},
                "market_snapshot": {"ETHUSDT": {"lastPrice": 100.0}},
            }
        ]
        error_log = [{"message": "oops"}]
        logs_by_type = {"decisions": decision_log, "trades": trade_log, "errors": error_log}
        monkeypatch.setattr(
            rs, "get_logs_last_n_hours", lambda log_type, hours: logs_by_type.get(log_type, [])
        )
        # ETH rallied (missed PASS); BTC rallied (good BUY).
        monkeypatch.setattr(
            rs, "fetch_current_price", lambda ex, sym: 110.0 if "ETH" in sym else 52000.0
        )
        review = rs.generate_review(1)
        assert review["total_trades"] == 1
        assert review["total_decisions"] == 1
        assert review["stats"]["good_decisions"] == 1
        assert review["stats"]["missed_hold_passes"] == 1
        assert any("execution/system errors" in insight for insight in review["insights"])
class TestSaveReview:
    """save_review() persists the review JSON under COINHUNTER_HOME."""

    def test_writes_file(self, tmp_path, monkeypatch):
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        payload = {"review_timestamp": "2024-01-01T00:00:00+08:00", "review_period_hours": 1}
        saved_path = rs.save_review(payload)
        on_disk = json.loads(Path(saved_path).read_text(encoding="utf-8"))
        assert on_disk["review_period_hours"] == 1
class TestPrintReview:
    """print_review() renders a human-readable report to stdout."""

    def test_outputs_report(self, capsys):
        sample = {
            "review_timestamp": "2024-01-01T00:00:00+08:00",
            "review_period_hours": 1,
            "total_decisions": 2,
            "total_trades": 1,
            "total_errors": 0,
            "stats": {
                "good_decisions": 1,
                "neutral_decisions": 0,
                "bad_decisions": 0,
                "missed_opportunities": 0,
            },
            "insights": ["Insight A"],
            "recommendations": ["Rec B"],
        }
        rs.print_review(sample)
        output = capsys.readouterr().out
        assert "Coin Hunter Review Report" in output
        assert "Insight A" in output
        assert "Rec B" in output

63
tests/test_runtime.py Normal file
View File

@@ -0,0 +1,63 @@
"""Tests for runtime path resolution."""
from pathlib import Path
import pytest
from coinhunter.runtime import RuntimePaths, ensure_runtime_dirs, get_runtime_paths, mask_secret
class TestGetRuntimePaths:
    """get_runtime_paths() resolves roots from env vars with sane defaults."""

    def test_defaults_point_to_home_dot_coinhunter(self):
        assert get_runtime_paths().root == Path.home() / ".coinhunter"

    def test_respects_coinhunter_home_env(self, monkeypatch):
        # The ~ prefix must be expanded.
        monkeypatch.setenv("COINHUNTER_HOME", "~/custom_ch")
        assert get_runtime_paths().root == Path.home() / "custom_ch"

    def test_respects_hermes_home_env(self, monkeypatch):
        monkeypatch.setenv("HERMES_HOME", "~/custom_hermes")
        paths = get_runtime_paths()
        expected_home = Path.home() / "custom_hermes"
        assert paths.hermes_home == expected_home
        assert paths.env_file == expected_home / ".env"

    def test_returns_frozen_dataclass(self):
        paths = get_runtime_paths()
        assert isinstance(paths, RuntimePaths)
        # Frozen dataclasses raise (a subclass of) AttributeError on assignment.
        with pytest.raises(AttributeError):
            paths.root = Path("/tmp")

    def test_as_dict_returns_strings(self):
        mapping = get_runtime_paths().as_dict()
        assert isinstance(mapping, dict)
        assert all(isinstance(value, str) for value in mapping.values())
        assert "root" in mapping
class TestEnsureRuntimeDirs:
    """ensure_runtime_dirs() materializes the full runtime directory tree."""

    def test_creates_directories(self, tmp_path, monkeypatch):
        monkeypatch.setenv("COINHUNTER_HOME", str(tmp_path))
        created = ensure_runtime_dirs(get_runtime_paths())
        expected_dirs = (
            created.root,
            created.state_dir,
            created.logs_dir,
            created.cache_dir,
            created.reviews_dir,
        )
        for directory in expected_dirs:
            assert directory.exists()
class TestMaskSecret:
    """mask_secret() hides all but the tail of a credential string."""

    def test_empty_string(self):
        assert mask_secret("") == ""

    def test_short_value(self):
        # Values no longer than the tail come back fully masked.
        assert mask_secret("ab") == "**"

    def test_masks_all_but_tail(self):
        masked = mask_secret("supersecret", tail=4)
        assert masked == "*******cret"

    def test_none_returns_empty(self):
        assert mask_secret(None) == ""

View File

@@ -0,0 +1,98 @@
"""Tests for smart executor deduplication and routing."""
from coinhunter.services import exchange_service
from coinhunter.services import smart_executor_service as ses
class FakeArgs:
    """Minimal stand-in for the parsed-CLI namespace consumed by the executor."""

    def __init__(self, command="buy", symbol="BTCUSDT", amount_usdt=50, dry_run=False,
                 decision_id=None, analysis=None, reasoning=None, order_id=None,
                 from_symbol=None, to_symbol=None):
        # Copy every keyword straight onto the instance, attribute-per-argument.
        for name, value in locals().items():
            if name != "self":
                setattr(self, name, value)
class TestDeduplication:
    """run() must skip mutating commands whose decision already succeeded."""

    def test_skips_duplicate_mutating_action(self, monkeypatch, capsys):
        monkeypatch.setattr(
            ses, "parse_cli_args", lambda argv: (FakeArgs(command="buy"), ["BTCUSDT", "50"])
        )
        # A prior successful execution is on record for this decision id.
        monkeypatch.setattr(ses, "get_execution_state", lambda did: {"status": "success"})
        exchange_requests = []

        def fake_get_exchange():
            exchange_requests.append(1)
            return "ex"

        monkeypatch.setattr(exchange_service, "get_exchange", fake_get_exchange)
        assert ses.run(["buy", "BTCUSDT", "50"]) == 0
        # The exchange is never touched for a deduplicated mutating action.
        assert exchange_requests == []
        assert "already executed successfully" in capsys.readouterr().err

    def test_allows_duplicate_read_only_action(self, monkeypatch):
        monkeypatch.setattr(
            ses, "parse_cli_args", lambda argv: (FakeArgs(command="balances"), ["balances"])
        )
        monkeypatch.setattr(ses, "get_execution_state", lambda did: {"status": "success"})
        monkeypatch.setattr(ses, "command_balances", lambda ex: None)
        exchange_requests = []

        def fake_get_exchange():
            exchange_requests.append(1)
            return "ex"

        monkeypatch.setattr(exchange_service, "get_exchange", fake_get_exchange)
        # Read-only commands are never deduplicated.
        assert ses.run(["bal"]) == 0
        assert len(exchange_requests) == 1

    def test_allows_retry_after_failure(self, monkeypatch):
        monkeypatch.setattr(
            ses, "parse_cli_args", lambda argv: (FakeArgs(command="buy"), ["BTCUSDT", "50"])
        )
        # A previously *failed* execution must not block a retry.
        monkeypatch.setattr(ses, "get_execution_state", lambda did: {"status": "failed"})
        monkeypatch.setattr(ses, "record_execution_state", lambda did, state: None)
        monkeypatch.setattr(ses, "build_decision_context", lambda ex, action, tail, did: {})
        monkeypatch.setattr(ses, "action_buy", lambda *a, **k: {"id": "123"})
        monkeypatch.setattr(ses, "print_json", lambda d: None)
        exchange_requests = []

        def fake_get_exchange():
            exchange_requests.append(1)
            return "ex"

        monkeypatch.setattr(exchange_service, "get_exchange", fake_get_exchange)
        assert ses.run(["buy", "BTCUSDT", "50"]) == 0
        assert len(exchange_requests) == 1
class TestReadOnlyRouting:
    """Read-only commands are dispatched to their command_* handlers."""

    def test_routes_balances(self, monkeypatch):
        monkeypatch.setattr(
            ses, "parse_cli_args",
            lambda argv: (FakeArgs(command="balances"), ["balances"]),
        )
        monkeypatch.setattr(ses, "get_execution_state", lambda did: None)
        seen = []
        monkeypatch.setattr(ses, "command_balances", lambda ex: seen.append("balances"))
        monkeypatch.setattr(exchange_service, "get_exchange", lambda: "ex")
        assert ses.run(["balances"]) == 0
        assert seen == ["balances"]

    def test_routes_orders(self, monkeypatch):
        monkeypatch.setattr(
            ses, "parse_cli_args",
            lambda argv: (FakeArgs(command="orders"), ["orders"]),
        )
        monkeypatch.setattr(ses, "get_execution_state", lambda did: None)
        seen = []
        monkeypatch.setattr(ses, "command_orders", lambda ex: seen.append("orders"))
        monkeypatch.setattr(exchange_service, "get_exchange", lambda: "ex")
        assert ses.run(["orders"]) == 0
        assert seen == ["orders"]

    def test_routes_order_status(self, monkeypatch):
        monkeypatch.setattr(ses, "parse_cli_args", lambda argv: (
            FakeArgs(command="order-status", symbol="BTCUSDT", order_id="123"),
            ["order-status", "BTCUSDT", "123"],
        ))
        monkeypatch.setattr(ses, "get_execution_state", lambda did: None)
        seen = []
        monkeypatch.setattr(
            ses, "command_order_status",
            lambda ex, sym, oid: seen.append((sym, oid)),
        )
        monkeypatch.setattr(exchange_service, "get_exchange", lambda: "ex")
        assert ses.run(["order-status", "BTCUSDT", "123"]) == 0
        # The handler receives the symbol and order id from the parsed args.
        assert seen == [("BTCUSDT", "123")]

View File

@@ -0,0 +1,99 @@
"""Tests for state_manager precheck utilities."""
from datetime import timedelta, timezone
from coinhunter.services.state_manager import (
clear_run_request_fields,
sanitize_state_for_stale_triggers,
update_state_after_observation,
)
from coinhunter.services.time_utils import utc_iso, utc_now
class TestClearRunRequestFields:
    """clear_run_request_fields strips run-request bookkeeping keys in place."""

    def test_removes_run_fields(self):
        state = {"run_requested_at": utc_iso(), "run_request_note": "test"}
        clear_run_request_fields(state)
        for key in ("run_requested_at", "run_request_note"):
            assert key not in state
class TestSanitizeStateForStaleTriggers:
    """sanitize_state_for_stale_triggers recovers from stuck trigger state."""

    @staticmethod
    def _stale_iso(minutes=60):
        # An ISO timestamp far enough in the past to count as stale.
        return (utc_now() - timedelta(minutes=minutes)).replace(tzinfo=timezone.utc).isoformat()

    def test_no_changes_when_clean(self):
        result = sanitize_state_for_stale_triggers({"pending_trigger": False})
        assert result["pending_trigger"] is False
        assert result["_stale_recovery_notes"] == []

    def test_clears_completed_run_request(self):
        state = {
            "pending_trigger": True,
            "run_requested_at": utc_iso(),
            "last_deep_analysis_at": utc_iso(),
        }
        result = sanitize_state_for_stale_triggers(state)
        assert result["pending_trigger"] is False
        assert "run_requested_at" not in result
        assert any(
            "completed run_requested" in note
            for note in result["_stale_recovery_notes"]
        )

    def test_clears_stale_run_request(self):
        state = {"pending_trigger": False, "run_requested_at": self._stale_iso()}
        result = sanitize_state_for_stale_triggers(state)
        assert "run_requested_at" not in result
        assert any(
            "stale run_requested" in note
            for note in result["_stale_recovery_notes"]
        )

    def test_recovers_stale_pending_trigger(self):
        state = {"pending_trigger": True, "last_triggered_at": self._stale_iso()}
        result = sanitize_state_for_stale_triggers(state)
        assert result["pending_trigger"] is False
        assert any(
            "stale pending_trigger" in note
            for note in result["_stale_recovery_notes"]
        )
class TestUpdateStateAfterObservation:
    """update_state_after_observation records snapshot metadata and triggers."""

    @staticmethod
    def _snapshot():
        # Minimal observation snapshot shared by the tests below.
        return {
            "generated_at": "2024-01-01T00:00:00Z",
            "snapshot_hash": "abc",
            "positions_hash": "pos123",
            "candidates_hash": "can456",
            "portfolio_value_usdt": 100.0,
            "market_regime": "neutral",
            "positions": [],
            "top_candidates": [],
        }

    def test_updates_last_observed_fields(self):
        snapshot = self._snapshot()
        analysis = {"should_analyze": False, "details": [], "adaptive_profile": {}}
        result = update_state_after_observation({}, snapshot, analysis)
        assert result["last_observed_at"] == snapshot["generated_at"]
        assert result["last_snapshot_hash"] == "abc"

    def test_sets_pending_trigger_when_should_analyze(self):
        analysis = {
            "should_analyze": True,
            "details": ["price move"],
            "adaptive_profile": {},
            "hard_reasons": ["moon"],
            "signal_delta": 1.5,
        }
        result = update_state_after_observation({}, self._snapshot(), analysis)
        assert result["pending_trigger"] is True
        assert result["pending_reasons"] == ["price move"]

View File

@@ -0,0 +1,160 @@
"""Tests for trade execution dry-run paths."""
import pytest
from coinhunter.services import trade_execution as te
from coinhunter.services.trade_common import set_dry_run
class TestMarketSellDryRun:
    """Dry-run market_sell fabricates a closed order without touching the exchange."""

    def test_returns_dry_run_order(self, monkeypatch):
        # Restore the global dry-run flag in finally: the original version
        # skipped set_dry_run(False) whenever an assertion failed, leaking
        # dry-run mode into every subsequent test.
        set_dry_run(True)
        try:
            monkeypatch.setattr(
                te, "prepare_sell_quantity",
                lambda ex, sym, qty: ("BTC/USDT", 0.5, 60000.0, 30000.0)
            )
            order = te.market_sell(None, "BTCUSDT", 0.5, "dec-123")
            assert order["id"] == "dry-sell-dec-123"
            assert order["symbol"] == "BTC/USDT"
            assert order["amount"] == 0.5
            assert order["status"] == "closed"
        finally:
            set_dry_run(False)
class TestMarketBuyDryRun:
    """Dry-run market_buy fabricates a closed order without touching the exchange."""

    def test_returns_dry_run_order(self, monkeypatch):
        # try/finally restores the global dry-run flag even when an assertion
        # fails, preventing leakage into subsequent tests.
        set_dry_run(True)
        try:
            monkeypatch.setattr(
                te, "prepare_buy_quantity",
                lambda ex, sym, amt: ("ETH/USDT", 1.0, 3000.0, 3000.0)
            )
            order = te.market_buy(None, "ETHUSDT", 100, "dec-456")
            assert order["id"] == "dry-buy-dec-456"
            assert order["symbol"] == "ETH/USDT"
            assert order["amount"] == 1.0
            assert order["status"] == "closed"
        finally:
            set_dry_run(False)
class TestActionSellAll:
    """action_sell_all zero-balance guard and dry-run behavior."""

    def test_raises_when_balance_zero(self, monkeypatch):
        monkeypatch.setattr(te, "fetch_balances", lambda ex: {"BTC": 0})
        with pytest.raises(RuntimeError, match="balance is zero"):
            te.action_sell_all(None, "BTCUSDT", "dec-789", {})

    def test_dry_run_does_not_reconcile(self, monkeypatch):
        # try/finally restores the global dry-run flag even when an assertion
        # fails, so later tests are not silently executed in dry-run mode.
        set_dry_run(True)
        try:
            monkeypatch.setattr(te, "fetch_balances", lambda ex: {"BTC": 0.5})
            monkeypatch.setattr(
                te, "market_sell",
                lambda ex, sym, qty, did: {"id": "dry-1", "amount": qty, "price": 60000.0, "cost": 30000.0, "status": "closed"}
            )
            monkeypatch.setattr(te, "reconcile_positions_with_exchange", lambda ex, hint=None: ([], {}))
            monkeypatch.setattr(te, "log_trade", lambda *a, **k: None)
            monkeypatch.setattr(te, "log_decision", lambda *a, **k: None)
            result = te.action_sell_all(None, "BTCUSDT", "dec-789", {"analysis": "test"})
            assert result["id"] == "dry-1"
        finally:
            set_dry_run(False)
class TestActionBuy:
    """action_buy insufficient-funds guard and dry-run behavior."""

    def test_raises_when_insufficient_usdt(self, monkeypatch):
        monkeypatch.setattr(te, "fetch_balances", lambda ex: {"USDT": 10.0})
        with pytest.raises(RuntimeError, match="Insufficient USDT"):
            te.action_buy(None, "BTCUSDT", 50, "dec-abc", {})

    def test_dry_run_skips_save(self, monkeypatch):
        # try/finally guarantees the global dry-run flag is reset even if an
        # assertion fails; the original cleanup was skipped on failure.
        set_dry_run(True)
        try:
            monkeypatch.setattr(te, "fetch_balances", lambda ex: {"USDT": 100.0})
            monkeypatch.setattr(
                te, "market_buy",
                lambda ex, sym, amt, did: {"id": "dry-2", "amount": 0.01, "price": 50000.0, "cost": 500.0, "status": "closed"}
            )
            monkeypatch.setattr(te, "load_positions", lambda: [])
            monkeypatch.setattr(te, "upsert_position", lambda positions, pos: positions.append(pos))
            monkeypatch.setattr(te, "reconcile_positions_with_exchange", lambda ex, hint=None: ([], {}))
            monkeypatch.setattr(te, "log_trade", lambda *a, **k: None)
            monkeypatch.setattr(te, "log_decision", lambda *a, **k: None)
            result = te.action_buy(None, "BTCUSDT", 50, "dec-abc", {})
            assert result["id"] == "dry-2"
        finally:
            set_dry_run(False)
class TestCommandBalances:
    """command_balances returns the fetched balances and prints them as JSON."""

    def test_returns_balances(self, monkeypatch, capsys):
        monkeypatch.setattr(te, "fetch_balances", lambda ex: {"USDT": 100.0, "BTC": 0.5})
        assert te.command_balances(None) == {"USDT": 100.0, "BTC": 0.5}
        # The JSON dump lands on stdout for agent consumption.
        assert '"USDT": 100.0' in capsys.readouterr().out
class TestCommandStatus:
    """command_status assembles balances, positions, and a market snapshot."""

    def test_returns_snapshot(self, monkeypatch, capsys):
        monkeypatch.setattr(te, "fetch_balances", lambda ex: {"USDT": 100.0})
        monkeypatch.setattr(te, "load_positions", lambda: [])
        monkeypatch.setattr(te, "build_market_snapshot", lambda ex: {"BTC/USDT": 50000.0})
        snapshot = te.command_status(None)
        assert snapshot["balances"]["USDT"] == 100.0
        assert '"balances"' in capsys.readouterr().out
class TestCommandOrders:
    """command_orders returns the exchange's open orders."""

    def test_returns_orders(self, monkeypatch):
        # Dropped the unused capsys fixture; output is already suppressed by
        # stubbing print_json.
        orders = [{"id": "1", "symbol": "BTC/USDT"}]
        monkeypatch.setattr(te, "print_json", lambda d: None)
        ex = type("Ex", (), {"fetch_open_orders": lambda self: orders})()
        result = te.command_orders(ex)
        assert result == orders
class TestCommandOrderStatus:
    """command_order_status fetches a single order by symbol and id."""

    def test_returns_order(self, monkeypatch):
        # Dropped the unused capsys fixture; output is already suppressed by
        # stubbing print_json.
        order = {"id": "123", "status": "open"}
        monkeypatch.setattr(te, "print_json", lambda d: None)
        ex = type("Ex", (), {"fetch_order": lambda self, oid, sym: order})()
        result = te.command_order_status(ex, "BTCUSDT", "123")
        assert result == order
class TestCommandCancel:
    """command_cancel paths: dry-run short-circuit, by-id, latest, and empty."""

    def test_dry_run_returns_early(self, monkeypatch):
        # try/finally restores the global dry-run flag even when the assertion
        # fails; the original cleanup was skipped on failure.
        set_dry_run(True)
        try:
            monkeypatch.setattr(te, "log", lambda msg: None)
            result = te.command_cancel(None, "BTCUSDT", "123")
            assert result["dry_run"] is True
        finally:
            set_dry_run(False)

    def test_cancel_by_order_id(self, monkeypatch):
        set_dry_run(False)
        monkeypatch.setattr(te, "print_json", lambda d: None)
        ex = type("Ex", (), {"cancel_order": lambda self, oid, sym: {"id": oid, "status": "canceled"}})()
        result = te.command_cancel(ex, "BTCUSDT", "123")
        assert result["status"] == "canceled"

    def test_cancel_latest_when_no_order_id(self, monkeypatch):
        set_dry_run(False)
        monkeypatch.setattr(te, "print_json", lambda d: None)
        ex = type("Ex", (), {
            "fetch_open_orders": lambda self, sym: [{"id": "999"}, {"id": "888"}],
            "cancel_order": lambda self, oid, sym: {"id": oid, "status": "canceled"},
        })()
        result = te.command_cancel(ex, "BTCUSDT", None)
        # With no explicit id, the last open order is canceled.
        assert result["id"] == "888"

    def test_cancel_raises_when_no_open_orders(self, monkeypatch):
        set_dry_run(False)
        ex = type("Ex", (), {"fetch_open_orders": lambda self, sym: []})()
        with pytest.raises(RuntimeError, match="No open orders"):
            te.command_cancel(ex, "BTCUSDT", None)
class TestPrintJson:
    """print_json writes key-sorted JSON to stdout."""

    def test_outputs_sorted_json(self, capsys):
        te.print_json({"b": 2, "a": 1})
        output = capsys.readouterr().out
        assert '"a": 1' in output
        assert '"b": 2' in output
        # The test name promises sorted output, but the original never checked
        # ordering: verify "a" actually precedes "b" in the dump.
        assert output.index('"a": 1') < output.index('"b": 2')