feat: add opportunity evaluation optimizer
This commit is contained in:
@@ -11,6 +11,7 @@ license = {text = "MIT"}
|
||||
requires-python = ">=3.10"
|
||||
dependencies = [
|
||||
"binance-connector>=3.9.0",
|
||||
"requests>=2.31.0",
|
||||
"shtab>=1.7.0",
|
||||
"tomli>=2.0.1; python_version < '3.11'",
|
||||
"tomli-w>=1.0.0",
|
||||
|
||||
@@ -28,6 +28,7 @@ from .services import (
|
||||
account_service,
|
||||
market_service,
|
||||
opportunity_dataset_service,
|
||||
opportunity_evaluation_service,
|
||||
opportunity_service,
|
||||
portfolio_service,
|
||||
trade_service,
|
||||
@@ -42,6 +43,8 @@ examples:
|
||||
coin buy BTCUSDT -Q 100 -d
|
||||
coin sell BTCUSDT --qty 0.01 --type limit --price 90000
|
||||
coin opportunity -s BTCUSDT ETHUSDT
|
||||
coin opportunity evaluate ~/.coinhunter/datasets/opportunity_dataset.json --agent
|
||||
coin opportunity optimize ~/.coinhunter/datasets/opportunity_dataset.json --agent
|
||||
coin upgrade
|
||||
"""
|
||||
|
||||
@@ -495,6 +498,87 @@ Fields:
|
||||
counts – kline row counts by symbol and interval
|
||||
plan – reference/simulation/run windows used for collection
|
||||
external_history – external provider historical capability probe result
|
||||
""",
|
||||
},
|
||||
"opportunity/evaluate": {
|
||||
"tui": """\
|
||||
TUI Output:
|
||||
SUMMARY
|
||||
count=120 correct=76 incorrect=44 accuracy=0.6333
|
||||
interval=1h top_n=10 decision_times=24
|
||||
|
||||
BY ACTION
|
||||
trigger count=12 correct=7 accuracy=0.5833 avg_trade_return=0.0062
|
||||
setup count=78 correct=52 accuracy=0.6667
|
||||
skip count=30 correct=17 accuracy=0.5667
|
||||
|
||||
JSON Output:
|
||||
{
|
||||
"summary": {"count": 120, "correct": 76, "incorrect": 44, "accuracy": 0.6333},
|
||||
"by_action": {"trigger": {"count": 12, "correct": 7, "accuracy": 0.5833}},
|
||||
"trade_simulation": {"trigger_trades": 12, "wins": 7, "losses": 5, "win_rate": 0.5833},
|
||||
"rules": {"horizon_hours": 24.0, "take_profit": 0.02, "stop_loss": 0.015, "setup_target": 0.01}
|
||||
}
|
||||
Fields:
|
||||
summary – aggregate walk-forward judgment accuracy
|
||||
by_action – accuracy and average returns grouped by trigger/setup/chase/skip
|
||||
trade_simulation – trigger-only trade outcome using take-profit/stop-loss rules
|
||||
rules – objective evaluation assumptions used for the run
|
||||
examples – first evaluated judgments with outcome labels
|
||||
""",
|
||||
"json": """\
|
||||
JSON Output:
|
||||
{
|
||||
"summary": {"count": 120, "correct": 76, "incorrect": 44, "accuracy": 0.6333},
|
||||
"by_action": {"trigger": {"count": 12, "correct": 7, "accuracy": 0.5833}},
|
||||
"trade_simulation": {"trigger_trades": 12, "wins": 7, "losses": 5, "win_rate": 0.5833},
|
||||
"rules": {"horizon_hours": 24.0, "take_profit": 0.02, "stop_loss": 0.015, "setup_target": 0.01}
|
||||
}
|
||||
Fields:
|
||||
summary – aggregate walk-forward judgment accuracy
|
||||
by_action – accuracy and average returns grouped by trigger/setup/chase/skip
|
||||
trade_simulation – trigger-only trade outcome using take-profit/stop-loss rules
|
||||
rules – objective evaluation assumptions used for the run
|
||||
examples – first evaluated judgments with outcome labels
|
||||
""",
|
||||
},
|
||||
"opportunity/optimize": {
|
||||
"tui": """\
|
||||
TUI Output:
|
||||
BASELINE
|
||||
objective=0.5012 accuracy=0.5970 trigger_win_rate=0.4312
|
||||
|
||||
BEST
|
||||
objective=0.5341 accuracy=0.6214 trigger_win_rate=0.4862
|
||||
|
||||
JSON Output:
|
||||
{
|
||||
"baseline": {"objective": 0.5012, "summary": {"accuracy": 0.597}},
|
||||
"best": {"objective": 0.5341, "summary": {"accuracy": 0.6214}},
|
||||
"improvement": {"accuracy": 0.0244, "trigger_win_rate": 0.055},
|
||||
"recommended_config": {"opportunity.model_weights.trigger": 1.5}
|
||||
}
|
||||
Fields:
|
||||
baseline – evaluation snapshot with current model weights
|
||||
best – best walk-forward snapshot found by coordinate search
|
||||
improvement – deltas from baseline to best
|
||||
recommended_config – config keys that can be written with `coin config set`
|
||||
search – optimizer metadata; thresholds are fixed
|
||||
""",
|
||||
"json": """\
|
||||
JSON Output:
|
||||
{
|
||||
"baseline": {"objective": 0.5012, "summary": {"accuracy": 0.597}},
|
||||
"best": {"objective": 0.5341, "summary": {"accuracy": 0.6214}},
|
||||
"improvement": {"accuracy": 0.0244, "trigger_win_rate": 0.055},
|
||||
"recommended_config": {"opportunity.model_weights.trigger": 1.5}
|
||||
}
|
||||
Fields:
|
||||
baseline – evaluation snapshot with current model weights
|
||||
best – best walk-forward snapshot found by coordinate search
|
||||
improvement – deltas from baseline to best
|
||||
recommended_config – config keys that can be written with `coin config set`
|
||||
search – optimizer metadata; thresholds are fixed
|
||||
""",
|
||||
},
|
||||
"upgrade": {
|
||||
@@ -854,6 +938,32 @@ def build_parser() -> argparse.ArgumentParser:
|
||||
dataset_parser.add_argument("--run-days", type=float, help="Continuous scan simulation window in days")
|
||||
dataset_parser.add_argument("-o", "--output", help="Output dataset JSON path")
|
||||
_add_global_flags(dataset_parser)
|
||||
evaluate_parser = opportunity_subparsers.add_parser(
|
||||
"evaluate", aliases=["eval", "ev"], help="Evaluate opportunity accuracy from a historical dataset",
|
||||
description="Run a walk-forward evaluation over an opportunity dataset using point-in-time candles only.",
|
||||
)
|
||||
evaluate_parser.add_argument("dataset", help="Path to an opportunity dataset JSON file")
|
||||
evaluate_parser.add_argument("--horizon-hours", type=float, help="Forward evaluation horizon in hours")
|
||||
evaluate_parser.add_argument("--take-profit-pct", type=float, help="Trigger success take-profit threshold in percent")
|
||||
evaluate_parser.add_argument("--stop-loss-pct", type=float, help="Stop-loss threshold in percent")
|
||||
evaluate_parser.add_argument("--setup-target-pct", type=float, help="Setup success target threshold in percent")
|
||||
evaluate_parser.add_argument("--lookback", type=int, help="Closed candles used for each point-in-time score")
|
||||
evaluate_parser.add_argument("--top-n", type=int, help="Evaluate only the top-N ranked symbols at each decision time")
|
||||
evaluate_parser.add_argument("--examples", type=int, default=20, help="Number of example judgments to include")
|
||||
_add_global_flags(evaluate_parser)
|
||||
optimize_parser = opportunity_subparsers.add_parser(
|
||||
"optimize", aliases=["opt"], help="Optimize opportunity model weights from a historical dataset",
|
||||
description="Coordinate-search normalized model weights while keeping decision thresholds fixed.",
|
||||
)
|
||||
optimize_parser.add_argument("dataset", help="Path to an opportunity dataset JSON file")
|
||||
optimize_parser.add_argument("--horizon-hours", type=float, help="Forward evaluation horizon in hours")
|
||||
optimize_parser.add_argument("--take-profit-pct", type=float, help="Trigger success take-profit threshold in percent")
|
||||
optimize_parser.add_argument("--stop-loss-pct", type=float, help="Stop-loss threshold in percent")
|
||||
optimize_parser.add_argument("--setup-target-pct", type=float, help="Setup success target threshold in percent")
|
||||
optimize_parser.add_argument("--lookback", type=int, help="Closed candles used for each point-in-time score")
|
||||
optimize_parser.add_argument("--top-n", type=int, help="Evaluate only the top-N ranked symbols at each decision time")
|
||||
optimize_parser.add_argument("--passes", type=int, default=2, help="Coordinate-search passes over model weights")
|
||||
_add_global_flags(optimize_parser)
|
||||
|
||||
upgrade_parser = subparsers.add_parser(
|
||||
"upgrade", help="Upgrade coinhunter to the latest version",
|
||||
@@ -901,6 +1011,9 @@ _CANONICAL_SUBCOMMANDS = {
|
||||
"t": "tickers",
|
||||
"k": "klines",
|
||||
"ds": "dataset",
|
||||
"eval": "evaluate",
|
||||
"ev": "evaluate",
|
||||
"opt": "optimize",
|
||||
}
|
||||
|
||||
_COMMANDS_WITH_SUBCOMMANDS = {"market", "config", "opportunity"}
|
||||
@@ -964,6 +1077,7 @@ def main(argv: list[str] | None = None) -> int:
|
||||
parser = build_parser()
|
||||
raw_argv = _reorder_flag(raw_argv, "--agent", "-a")
|
||||
args = parser.parse_args(raw_argv)
|
||||
args.agent = bool(getattr(args, "agent", False) or "--agent" in raw_argv or "-a" in raw_argv)
|
||||
|
||||
# Normalize aliases to canonical command names
|
||||
if args.command:
|
||||
@@ -1148,6 +1262,36 @@ def main(argv: list[str] | None = None) -> int:
|
||||
return 0
|
||||
|
||||
if args.command == "opportunity":
|
||||
if args.opportunity_command == "optimize":
|
||||
with with_spinner("Optimizing opportunity model...", enabled=not args.agent):
|
||||
result = opportunity_evaluation_service.optimize_opportunity_model(
|
||||
config,
|
||||
dataset_path=args.dataset,
|
||||
horizon_hours=args.horizon_hours,
|
||||
take_profit=args.take_profit_pct / 100.0 if args.take_profit_pct is not None else None,
|
||||
stop_loss=args.stop_loss_pct / 100.0 if args.stop_loss_pct is not None else None,
|
||||
setup_target=args.setup_target_pct / 100.0 if args.setup_target_pct is not None else None,
|
||||
lookback=args.lookback,
|
||||
top_n=args.top_n,
|
||||
passes=args.passes,
|
||||
)
|
||||
print_output(result, agent=args.agent)
|
||||
return 0
|
||||
if args.opportunity_command == "evaluate":
|
||||
with with_spinner("Evaluating opportunity dataset...", enabled=not args.agent):
|
||||
result = opportunity_evaluation_service.evaluate_opportunity_dataset(
|
||||
config,
|
||||
dataset_path=args.dataset,
|
||||
horizon_hours=args.horizon_hours,
|
||||
take_profit=args.take_profit_pct / 100.0 if args.take_profit_pct is not None else None,
|
||||
stop_loss=args.stop_loss_pct / 100.0 if args.stop_loss_pct is not None else None,
|
||||
setup_target=args.setup_target_pct / 100.0 if args.setup_target_pct is not None else None,
|
||||
lookback=args.lookback,
|
||||
top_n=args.top_n,
|
||||
max_examples=args.examples,
|
||||
)
|
||||
print_output(result, agent=args.agent)
|
||||
return 0
|
||||
if args.opportunity_command == "dataset":
|
||||
with with_spinner("Collecting opportunity dataset...", enabled=not args.agent):
|
||||
result = opportunity_dataset_service.collect_opportunity_dataset(
|
||||
|
||||
@@ -45,6 +45,8 @@ scan_limit = 50
|
||||
ignore_dust = true
|
||||
entry_threshold = 1.5
|
||||
watch_threshold = 0.6
|
||||
min_trigger_score = 0.45
|
||||
min_setup_score = 0.35
|
||||
overlap_penalty = 0.6
|
||||
lookback_intervals = ["1h", "4h", "1d"]
|
||||
auto_research = true
|
||||
@@ -53,6 +55,11 @@ research_timeout_seconds = 4.0
|
||||
simulate_days = 7
|
||||
run_days = 7
|
||||
dataset_timeout_seconds = 15.0
|
||||
evaluation_horizon_hours = 24.0
|
||||
evaluation_take_profit_pct = 2.0
|
||||
evaluation_stop_loss_pct = 1.5
|
||||
evaluation_setup_target_pct = 1.0
|
||||
evaluation_lookback = 24
|
||||
|
||||
[opportunity.risk_limits]
|
||||
min_liquidity = 0.0
|
||||
@@ -82,6 +89,21 @@ unlock_penalty = 0.8
|
||||
regulatory_penalty = 0.4
|
||||
position_concentration_penalty = 0.6
|
||||
|
||||
[opportunity.model_weights]
|
||||
trend = 0.1406
|
||||
compression = 0.1688
|
||||
breakout_proximity = 0.0875
|
||||
higher_lows = 0.15
|
||||
range_position = 0.45
|
||||
fresh_breakout = 0.2
|
||||
volume = 0.525
|
||||
momentum = 0.1562
|
||||
setup = 1.875
|
||||
trigger = 1.875
|
||||
liquidity = 0.3
|
||||
volatility_penalty = 0.8
|
||||
extension_penalty = 0.45
|
||||
|
||||
[signal]
|
||||
lookback_interval = "1h"
|
||||
trend = 1.0
|
||||
|
||||
@@ -3,19 +3,22 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from dataclasses import asdict, dataclass
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.parse import parse_qs, urlencode, urlparse
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
import requests
|
||||
from requests.exceptions import RequestException
|
||||
|
||||
from ..runtime import get_runtime_paths
|
||||
from .market_service import normalize_symbol, normalize_symbols
|
||||
|
||||
HttpGet = Callable[[str, dict[str, str], float], Any]
|
||||
_PUBLIC_HTTP_ATTEMPTS = 5
|
||||
|
||||
_INTERVAL_SECONDS = {
|
||||
"1m": 60,
|
||||
@@ -64,18 +67,34 @@ def _as_int(value: Any, default: int = 0) -> int:
|
||||
|
||||
|
||||
def _public_http_get(url: str, headers: dict[str, str], timeout: float) -> Any:
|
||||
request = Request(url, headers=headers)
|
||||
with urlopen(request, timeout=timeout) as response: # noqa: S310 - market data endpoints are user-configurable
|
||||
return json.loads(response.read().decode("utf-8"))
|
||||
last_error: RequestException | None = None
|
||||
for attempt in range(_PUBLIC_HTTP_ATTEMPTS):
|
||||
try:
|
||||
response = requests.get(url, headers=headers, timeout=timeout)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
except RequestException as exc:
|
||||
last_error = exc
|
||||
if attempt < _PUBLIC_HTTP_ATTEMPTS - 1:
|
||||
time.sleep(0.5 * (attempt + 1))
|
||||
if last_error is not None:
|
||||
raise last_error
|
||||
raise RuntimeError("public HTTP request failed")
|
||||
|
||||
|
||||
def _public_http_status(url: str, headers: dict[str, str], timeout: float) -> tuple[int, str]:
|
||||
request = Request(url, headers=headers)
|
||||
try:
|
||||
with urlopen(request, timeout=timeout) as response: # noqa: S310 - market data endpoints are user-configurable
|
||||
return response.status, response.read().decode("utf-8")
|
||||
except HTTPError as exc:
|
||||
return exc.code, exc.read().decode("utf-8")
|
||||
last_error: RequestException | None = None
|
||||
for attempt in range(_PUBLIC_HTTP_ATTEMPTS):
|
||||
try:
|
||||
response = requests.get(url, headers=headers, timeout=timeout)
|
||||
return response.status_code, response.text
|
||||
except RequestException as exc:
|
||||
last_error = exc
|
||||
if attempt < _PUBLIC_HTTP_ATTEMPTS - 1:
|
||||
time.sleep(0.5 * (attempt + 1))
|
||||
if last_error is not None:
|
||||
raise last_error
|
||||
raise RuntimeError("public HTTP status request failed")
|
||||
|
||||
|
||||
def _build_url(base_url: str, path: str, params: dict[str, str]) -> str:
|
||||
@@ -253,7 +272,7 @@ def _probe_external_history(
|
||||
http_status = http_status or _public_http_status
|
||||
try:
|
||||
status, body = http_status(url, headers, timeout)
|
||||
except (TimeoutError, URLError, OSError) as exc:
|
||||
except (TimeoutError, RequestException, OSError) as exc:
|
||||
return {"provider": "coingecko", "status": "failed", "sample_date": sample_date, "error": str(exc)}
|
||||
if status == 200:
|
||||
return {"provider": "coingecko", "status": "available", "sample_date": sample_date}
|
||||
|
||||
536
src/coinhunter/services/opportunity_evaluation_service.py
Normal file
536
src/coinhunter/services/opportunity_evaluation_service.py
Normal file
@@ -0,0 +1,536 @@
|
||||
"""Walk-forward evaluation for historical opportunity datasets."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from collections import defaultdict
|
||||
from copy import deepcopy
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from statistics import mean
|
||||
from typing import Any
|
||||
|
||||
from .market_service import normalize_symbol
|
||||
from .opportunity_service import _action_for_opportunity, _opportunity_thresholds
|
||||
from .signal_service import (
|
||||
get_opportunity_model_weights,
|
||||
get_signal_interval,
|
||||
score_opportunity_signal,
|
||||
)
|
||||
|
||||
_OPTIMIZE_WEIGHT_KEYS = [
|
||||
"trend",
|
||||
"compression",
|
||||
"breakout_proximity",
|
||||
"higher_lows",
|
||||
"range_position",
|
||||
"fresh_breakout",
|
||||
"volume",
|
||||
"momentum",
|
||||
"setup",
|
||||
"trigger",
|
||||
"liquidity",
|
||||
"volatility_penalty",
|
||||
"extension_penalty",
|
||||
]
|
||||
_OPTIMIZE_MULTIPLIERS = [0.5, 0.75, 1.25, 1.5]
|
||||
|
||||
|
||||
def _as_float(value: Any, default: float = 0.0) -> float:
|
||||
try:
|
||||
return float(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _as_int(value: Any, default: int = 0) -> int:
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _parse_dt(value: Any) -> datetime | None:
|
||||
if not value:
|
||||
return None
|
||||
try:
|
||||
return datetime.fromisoformat(str(value).replace("Z", "+00:00")).astimezone(timezone.utc)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _iso_from_ms(value: int) -> str:
|
||||
return datetime.fromtimestamp(value / 1000, tz=timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
|
||||
|
||||
|
||||
def _close(row: list[Any]) -> float:
    """Close price of a kline row (column 4), coerced to float."""
    return _as_float(row[4])
|
||||
|
||||
|
||||
def _high(row: list[Any]) -> float:
    """High price of a kline row (column 2), coerced to float."""
    return _as_float(row[2])
|
||||
|
||||
|
||||
def _low(row: list[Any]) -> float:
    """Low price of a kline row (column 3), coerced to float."""
    return _as_float(row[3])
|
||||
|
||||
|
||||
def _volume(row: list[Any]) -> float:
    """Base-asset volume of a kline row (column 5), coerced to float."""
    return _as_float(row[5])
|
||||
|
||||
|
||||
def _quote_volume(row: list[Any]) -> float:
    """Quote-asset volume of a kline row (column 7).

    Falls back to close * base-volume when the row is too short to carry
    the quote-volume column.
    """
    if len(row) <= 7:
        return _close(row) * _volume(row)
    return _as_float(row[7])
|
||||
|
||||
|
||||
def _open_ms(row: list[Any]) -> int:
    """Candle open time in epoch milliseconds (kline column 0)."""
    return int(row[0])
|
||||
|
||||
|
||||
def _ticker_from_window(symbol: str, rows: list[list[Any]]) -> dict[str, Any]:
    """Build a minimal ticker-like summary dict from a window of candle rows."""
    window_open = _close(rows[0])
    window_last = _close(rows[-1])
    # Guard against a zero first close to avoid a division error.
    change_pct = 0.0
    if window_open:
        change_pct = (window_last - window_open) / window_open * 100.0
    highs = [_high(row) for row in rows]
    lows = [_low(row) for row in rows]
    return {
        "symbol": symbol,
        "price_change_pct": change_pct,
        "quote_volume": sum(_quote_volume(row) for row in rows),
        "high_price": max(highs),
        "low_price": min(lows),
    }
|
||||
|
||||
|
||||
def _window_series(rows: list[list[Any]]) -> tuple[list[float], list[float]]:
    """Split candle rows into parallel close-price and volume series."""
    closes: list[float] = []
    volumes: list[float] = []
    for row in rows:
        closes.append(_close(row))
        volumes.append(_volume(row))
    return closes, volumes
|
||||
|
||||
|
||||
def _pct(new: float, old: float) -> float:
|
||||
if old == 0:
|
||||
return 0.0
|
||||
return (new - old) / old
|
||||
|
||||
|
||||
def _path_stats(entry: float, future_rows: list[list[Any]], take_profit: float, stop_loss: float) -> dict[str, Any]:
    """Simulate a long position opened at *entry* across *future_rows*.

    Scans the forward candles in order and reports the first exit event:

    - ``"stop"`` when a candle's low reaches ``-stop_loss``,
    - ``"target"`` when a candle's high reaches ``+take_profit``,
    - ``"horizon"`` when neither level is hit before the window ends,
    - ``"missing"`` when no forward candles are available.

    The stop is checked before the target within each candle (stop-first
    same-candle policy: when both levels are touched in one bar the
    intra-bar order is unknown, so the worse outcome is assumed).

    Returns a dict with the exit event/return plus path statistics
    (``final_return``, ``max_upside``, ``max_drawdown``, ``bars``) that
    always cover the *entire* forward window, not just the bars up to
    the exit.
    """
    if not future_rows:
        return {
            "event": "missing",
            "exit_return": 0.0,
            "final_return": 0.0,
            "max_upside": 0.0,
            "max_drawdown": 0.0,
            "bars": 0,
        }

    def _result(event: str, exit_return: float) -> dict[str, Any]:
        # Shared exit-record builder: the original code repeated this dict
        # verbatim in three branches; path stats span the full window.
        return {
            "event": event,
            "exit_return": exit_return,
            "final_return": _pct(_close(future_rows[-1]), entry),
            "max_upside": max(_pct(_high(item), entry) for item in future_rows),
            "max_drawdown": min(_pct(_low(item), entry) for item in future_rows),
            "bars": len(future_rows),
        }

    for row in future_rows:
        # Stop-first: evaluate the downside before the upside in each bar.
        if _pct(_low(row), entry) <= -stop_loss:
            return _result("stop", -stop_loss)
        if _pct(_high(row), entry) >= take_profit:
            return _result("target", take_profit)

    return _result("horizon", _pct(_close(future_rows[-1]), entry))
|
||||
|
||||
|
||||
def _is_correct(action: str, trigger_path: dict[str, Any], setup_path: dict[str, Any]) -> bool:
|
||||
if action == "trigger":
|
||||
return str(trigger_path["event"]) == "target"
|
||||
if action == "setup":
|
||||
return str(setup_path["event"]) == "target"
|
||||
if action in {"skip", "chase"}:
|
||||
return str(setup_path["event"]) != "target"
|
||||
return False
|
||||
|
||||
|
||||
def _round_float(value: Any, digits: int = 4) -> float:
    """Coerce *value* to float (0.0 on failure) and round to *digits* places."""
    return round(_as_float(value), digits)
|
||||
|
||||
|
||||
def _finalize_bucket(bucket: dict[str, Any]) -> dict[str, Any]:
|
||||
count = int(bucket["count"])
|
||||
correct = int(bucket["correct"])
|
||||
returns = bucket["forward_returns"]
|
||||
trade_returns = bucket["trade_returns"]
|
||||
return {
|
||||
"count": count,
|
||||
"correct": correct,
|
||||
"incorrect": count - correct,
|
||||
"accuracy": round(correct / count, 4) if count else 0.0,
|
||||
"avg_forward_return": round(mean(returns), 4) if returns else 0.0,
|
||||
"avg_trade_return": round(mean(trade_returns), 4) if trade_returns else 0.0,
|
||||
}
|
||||
|
||||
|
||||
def _bucket() -> dict[str, Any]:
|
||||
return {"count": 0, "correct": 0, "forward_returns": [], "trade_returns": []}
|
||||
|
||||
|
||||
def evaluate_opportunity_dataset(
    config: dict[str, Any],
    *,
    dataset_path: str,
    horizon_hours: float | None = None,
    take_profit: float | None = None,
    stop_loss: float | None = None,
    setup_target: float | None = None,
    lookback: int | None = None,
    top_n: int | None = None,
    max_examples: int = 20,
) -> dict[str, Any]:
    """Evaluate opportunity actions using only point-in-time historical candles.

    Walk-forward procedure: for each candle open time inside the dataset's
    simulation window, every symbol is scored using only the ``lookback``
    closed candles ending at that time; the top-N candidates by score are
    then judged against the candles that follow (up to the horizon) using
    fixed take-profit / stop-loss / setup-target rules.

    Args:
        config: application config; ``config["opportunity"]`` supplies the
            default horizon, thresholds, lookback, and top-N when the
            corresponding keyword argument is None.
        dataset_path: dataset JSON file with ``metadata.plan`` and
            per-symbol ``klines`` keyed by interval.
        horizon_hours: forward evaluation window; falls back to the plan's
            ``simulate_days`` and then to config.
        take_profit / stop_loss / setup_target: thresholds as *fractions*
            (config defaults are stored in percent and divided by 100 here).
        lookback: closed candles used per point-in-time score.
        top_n: number of ranked symbols judged at each decision time.
        max_examples: cap on the "examples"/"incorrect_examples" lists.

    Returns:
        Dict with "summary", "by_action", "trade_simulation", "rules",
        "examples", and "incorrect_examples" sections.

    Raises:
        ValueError: if the plan lacks simulation_start/simulation_end.
    """
    # --- Load dataset and resolve run parameters -------------------------
    dataset_file = Path(dataset_path).expanduser()
    dataset = json.loads(dataset_file.read_text(encoding="utf-8"))
    metadata = dataset.get("metadata", {})
    plan = metadata.get("plan", {})
    klines = dataset.get("klines", {})
    opportunity_config = config.get("opportunity", {})

    intervals = list(plan.get("intervals") or [])
    configured_interval = get_signal_interval(config)
    # Prefer the configured signal interval when the dataset contains it;
    # otherwise use the dataset's first interval, then "1h".
    primary_interval = configured_interval if configured_interval in intervals else (intervals[0] if intervals else "1h")
    simulation_start = _parse_dt(plan.get("simulation_start"))
    simulation_end = _parse_dt(plan.get("simulation_end"))
    if simulation_start is None or simulation_end is None:
        raise ValueError("dataset metadata must include plan.simulation_start and plan.simulation_end")

    # Horizon fallback chain: explicit arg -> plan.simulate_days -> config.
    horizon = _as_float(horizon_hours, 0.0)
    if horizon <= 0:
        horizon = _as_float(plan.get("simulate_days"), 0.0) * 24.0
    if horizon <= 0:
        horizon = _as_float(opportunity_config.get("evaluation_horizon_hours"), 24.0)

    # Config thresholds are stored in percent; the arguments are fractions.
    take_profit_value = take_profit if take_profit is not None else _as_float(opportunity_config.get("evaluation_take_profit_pct"), 2.0) / 100.0
    stop_loss_value = stop_loss if stop_loss is not None else _as_float(opportunity_config.get("evaluation_stop_loss_pct"), 1.5) / 100.0
    setup_target_value = setup_target if setup_target is not None else _as_float(opportunity_config.get("evaluation_setup_target_pct"), 1.0) / 100.0
    # `or` means both None and 0 fall back to the configured defaults.
    lookback_bars = lookback or _as_int(opportunity_config.get("evaluation_lookback"), 24)
    selected_top_n = top_n or _as_int(opportunity_config.get("top_n"), 10)
    thresholds = _opportunity_thresholds(config)
    horizon_ms = int(horizon * 60 * 60 * 1000)
    start_ms = int(simulation_start.timestamp() * 1000)
    end_ms = int(simulation_end.timestamp() * 1000)

    # --- Index candles per symbol for O(1) open-time lookup --------------
    rows_by_symbol: dict[str, list[list[Any]]] = {}
    index_by_symbol: dict[str, dict[int, int]] = {}
    for symbol, by_interval in klines.items():
        rows = by_interval.get(primary_interval, [])
        normalized = normalize_symbol(symbol)
        if rows:
            rows_by_symbol[normalized] = rows
            index_by_symbol[normalized] = {_open_ms(row): index for index, row in enumerate(rows)}

    # Union of open times across all symbols inside the simulation window.
    decision_times = sorted(
        {
            _open_ms(row)
            for rows in rows_by_symbol.values()
            for row in rows
            if start_ms <= _open_ms(row) < end_ms
        }
    )

    # --- Walk forward: score, rank, and judge at each decision time ------
    judgments: list[dict[str, Any]] = []
    skipped_missing_future = 0
    skipped_warmup = 0
    for decision_time in decision_times:
        candidates: list[dict[str, Any]] = []
        for symbol, rows in rows_by_symbol.items():
            index = index_by_symbol[symbol].get(decision_time)
            if index is None:
                continue
            # Point-in-time window: the lookback candles ending at the
            # decision time — no future data leaks into the score.
            window = rows[max(0, index - lookback_bars + 1) : index + 1]
            if len(window) < lookback_bars:
                skipped_warmup += 1
                continue
            future_rows = [row for row in rows[index + 1 :] if _open_ms(row) <= decision_time + horizon_ms]
            if not future_rows:
                skipped_missing_future += 1
                continue
            closes, volumes = _window_series(window)
            ticker = _ticker_from_window(symbol, window)
            opportunity_score, metrics = score_opportunity_signal(closes, volumes, ticker, opportunity_config)
            score = opportunity_score
            metrics["opportunity_score"] = round(opportunity_score, 4)
            # The dataset carries no historical position or research
            # snapshots, so those model inputs are pinned to zero.
            metrics["position_weight"] = 0.0
            metrics["research_score"] = 0.0
            action, reasons = _action_for_opportunity(score, metrics, thresholds)
            candidates.append(
                {
                    "symbol": symbol,
                    "time": decision_time,
                    "action": action,
                    "score": round(score, 4),
                    "metrics": metrics,
                    "reasons": reasons,
                    "entry_price": _close(window[-1]),
                    "future_rows": future_rows,
                }
            )

        # Judge only the top-N ranked candidates at this decision time.
        for rank, candidate in enumerate(sorted(candidates, key=lambda item: item["score"], reverse=True)[:selected_top_n], start=1):
            trigger_path = _path_stats(candidate["entry_price"], candidate["future_rows"], take_profit_value, stop_loss_value)
            setup_path = _path_stats(candidate["entry_price"], candidate["future_rows"], setup_target_value, stop_loss_value)
            correct = _is_correct(candidate["action"], trigger_path, setup_path)
            judgments.append(
                {
                    "time": _iso_from_ms(candidate["time"]),
                    "rank": rank,
                    "symbol": candidate["symbol"],
                    "action": candidate["action"],
                    "score": candidate["score"],
                    "correct": correct,
                    "entry_price": round(candidate["entry_price"], 8),
                    "forward_return": _round_float(trigger_path["final_return"]),
                    "max_upside": _round_float(trigger_path["max_upside"]),
                    "max_drawdown": _round_float(trigger_path["max_drawdown"]),
                    # Trade return only applies to actionable trigger calls.
                    "trade_return": _round_float(trigger_path["exit_return"]) if candidate["action"] == "trigger" else 0.0,
                    "trigger_event": trigger_path["event"],
                    "setup_event": setup_path["event"],
                    "metrics": candidate["metrics"],
                    "reason": candidate["reasons"][0] if candidate["reasons"] else "",
                }
            )

    # --- Aggregate judgments into overall and per-action buckets ---------
    overall = _bucket()
    by_action: dict[str, dict[str, Any]] = defaultdict(_bucket)
    trigger_returns: list[float] = []
    for judgment in judgments:
        action = judgment["action"]
        for bucket in (overall, by_action[action]):
            bucket["count"] += 1
            bucket["correct"] += 1 if judgment["correct"] else 0
            bucket["forward_returns"].append(judgment["forward_return"])
            if action == "trigger":
                bucket["trade_returns"].append(judgment["trade_return"])
        if action == "trigger":
            trigger_returns.append(judgment["trade_return"])

    by_action_result = {action: _finalize_bucket(bucket) for action, bucket in sorted(by_action.items())}
    incorrect_examples = [item for item in judgments if not item["correct"]][:max_examples]
    examples = judgments[:max_examples]
    trigger_count = by_action_result.get("trigger", {}).get("count", 0)
    trigger_correct = by_action_result.get("trigger", {}).get("correct", 0)
    return {
        "summary": {
            **_finalize_bucket(overall),
            "decision_times": len(decision_times),
            "symbols": sorted(rows_by_symbol),
            "interval": primary_interval,
            "top_n": selected_top_n,
            "skipped_warmup": skipped_warmup,
            "skipped_missing_future": skipped_missing_future,
        },
        "by_action": by_action_result,
        "trade_simulation": {
            "trigger_trades": trigger_count,
            "wins": trigger_correct,
            "losses": trigger_count - trigger_correct,
            "win_rate": round(trigger_correct / trigger_count, 4) if trigger_count else 0.0,
            "avg_trade_return": round(mean(trigger_returns), 4) if trigger_returns else 0.0,
        },
        "rules": {
            "dataset": str(dataset_file),
            "interval": primary_interval,
            "horizon_hours": round(horizon, 4),
            "lookback_bars": lookback_bars,
            "take_profit": round(take_profit_value, 4),
            "stop_loss": round(stop_loss_value, 4),
            "setup_target": round(setup_target_value, 4),
            "same_candle_policy": "stop_first",
            "research_mode": "disabled: dataset has no point-in-time research snapshots",
        },
        "examples": examples,
        "incorrect_examples": incorrect_examples,
    }
|
||||
|
||||
|
||||
def _objective(result: dict[str, Any]) -> float:
    """Scalar fitness for the optimizer: blends accuracy, win rate, trade return, and trigger coverage."""
    summary = result.get("summary", {})
    trade = result.get("trade_simulation", {})
    setup_accuracy = _as_float(result.get("by_action", {}).get("setup", {}).get("accuracy"))
    total = _as_float(summary.get("count"))
    trigger_share = _as_float(trade.get("trigger_trades")) / total if total else 0.0
    # Clamp the average trade return so a single outlier run cannot
    # dominate the objective.
    clamped_return = max(min(_as_float(trade.get("avg_trade_return")), 0.03), -0.03)
    # Reward approaching an ~8% trigger rate, capped at full credit.
    coverage = min(trigger_share / 0.08, 1.0)
    score = (
        0.45 * _as_float(summary.get("accuracy"))
        + 0.20 * setup_accuracy
        + 0.25 * _as_float(trade.get("win_rate"))
        + 6.0 * clamped_return
        + 0.05 * coverage
    )
    return round(score, 6)
|
||||
|
||||
|
||||
def _copy_config_with_weights(config: dict[str, Any], weights: dict[str, float]) -> dict[str, Any]:
|
||||
candidate = deepcopy(config)
|
||||
candidate.setdefault("opportunity", {})["model_weights"] = weights
|
||||
return candidate
|
||||
|
||||
|
||||
def _evaluation_snapshot(result: dict[str, Any], objective: float, weights: dict[str, float]) -> dict[str, Any]:
|
||||
return {
|
||||
"objective": objective,
|
||||
"weights": {key: round(value, 4) for key, value in sorted(weights.items())},
|
||||
"summary": result.get("summary", {}),
|
||||
"by_action": result.get("by_action", {}),
|
||||
"trade_simulation": result.get("trade_simulation", {}),
|
||||
}
|
||||
|
||||
|
||||
def optimize_opportunity_model(
    config: dict[str, Any],
    *,
    dataset_path: str,
    horizon_hours: float | None = None,
    take_profit: float | None = None,
    stop_loss: float | None = None,
    setup_target: float | None = None,
    lookback: int | None = None,
    top_n: int | None = None,
    passes: int = 2,
) -> dict[str, Any]:
    """Coordinate-search model weights against a walk-forward dataset.

    This intentionally optimizes model feature weights only. Entry/watch policy
    thresholds remain fixed so the search improves signal construction instead
    of fitting decision cutoffs to a sample.

    Args:
        config: Full application config; ``config["opportunity"]`` seeds the
            starting weights and is never mutated (candidates are deep copies).
        dataset_path: Path to the collected walk-forward kline dataset.
        horizon_hours / take_profit / stop_loss / setup_target / lookback /
            top_n: Evaluation-rule overrides, forwarded unchanged to
            ``evaluate_opportunity_dataset`` (``None`` keeps its defaults).
        passes: Number of full sweeps over ``_OPTIMIZE_WEIGHT_KEYS``; values
            below zero are treated as zero.

    Returns:
        Dict with ``baseline`` and ``best`` snapshots, an ``improvement``
        delta section, a flat ``recommended_config`` mapping, ``search``
        metadata, and the last 20 ``history`` entries.
    """
    base_weights = get_opportunity_model_weights(config.get("opportunity", {}))

    # Evaluate one candidate weight set; returns the full evaluation result
    # plus its scalar objective. max_examples=0 skips per-row example output.
    def evaluate(weights: dict[str, float]) -> tuple[dict[str, Any], float]:
        result = evaluate_opportunity_dataset(
            _copy_config_with_weights(config, weights),
            dataset_path=dataset_path,
            horizon_hours=horizon_hours,
            take_profit=take_profit,
            stop_loss=stop_loss,
            setup_target=setup_target,
            lookback=lookback,
            top_n=top_n,
            max_examples=0,
        )
        return result, _objective(result)

    baseline_result, baseline_objective = evaluate(base_weights)
    best_weights = dict(base_weights)
    best_result = baseline_result
    best_objective = baseline_objective
    evaluations = 1  # baseline counts as the first evaluation
    history: list[dict[str, Any]] = [
        {
            "pass": 0,
            "key": "baseline",
            "multiplier": 1.0,
            "objective": baseline_objective,
            "accuracy": baseline_result["summary"]["accuracy"],
            "trigger_win_rate": baseline_result["trade_simulation"]["win_rate"],
        }
    ]

    # Greedy coordinate descent: for each weight key, try every multiplier
    # against the current best, keep the best scaling for that key, and stop
    # early after a pass that produced no improvement anywhere.
    for pass_index in range(max(passes, 0)):
        improved = False
        for key in _OPTIMIZE_WEIGHT_KEYS:
            current_value = best_weights.get(key, 0.0)
            if current_value <= 0:
                # Non-positive weights are outside the multiplicative search space.
                continue
            local_best_weights = best_weights
            local_best_result = best_result
            local_best_objective = best_objective
            local_best_multiplier = 1.0
            for multiplier in _OPTIMIZE_MULTIPLIERS:
                candidate_weights = dict(best_weights)
                # Floor at 0.01 so a weight can shrink but never vanish.
                candidate_weights[key] = round(max(current_value * multiplier, 0.01), 4)
                candidate_result, candidate_objective = evaluate(candidate_weights)
                evaluations += 1
                history.append(
                    {
                        "pass": pass_index + 1,
                        "key": key,
                        "multiplier": multiplier,
                        "objective": candidate_objective,
                        "accuracy": candidate_result["summary"]["accuracy"],
                        "trigger_win_rate": candidate_result["trade_simulation"]["win_rate"],
                    }
                )
                if candidate_objective > local_best_objective:
                    local_best_weights = candidate_weights
                    local_best_result = candidate_result
                    local_best_objective = candidate_objective
                    local_best_multiplier = multiplier
            if local_best_objective > best_objective:
                # Commit the winning multiplier for this key and record it.
                best_weights = local_best_weights
                best_result = local_best_result
                best_objective = local_best_objective
                improved = True
                history.append(
                    {
                        "pass": pass_index + 1,
                        "key": key,
                        "multiplier": local_best_multiplier,
                        "objective": best_objective,
                        "accuracy": best_result["summary"]["accuracy"],
                        "trigger_win_rate": best_result["trade_simulation"]["win_rate"],
                        "selected": True,
                    }
                )
        if not improved:
            break

    # Flat dotted keys so the result can be pasted straight into config.
    recommended_config = {
        f"opportunity.model_weights.{key}": round(value, 4)
        for key, value in sorted(best_weights.items())
    }
    return {
        "baseline": _evaluation_snapshot(baseline_result, baseline_objective, base_weights),
        "best": _evaluation_snapshot(best_result, best_objective, best_weights),
        "improvement": {
            "objective": round(best_objective - baseline_objective, 6),
            "accuracy": round(
                _as_float(best_result["summary"].get("accuracy")) - _as_float(baseline_result["summary"].get("accuracy")),
                4,
            ),
            "trigger_win_rate": round(
                _as_float(best_result["trade_simulation"].get("win_rate"))
                - _as_float(baseline_result["trade_simulation"].get("win_rate")),
                4,
            ),
            "avg_trade_return": round(
                _as_float(best_result["trade_simulation"].get("avg_trade_return"))
                - _as_float(baseline_result["trade_simulation"].get("avg_trade_return")),
                4,
            ),
        },
        "recommended_config": recommended_config,
        "search": {
            "passes": passes,
            "evaluations": evaluations,
            "optimized": "model_weights_only",
            "thresholds": "fixed",
            "objective": "0.45*accuracy + 0.20*setup_accuracy + 0.25*trigger_win_rate + 6*avg_trade_return + 0.05*trigger_coverage",
        },
        "history": history[-20:],  # cap payload size; keep the most recent steps
    }
|
||||
@@ -28,6 +28,8 @@ def _opportunity_thresholds(config: dict[str, Any]) -> dict[str, float]:
|
||||
return {
|
||||
"entry_threshold": float(opportunity_config.get("entry_threshold", 1.5)),
|
||||
"watch_threshold": float(opportunity_config.get("watch_threshold", 0.6)),
|
||||
"min_trigger_score": float(opportunity_config.get("min_trigger_score", 0.45)),
|
||||
"min_setup_score": float(opportunity_config.get("min_setup_score", 0.35)),
|
||||
"overlap_penalty": float(opportunity_config.get("overlap_penalty", 0.6)),
|
||||
}
|
||||
|
||||
@@ -228,11 +230,22 @@ def _action_for_opportunity(score: float, metrics: dict[str, float], thresholds:
|
||||
if metrics["extension_penalty"] >= 1.0 and (metrics["recent_runup"] >= 0.10 or metrics["breakout_pct"] >= 0.03):
|
||||
reasons.append("price is already extended, so this is treated as a chase setup")
|
||||
return "chase", reasons
|
||||
if score >= thresholds["entry_threshold"]:
|
||||
if (
|
||||
score >= thresholds["entry_threshold"]
|
||||
and metrics.get("edge_score", 0.0) >= 0.0
|
||||
and metrics["trigger_score"] >= thresholds["min_trigger_score"]
|
||||
and metrics["setup_score"] >= thresholds["min_setup_score"]
|
||||
):
|
||||
reasons.append("fresh breakout trigger is forming without excessive extension")
|
||||
return "trigger", reasons
|
||||
if score >= thresholds["watch_threshold"] and metrics.get("edge_score", 0.0) < 0.0:
|
||||
reasons.append("standardized feature balance is negative despite enough raw score")
|
||||
return "skip", reasons
|
||||
if score >= thresholds["watch_threshold"]:
|
||||
reasons.append("setup is constructive but still needs a cleaner trigger")
|
||||
if score >= thresholds["entry_threshold"]:
|
||||
reasons.append("research and liquidity are constructive, but technical trigger quality is not clean enough")
|
||||
else:
|
||||
reasons.append("setup is constructive but still needs a cleaner trigger")
|
||||
return "setup", reasons
|
||||
reasons.append("setup, trigger, or liquidity quality is too weak")
|
||||
return "skip", reasons
|
||||
|
||||
@@ -2,16 +2,19 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from math import log10
|
||||
from typing import Any
|
||||
from urllib.parse import urlencode
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
import requests
|
||||
from requests.exceptions import RequestException
|
||||
|
||||
from .market_service import base_asset, normalize_symbol
|
||||
|
||||
HttpGet = Callable[[str, dict[str, str], float], Any]
|
||||
_PUBLIC_HTTP_ATTEMPTS = 5
|
||||
|
||||
|
||||
def _clamp(value: float, low: float = 0.0, high: float = 1.0) -> float:
|
||||
@@ -44,9 +47,19 @@ def _pct_score(value: float, *, low: float, high: float) -> float:
|
||||
|
||||
|
||||
def _public_http_get(url: str, headers: dict[str, str], timeout: float) -> Any:
    """Fetch and decode JSON from a public market-data endpoint with retry.

    Makes up to ``_PUBLIC_HTTP_ATTEMPTS`` attempts, sleeping with linear
    backoff (0.5s, 1.0s, ...) between failures, and re-raises the last
    transport/HTTP error if every attempt fails.

    Defect fixed: the previous body still contained the old single-shot
    ``urlopen`` implementation ahead of the retry loop; it returned (or
    raised) immediately, leaving the entire requests-based retry logic
    unreachable dead code. The dead path has been removed.

    Args:
        url: Fully built request URL (user-configured market data endpoint).
        headers: HTTP headers to send.
        timeout: Per-attempt timeout in seconds.

    Returns:
        The parsed JSON body of the first successful response.

    Raises:
        requests.exceptions.RequestException: last error after all attempts.
    """
    last_error: RequestException | None = None
    for attempt in range(_PUBLIC_HTTP_ATTEMPTS):
        try:
            response = requests.get(url, headers=headers, timeout=timeout)
            response.raise_for_status()
            return response.json()
        except RequestException as exc:
            last_error = exc
            if attempt < _PUBLIC_HTTP_ATTEMPTS - 1:
                # Linear backoff before the next attempt.
                time.sleep(0.5 * (attempt + 1))
    if last_error is not None:
        raise last_error
    # Defensive: only reachable if _PUBLIC_HTTP_ATTEMPTS were ever set to 0.
    raise RuntimeError("public HTTP request failed")
|
||||
|
||||
|
||||
def _build_url(base_url: str, path: str, params: dict[str, str]) -> str:
|
||||
|
||||
@@ -23,6 +23,45 @@ def _range_pct(values: list[float], denominator: float) -> float:
|
||||
return (max(values) - min(values)) / denominator
|
||||
|
||||
|
||||
# Baseline feature weights for the opportunity scoring model. Keys define the
# full set of tunable features; config can override values but not add keys.
_DEFAULT_OPPORTUNITY_MODEL_WEIGHTS = {
    "trend": 0.1406,
    "compression": 0.1688,
    "breakout_proximity": 0.0875,
    "higher_lows": 0.15,
    "range_position": 0.45,
    "fresh_breakout": 0.2,
    "volume": 0.525,
    "momentum": 0.1562,
    "setup": 1.875,
    "trigger": 1.875,
    "liquidity": 0.3,
    "volatility_penalty": 0.8,
    "extension_penalty": 0.45,
}


def get_opportunity_model_weights(opportunity_config: dict[str, Any]) -> dict[str, float]:
    """Merge configured ``model_weights`` overrides onto the built-in defaults.

    Unknown override keys are ignored; every returned value is coerced to float.
    """
    overrides = opportunity_config.get("model_weights", {})
    weights: dict[str, float] = {}
    for name, default in _DEFAULT_OPPORTUNITY_MODEL_WEIGHTS.items():
        weights[name] = float(overrides.get(name, default))
    return weights
|
||||
|
||||
|
||||
def _weighted_quality(values: dict[str, float], weights: dict[str, float]) -> float:
    """Weighted average of quality components, clamped to [-1, 1].

    Components with a missing, zero, or negative weight are ignored; when
    nothing contributes, the quality is neutral (0.0).
    """
    contributions: list[tuple[float, float]] = []
    for name, quality in values.items():
        weight = max(float(weights.get(name, 0.0)), 0.0)
        if weight > 0:
            contributions.append((weight, quality))
    if not contributions:
        return 0.0
    total = sum(weight for weight, _ in contributions)
    blended = sum(weight * quality for weight, quality in contributions)
    return _clamp(blended / total, -1.0, 1.0)
|
||||
|
||||
|
||||
def get_signal_weights(config: dict[str, Any]) -> dict[str, float]:
|
||||
signal_config = config.get("signal", {})
|
||||
return {
|
||||
@@ -104,11 +143,17 @@ def score_opportunity_signal(
|
||||
ticker: dict[str, Any],
|
||||
opportunity_config: dict[str, Any],
|
||||
) -> tuple[float, dict[str, float]]:
|
||||
model_weights = get_opportunity_model_weights(opportunity_config)
|
||||
if len(closes) < 6 or len(volumes) < 2:
|
||||
return 0.0, {
|
||||
"setup_score": 0.0,
|
||||
"trigger_score": 0.0,
|
||||
"liquidity_score": 0.0,
|
||||
"edge_score": 0.0,
|
||||
"setup_quality": 0.0,
|
||||
"trigger_quality": 0.0,
|
||||
"liquidity_quality": 0.0,
|
||||
"risk_quality": 0.0,
|
||||
"extension_penalty": 0.0,
|
||||
"breakout_pct": 0.0,
|
||||
"recent_runup": 0.0,
|
||||
@@ -117,11 +162,20 @@ def score_opportunity_signal(
|
||||
}
|
||||
|
||||
current = closes[-1]
|
||||
sma_short = mean(closes[-5:])
|
||||
sma_long = mean(closes[-20:]) if len(closes) >= 20 else mean(closes)
|
||||
if current >= sma_short >= sma_long:
|
||||
trend_quality = 1.0
|
||||
elif current < sma_short < sma_long:
|
||||
trend_quality = -1.0
|
||||
else:
|
||||
trend_quality = 0.0
|
||||
prior_closes = closes[:-1]
|
||||
prev_high = max(prior_closes[-20:]) if prior_closes else current
|
||||
recent_low = min(closes[-20:])
|
||||
range_width = prev_high - recent_low
|
||||
range_position = _clamp((current - recent_low) / range_width, 0.0, 1.2) if range_width else 0.0
|
||||
range_position_quality = 2.0 * _clamp(1.0 - abs(range_position - 0.62) / 0.62, 0.0, 1.0) - 1.0
|
||||
breakout_pct = _safe_pct(current, prev_high)
|
||||
|
||||
recent_range = _range_pct(closes[-6:], current)
|
||||
@@ -131,27 +185,45 @@ def score_opportunity_signal(
|
||||
|
||||
recent_low_window = min(closes[-5:])
|
||||
prior_low_window = min(closes[-10:-5]) if len(closes) >= 10 else min(closes[:-5])
|
||||
higher_lows = 1.0 if recent_low_window > prior_low_window else 0.0
|
||||
higher_lows = 1.0 if recent_low_window > prior_low_window else -1.0
|
||||
breakout_proximity = _clamp(1.0 - abs(breakout_pct) / 0.03, 0.0, 1.0)
|
||||
setup_score = _clamp(0.45 * compression + 0.35 * breakout_proximity + 0.20 * higher_lows, 0.0, 1.0)
|
||||
breakout_proximity_quality = 2.0 * breakout_proximity - 1.0
|
||||
setup_quality = _weighted_quality(
|
||||
{
|
||||
"trend": trend_quality,
|
||||
"compression": compression,
|
||||
"breakout_proximity": breakout_proximity_quality,
|
||||
"higher_lows": higher_lows,
|
||||
"range_position": range_position_quality,
|
||||
},
|
||||
model_weights,
|
||||
)
|
||||
setup_score = _clamp((setup_quality + 1.0) / 2.0, 0.0, 1.0)
|
||||
|
||||
avg_volume = mean(volumes[:-1])
|
||||
volume_confirmation = volumes[-1] / avg_volume if avg_volume else 1.0
|
||||
volume_score = _clamp((volume_confirmation - 1.0) / 1.5, -0.5, 1.0)
|
||||
volume_score = _clamp((volume_confirmation - 1.0) / 1.5, -1.0, 1.0)
|
||||
momentum_3 = _safe_pct(closes[-1], closes[-4])
|
||||
if momentum_3 <= 0:
|
||||
controlled_momentum = _clamp(momentum_3 / 0.05, -0.5, 0.0)
|
||||
controlled_momentum = _clamp(momentum_3 / 0.05, -1.0, 0.0)
|
||||
elif momentum_3 <= 0.05:
|
||||
controlled_momentum = momentum_3 / 0.05
|
||||
elif momentum_3 <= 0.12:
|
||||
controlled_momentum = 1.0 - ((momentum_3 - 0.05) / 0.07) * 0.5
|
||||
else:
|
||||
controlled_momentum = 0.2
|
||||
controlled_momentum = -0.2
|
||||
fresh_breakout = _clamp(1.0 - abs(breakout_pct) / 0.025, 0.0, 1.0)
|
||||
trigger_score = _clamp(0.40 * fresh_breakout + 0.35 * volume_score + 0.25 * controlled_momentum, 0.0, 1.0)
|
||||
fresh_breakout_quality = 2.0 * fresh_breakout - 1.0
|
||||
trigger_quality = _weighted_quality(
|
||||
{
|
||||
"fresh_breakout": fresh_breakout_quality,
|
||||
"volume": volume_score,
|
||||
"momentum": controlled_momentum,
|
||||
},
|
||||
model_weights,
|
||||
)
|
||||
trigger_score = _clamp((trigger_quality + 1.0) / 2.0, 0.0, 1.0)
|
||||
|
||||
sma_short = mean(closes[-5:])
|
||||
sma_long = mean(closes[-20:]) if len(closes) >= 20 else mean(closes)
|
||||
extension_from_short = _safe_pct(current, sma_short)
|
||||
recent_runup = _safe_pct(current, closes[-6])
|
||||
extension_penalty = (
|
||||
@@ -167,18 +239,46 @@ def score_opportunity_signal(
|
||||
liquidity_score = _clamp(log10(max(quote_volume / min_quote_volume, 1.0)) / 2.0, 0.0, 1.0)
|
||||
else:
|
||||
liquidity_score = 1.0
|
||||
|
||||
score = (
|
||||
setup_score
|
||||
+ 1.2 * trigger_score
|
||||
+ 0.4 * liquidity_score
|
||||
- 0.8 * volatility
|
||||
- 0.9 * extension_penalty
|
||||
liquidity_quality = 2.0 * liquidity_score - 1.0
|
||||
volatility_quality = 1.0 - 2.0 * _clamp(volatility / 0.12, 0.0, 1.0)
|
||||
extension_quality = 1.0 - 2.0 * _clamp(extension_penalty / 2.0, 0.0, 1.0)
|
||||
risk_quality = _weighted_quality(
|
||||
{
|
||||
"volatility_penalty": volatility_quality,
|
||||
"extension_penalty": extension_quality,
|
||||
},
|
||||
model_weights,
|
||||
)
|
||||
edge_score = _weighted_quality(
|
||||
{
|
||||
"setup": setup_quality,
|
||||
"trigger": trigger_quality,
|
||||
"liquidity": liquidity_quality,
|
||||
"trend": trend_quality,
|
||||
"range_position": range_position_quality,
|
||||
"volatility_penalty": volatility_quality,
|
||||
"extension_penalty": extension_quality,
|
||||
},
|
||||
model_weights,
|
||||
)
|
||||
|
||||
score = 1.0 + edge_score
|
||||
metrics = {
|
||||
"setup_score": round(setup_score, 4),
|
||||
"trigger_score": round(trigger_score, 4),
|
||||
"liquidity_score": round(liquidity_score, 4),
|
||||
"edge_score": round(edge_score, 4),
|
||||
"setup_quality": round(setup_quality, 4),
|
||||
"trigger_quality": round(trigger_quality, 4),
|
||||
"liquidity_quality": round(liquidity_quality, 4),
|
||||
"risk_quality": round(risk_quality, 4),
|
||||
"trend_quality": round(trend_quality, 4),
|
||||
"range_position_quality": round(range_position_quality, 4),
|
||||
"breakout_proximity_quality": round(breakout_proximity_quality, 4),
|
||||
"volume_quality": round(volume_score, 4),
|
||||
"momentum_quality": round(controlled_momentum, 4),
|
||||
"extension_quality": round(extension_quality, 4),
|
||||
"volatility_quality": round(volatility_quality, 4),
|
||||
"extension_penalty": round(extension_penalty, 4),
|
||||
"compression": round(compression, 4),
|
||||
"range_position": round(range_position, 4),
|
||||
|
||||
@@ -261,15 +261,18 @@ class CLITestCase(unittest.TestCase):
|
||||
return_value={"path": "/tmp/dataset.json", "symbols": ["BTCUSDT"]},
|
||||
) as collect_mock,
|
||||
patch.object(
|
||||
cli, "print_output", side_effect=lambda payload, **kwargs: captured.setdefault("payload", payload)
|
||||
cli,
|
||||
"print_output",
|
||||
side_effect=lambda payload, **kwargs: captured.update({"payload": payload, "agent": kwargs["agent"]}),
|
||||
),
|
||||
):
|
||||
result = cli.main(
|
||||
["opportunity", "dataset", "--symbols", "BTCUSDT", "--simulate-days", "3", "--run-days", "7"]
|
||||
["opportunity", "dataset", "--symbols", "BTCUSDT", "--simulate-days", "3", "--run-days", "7", "--agent"]
|
||||
)
|
||||
|
||||
self.assertEqual(result, 0)
|
||||
self.assertEqual(captured["payload"]["path"], "/tmp/dataset.json")
|
||||
self.assertTrue(captured["agent"])
|
||||
collect_mock.assert_called_once_with(
|
||||
config,
|
||||
symbols=["BTCUSDT"],
|
||||
@@ -277,3 +280,113 @@ class CLITestCase(unittest.TestCase):
|
||||
run_days=7.0,
|
||||
output_path=None,
|
||||
)
|
||||
|
||||
    def test_opportunity_evaluate_dispatches_without_private_client(self):
        """`coin opportunity evaluate` must run from the dataset alone.

        Verifies CLI flag parsing/conversion (percent flags -> fractions) and
        that no private API client is ever constructed for this subcommand.
        """
        captured = {}
        config = {"market": {"default_quote": "USDT"}, "opportunity": {}}
        with (
            patch.object(cli, "load_config", return_value=config),
            # Constructing a private client would need credentials/network;
            # fail the test immediately if the CLI attempts it.
            patch.object(cli, "_load_spot_client", side_effect=AssertionError("evaluate should use dataset only")),
            patch.object(
                cli.opportunity_evaluation_service,
                "evaluate_opportunity_dataset",
                return_value={"summary": {"count": 1, "correct": 1}},
            ) as evaluate_mock,
            # Capture both the payload and the agent flag passed to print_output.
            patch.object(
                cli,
                "print_output",
                side_effect=lambda payload, **kwargs: captured.update({"payload": payload, "agent": kwargs["agent"]}),
            ),
        ):
            result = cli.main(
                [
                    "opportunity",
                    "evaluate",
                    "/tmp/dataset.json",
                    "--horizon-hours",
                    "6",
                    "--take-profit-pct",
                    "2",
                    "--stop-loss-pct",
                    "1.5",
                    "--setup-target-pct",
                    "1",
                    "--lookback",
                    "24",
                    "--top-n",
                    "3",
                    "--examples",
                    "5",
                    "--agent",
                ]
            )

        self.assertEqual(result, 0)
        self.assertEqual(captured["payload"]["summary"]["correct"], 1)
        self.assertTrue(captured["agent"])
        # Percent flags must arrive at the service as fractions (2% -> 0.02).
        evaluate_mock.assert_called_once_with(
            config,
            dataset_path="/tmp/dataset.json",
            horizon_hours=6.0,
            take_profit=0.02,
            stop_loss=0.015,
            setup_target=0.01,
            lookback=24,
            top_n=3,
            max_examples=5,
        )
|
||||
|
||||
    def test_opportunity_optimize_dispatches_without_private_client(self):
        """`coin opportunity optimize` must run from the dataset alone.

        Mirrors the evaluate dispatch test: checks flag conversion, payload
        pass-through, the agent flag, and that no private client is built.
        """
        captured = {}
        config = {"market": {"default_quote": "USDT"}, "opportunity": {}}
        with (
            patch.object(cli, "load_config", return_value=config),
            # Fail fast if the CLI tries to construct a private API client.
            patch.object(cli, "_load_spot_client", side_effect=AssertionError("optimize should use dataset only")),
            patch.object(
                cli.opportunity_evaluation_service,
                "optimize_opportunity_model",
                return_value={"best": {"summary": {"accuracy": 0.7}}},
            ) as optimize_mock,
            # Capture both the payload and the agent flag passed to print_output.
            patch.object(
                cli,
                "print_output",
                side_effect=lambda payload, **kwargs: captured.update({"payload": payload, "agent": kwargs["agent"]}),
            ),
        ):
            result = cli.main(
                [
                    "opportunity",
                    "optimize",
                    "/tmp/dataset.json",
                    "--horizon-hours",
                    "6",
                    "--take-profit-pct",
                    "2",
                    "--stop-loss-pct",
                    "1.5",
                    "--setup-target-pct",
                    "1",
                    "--lookback",
                    "24",
                    "--top-n",
                    "3",
                    "--passes",
                    "1",
                    "--agent",
                ]
            )

        self.assertEqual(result, 0)
        self.assertEqual(captured["payload"]["best"]["summary"]["accuracy"], 0.7)
        self.assertTrue(captured["agent"])
        # Percent flags must arrive at the service as fractions (2% -> 0.02).
        optimize_mock.assert_called_once_with(
            config,
            dataset_path="/tmp/dataset.json",
            horizon_hours=6.0,
            take_profit=0.02,
            stop_loss=0.015,
            setup_target=0.01,
            lookback=24,
            top_n=3,
            passes=1,
        )
|
||||
|
||||
@@ -8,7 +8,10 @@ import unittest
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
from coinhunter.services import opportunity_dataset_service
|
||||
from coinhunter.services import (
|
||||
opportunity_dataset_service,
|
||||
opportunity_evaluation_service,
|
||||
)
|
||||
|
||||
|
||||
class OpportunityDatasetServiceTestCase(unittest.TestCase):
|
||||
@@ -74,3 +77,204 @@ class OpportunityDatasetServiceTestCase(unittest.TestCase):
|
||||
self.assertEqual(payload["external_history"]["status"], "available")
|
||||
self.assertEqual(payload["counts"]["BTCUSDT"]["1d"], 5)
|
||||
self.assertEqual(len(dataset["klines"]["BTCUSDT"]["1d"]), 5)
|
||||
|
||||
|
||||
class OpportunityEvaluationServiceTestCase(unittest.TestCase):
|
||||
def _rows(self, closes):
|
||||
start = int(datetime(2026, 4, 20, tzinfo=timezone.utc).timestamp() * 1000)
|
||||
rows = []
|
||||
for index, close in enumerate(closes):
|
||||
open_time = start + index * 60 * 60 * 1000
|
||||
rows.append(
|
||||
[
|
||||
open_time,
|
||||
close * 0.995,
|
||||
close * 1.01,
|
||||
close * 0.995,
|
||||
close,
|
||||
100 + index * 10,
|
||||
open_time + 60 * 60 * 1000 - 1,
|
||||
close * (100 + index * 10),
|
||||
]
|
||||
)
|
||||
return rows
|
||||
|
||||
    def test_evaluate_dataset_counts_walk_forward_accuracy(self):
        """Evaluation over a two-symbol dataset scores one of each outcome.

        GOODUSDT: consolidating then breaking out -> expected to be actioned
        and win. WEAKUSDT: monotonic downtrend -> expected to be skipped.
        Both decisions should be counted correct, giving accuracy 1.0.
        """
        # 26 hourly closes: choppy base, tightening range, late breakout.
        good = [100, 105, 98, 106, 99, 107, 100, 106, 101, 105, 102, 104, 102.5, 103, 102.8, 103.2, 103.0, 103.4, 103.1, 103.6, 103.3, 103.8, 104.2, 106, 108.5, 109]
        # 26 hourly closes falling 100 -> 75: a clean skip candidate.
        weak = [100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75]
        good_rows = self._rows(good)
        weak_rows = self._rows(weak)
        # Single decision window over rows 23..24 (the breakout candle).
        simulation_start = datetime.fromtimestamp(good_rows[23][0] / 1000, tz=timezone.utc)
        simulation_end = datetime.fromtimestamp(good_rows[24][0] / 1000, tz=timezone.utc)
        dataset = {
            "metadata": {
                "symbols": ["GOODUSDT", "WEAKUSDT"],
                "plan": {
                    "intervals": ["1h"],
                    "simulate_days": 1 / 12,  # 2 hours
                    "simulation_start": simulation_start.isoformat().replace("+00:00", "Z"),
                    "simulation_end": simulation_end.isoformat().replace("+00:00", "Z"),
                },
            },
            "klines": {
                "GOODUSDT": {"1h": good_rows},
                "WEAKUSDT": {"1h": weak_rows},
            },
        }
        config = {
            "signal": {"lookback_interval": "1h"},
            "opportunity": {
                "top_n": 2,
                "min_quote_volume": 0.0,
                "entry_threshold": 1.5,
                "watch_threshold": 0.6,
                "min_trigger_score": 0.45,
                "min_setup_score": 0.35,
            },
        }

        with tempfile.TemporaryDirectory() as tmpdir:
            path = Path(tmpdir) / "dataset.json"
            path.write_text(json.dumps(dataset), encoding="utf-8")
            result = opportunity_evaluation_service.evaluate_opportunity_dataset(
                config,
                dataset_path=str(path),
                take_profit=0.02,
                stop_loss=0.015,
                setup_target=0.01,
                max_examples=2,
            )

        # Both symbols decided, both decisions correct.
        self.assertEqual(result["summary"]["count"], 2)
        self.assertEqual(result["summary"]["correct"], 2)
        self.assertEqual(result["summary"]["accuracy"], 1.0)
        self.assertEqual(result["by_action"]["trigger"]["correct"], 1)
        self.assertEqual(result["trade_simulation"]["wins"], 1)
||||
|
||||
    def test_optimize_model_reports_recommended_weights(self):
        """Optimizer returns baseline/best snapshots and dotted config keys.

        Uses the same breakout price series as the evaluation test so the
        single-pass coordinate search has a valid dataset to score against.
        """
        rows = self._rows(
            # Consolidation-then-breakout series (same shape as GOODUSDT above).
            [100, 105, 98, 106, 99, 107, 100, 106, 101, 105, 102, 104, 102.5, 103, 102.8, 103.2, 103.0, 103.4, 103.1, 103.6, 103.3, 103.8, 104.2, 106, 108.5, 109]
        )
        # Single decision window over rows 23..24.
        simulation_start = datetime.fromtimestamp(rows[23][0] / 1000, tz=timezone.utc)
        simulation_end = datetime.fromtimestamp(rows[24][0] / 1000, tz=timezone.utc)
        dataset = {
            "metadata": {
                "symbols": ["GOODUSDT"],
                "plan": {
                    "intervals": ["1h"],
                    "simulate_days": 1 / 12,  # 2 hours
                    "simulation_start": simulation_start.isoformat().replace("+00:00", "Z"),
                    "simulation_end": simulation_end.isoformat().replace("+00:00", "Z"),
                },
            },
            "klines": {"GOODUSDT": {"1h": rows}},
        }
        config = {
            "signal": {"lookback_interval": "1h"},
            "opportunity": {
                "top_n": 1,
                "min_quote_volume": 0.0,
                "entry_threshold": 1.5,
                "watch_threshold": 0.6,
                "min_trigger_score": 0.45,
                "min_setup_score": 0.35,
            },
        }

        with tempfile.TemporaryDirectory() as tmpdir:
            path = Path(tmpdir) / "dataset.json"
            path.write_text(json.dumps(dataset), encoding="utf-8")
            result = opportunity_evaluation_service.optimize_opportunity_model(
                config,
                dataset_path=str(path),
                passes=1,
                take_profit=0.02,
                stop_loss=0.015,
                setup_target=0.01,
            )

        # Shape of the report matters more than the search outcome here.
        self.assertIn("baseline", result)
        self.assertIn("best", result)
        self.assertIn("opportunity.model_weights.trigger", result["recommended_config"])
        self.assertEqual(result["search"]["optimized"], "model_weights_only")
|
||||
|
||||
@@ -436,6 +436,28 @@ class OpportunityServiceTestCase(unittest.TestCase):
|
||||
self.assertEqual(sol["metrics"]["fundamental"], 0.9)
|
||||
self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
|
||||
|
||||
    def test_research_score_does_not_create_weak_trigger(self):
        """A high composite score alone must not upgrade a weak chart to trigger.

        Score 2.5 clears the 1.5 entry threshold, but setup/trigger scores sit
        below their minimums, so the action must fall back to "setup" with a
        reason explaining the technical trigger is not clean enough.
        """
        metrics = {
            "extension_penalty": 0.0,
            "recent_runup": 0.0,
            "breakout_pct": -0.01,
            "setup_score": 0.12,  # below min_setup_score (0.35)
            "trigger_score": 0.18,  # below min_trigger_score (0.45)
        }
        action, reasons = opportunity_service._action_for_opportunity(
            2.5,
            metrics,
            {
                "entry_threshold": 1.5,
                "watch_threshold": 0.6,
                "min_trigger_score": 0.45,
                "min_setup_score": 0.35,
            },
        )

        self.assertEqual(action, "setup")
        self.assertIn("technical trigger quality is not clean enough", reasons[0])
|
||||
|
||||
def test_unlock_risk_blocks_add_recommendation(self):
|
||||
metrics = {
|
||||
"liquidity": 0.8,
|
||||
|
||||
Reference in New Issue
Block a user