feat: add opportunity evaluation optimizer

This commit is contained in:
2026-04-22 00:29:02 +08:00
parent 436bef4814
commit 076a5f1b1c
11 changed files with 1224 additions and 37 deletions

View File

@@ -261,15 +261,18 @@ class CLITestCase(unittest.TestCase):
return_value={"path": "/tmp/dataset.json", "symbols": ["BTCUSDT"]},
) as collect_mock,
patch.object(
cli, "print_output", side_effect=lambda payload, **kwargs: captured.setdefault("payload", payload)
cli,
"print_output",
side_effect=lambda payload, **kwargs: captured.update({"payload": payload, "agent": kwargs["agent"]}),
),
):
result = cli.main(
["opportunity", "dataset", "--symbols", "BTCUSDT", "--simulate-days", "3", "--run-days", "7"]
["opportunity", "dataset", "--symbols", "BTCUSDT", "--simulate-days", "3", "--run-days", "7", "--agent"]
)
self.assertEqual(result, 0)
self.assertEqual(captured["payload"]["path"], "/tmp/dataset.json")
self.assertTrue(captured["agent"])
collect_mock.assert_called_once_with(
config,
symbols=["BTCUSDT"],
@@ -277,3 +280,113 @@ class CLITestCase(unittest.TestCase):
run_days=7.0,
output_path=None,
)
def test_opportunity_evaluate_dispatches_without_private_client(self):
    """`opportunity evaluate` must run purely from the dataset file: it may not
    build a private spot client, and it forwards every CLI flag to the
    evaluation service before printing the payload in agent mode."""
    recorded = {}
    config = {"market": {"default_quote": "USDT"}, "opportunity": {}}

    def _record_output(payload, **kwargs):
        # Capture what the CLI hands to print_output so we can assert on it.
        recorded["payload"] = payload
        recorded["agent"] = kwargs["agent"]

    argv = [
        "opportunity",
        "evaluate",
        "/tmp/dataset.json",
        "--horizon-hours",
        "6",
        "--take-profit-pct",
        "2",
        "--stop-loss-pct",
        "1.5",
        "--setup-target-pct",
        "1",
        "--lookback",
        "24",
        "--top-n",
        "3",
        "--examples",
        "5",
        "--agent",
    ]
    with (
        patch.object(cli, "load_config", return_value=config),
        # Instantiating a private client during evaluate is a test failure.
        patch.object(cli, "_load_spot_client", side_effect=AssertionError("evaluate should use dataset only")),
        patch.object(
            cli.opportunity_evaluation_service,
            "evaluate_opportunity_dataset",
            return_value={"summary": {"count": 1, "correct": 1}},
        ) as evaluate_mock,
        patch.object(cli, "print_output", side_effect=_record_output),
    ):
        exit_code = cli.main(argv)
        self.assertEqual(exit_code, 0)
        self.assertEqual(recorded["payload"]["summary"]["correct"], 1)
        self.assertTrue(recorded["agent"])
        # Percent flags are converted to fractions, hours/lookback to numbers.
        evaluate_mock.assert_called_once_with(
            config,
            dataset_path="/tmp/dataset.json",
            horizon_hours=6.0,
            take_profit=0.02,
            stop_loss=0.015,
            setup_target=0.01,
            lookback=24,
            top_n=3,
            max_examples=5,
        )
def test_opportunity_optimize_dispatches_without_private_client(self):
    """`opportunity optimize` must run purely from the dataset file: it may not
    build a private spot client, and it forwards every CLI flag to the
    optimizer service before printing the payload in agent mode."""
    recorded = {}
    config = {"market": {"default_quote": "USDT"}, "opportunity": {}}

    def _record_output(payload, **kwargs):
        # Capture what the CLI hands to print_output so we can assert on it.
        recorded["payload"] = payload
        recorded["agent"] = kwargs["agent"]

    argv = [
        "opportunity",
        "optimize",
        "/tmp/dataset.json",
        "--horizon-hours",
        "6",
        "--take-profit-pct",
        "2",
        "--stop-loss-pct",
        "1.5",
        "--setup-target-pct",
        "1",
        "--lookback",
        "24",
        "--top-n",
        "3",
        "--passes",
        "1",
        "--agent",
    ]
    with (
        patch.object(cli, "load_config", return_value=config),
        # Instantiating a private client during optimize is a test failure.
        patch.object(cli, "_load_spot_client", side_effect=AssertionError("optimize should use dataset only")),
        patch.object(
            cli.opportunity_evaluation_service,
            "optimize_opportunity_model",
            return_value={"best": {"summary": {"accuracy": 0.7}}},
        ) as optimize_mock,
        patch.object(cli, "print_output", side_effect=_record_output),
    ):
        exit_code = cli.main(argv)
        self.assertEqual(exit_code, 0)
        self.assertEqual(recorded["payload"]["best"]["summary"]["accuracy"], 0.7)
        self.assertTrue(recorded["agent"])
        # Percent flags are converted to fractions, hours/lookback to numbers.
        optimize_mock.assert_called_once_with(
            config,
            dataset_path="/tmp/dataset.json",
            horizon_hours=6.0,
            take_profit=0.02,
            stop_loss=0.015,
            setup_target=0.01,
            lookback=24,
            top_n=3,
            passes=1,
        )

View File

@@ -8,7 +8,10 @@ import unittest
from datetime import datetime, timezone
from pathlib import Path
from coinhunter.services import opportunity_dataset_service
from coinhunter.services import (
opportunity_dataset_service,
opportunity_evaluation_service,
)
class OpportunityDatasetServiceTestCase(unittest.TestCase):
@@ -74,3 +77,204 @@ class OpportunityDatasetServiceTestCase(unittest.TestCase):
self.assertEqual(payload["external_history"]["status"], "available")
self.assertEqual(payload["counts"]["BTCUSDT"]["1d"], 5)
self.assertEqual(len(dataset["klines"]["BTCUSDT"]["1d"]), 5)
class OpportunityEvaluationServiceTestCase(unittest.TestCase):
def _rows(self, closes):
start = int(datetime(2026, 4, 20, tzinfo=timezone.utc).timestamp() * 1000)
rows = []
for index, close in enumerate(closes):
open_time = start + index * 60 * 60 * 1000
rows.append(
[
open_time,
close * 0.995,
close * 1.01,
close * 0.995,
close,
100 + index * 10,
open_time + 60 * 60 * 1000 - 1,
close * (100 + index * 10),
]
)
return rows
def test_evaluate_dataset_counts_walk_forward_accuracy(self):
good = [
100,
105,
98,
106,
99,
107,
100,
106,
101,
105,
102,
104,
102.5,
103,
102.8,
103.2,
103.0,
103.4,
103.1,
103.6,
103.3,
103.8,
104.2,
106,
108.5,
109,
]
weak = [
100,
99,
98,
97,
96,
95,
94,
93,
92,
91,
90,
89,
88,
87,
86,
85,
84,
83,
82,
81,
80,
79,
78,
77,
76,
75,
]
good_rows = self._rows(good)
weak_rows = self._rows(weak)
simulation_start = datetime.fromtimestamp(good_rows[23][0] / 1000, tz=timezone.utc)
simulation_end = datetime.fromtimestamp(good_rows[24][0] / 1000, tz=timezone.utc)
dataset = {
"metadata": {
"symbols": ["GOODUSDT", "WEAKUSDT"],
"plan": {
"intervals": ["1h"],
"simulate_days": 1 / 12,
"simulation_start": simulation_start.isoformat().replace("+00:00", "Z"),
"simulation_end": simulation_end.isoformat().replace("+00:00", "Z"),
},
},
"klines": {
"GOODUSDT": {"1h": good_rows},
"WEAKUSDT": {"1h": weak_rows},
},
}
config = {
"signal": {"lookback_interval": "1h"},
"opportunity": {
"top_n": 2,
"min_quote_volume": 0.0,
"entry_threshold": 1.5,
"watch_threshold": 0.6,
"min_trigger_score": 0.45,
"min_setup_score": 0.35,
},
}
with tempfile.TemporaryDirectory() as tmpdir:
path = Path(tmpdir) / "dataset.json"
path.write_text(json.dumps(dataset), encoding="utf-8")
result = opportunity_evaluation_service.evaluate_opportunity_dataset(
config,
dataset_path=str(path),
take_profit=0.02,
stop_loss=0.015,
setup_target=0.01,
max_examples=2,
)
self.assertEqual(result["summary"]["count"], 2)
self.assertEqual(result["summary"]["correct"], 2)
self.assertEqual(result["summary"]["accuracy"], 1.0)
self.assertEqual(result["by_action"]["trigger"]["correct"], 1)
self.assertEqual(result["trade_simulation"]["wins"], 1)
def test_optimize_model_reports_recommended_weights(self):
rows = self._rows(
[
100,
105,
98,
106,
99,
107,
100,
106,
101,
105,
102,
104,
102.5,
103,
102.8,
103.2,
103.0,
103.4,
103.1,
103.6,
103.3,
103.8,
104.2,
106,
108.5,
109,
]
)
simulation_start = datetime.fromtimestamp(rows[23][0] / 1000, tz=timezone.utc)
simulation_end = datetime.fromtimestamp(rows[24][0] / 1000, tz=timezone.utc)
dataset = {
"metadata": {
"symbols": ["GOODUSDT"],
"plan": {
"intervals": ["1h"],
"simulate_days": 1 / 12,
"simulation_start": simulation_start.isoformat().replace("+00:00", "Z"),
"simulation_end": simulation_end.isoformat().replace("+00:00", "Z"),
},
},
"klines": {"GOODUSDT": {"1h": rows}},
}
config = {
"signal": {"lookback_interval": "1h"},
"opportunity": {
"top_n": 1,
"min_quote_volume": 0.0,
"entry_threshold": 1.5,
"watch_threshold": 0.6,
"min_trigger_score": 0.45,
"min_setup_score": 0.35,
},
}
with tempfile.TemporaryDirectory() as tmpdir:
path = Path(tmpdir) / "dataset.json"
path.write_text(json.dumps(dataset), encoding="utf-8")
result = opportunity_evaluation_service.optimize_opportunity_model(
config,
dataset_path=str(path),
passes=1,
take_profit=0.02,
stop_loss=0.015,
setup_target=0.01,
)
self.assertIn("baseline", result)
self.assertIn("best", result)
self.assertIn("opportunity.model_weights.trigger", result["recommended_config"])
self.assertEqual(result["search"]["optimized"], "model_weights_only")

View File

@@ -436,6 +436,28 @@ class OpportunityServiceTestCase(unittest.TestCase):
self.assertEqual(sol["metrics"]["fundamental"], 0.9)
self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
def test_research_score_does_not_create_weak_trigger(self):
    """A blended score above the entry threshold must still be downgraded to
    'setup' when the underlying technical trigger quality is weak."""
    weak_technicals = {
        "extension_penalty": 0.0,
        "recent_runup": 0.0,
        "breakout_pct": -0.01,
        "setup_score": 0.12,
        "trigger_score": 0.18,
    }
    thresholds = {
        "entry_threshold": 1.5,
        "watch_threshold": 0.6,
        "min_trigger_score": 0.45,
        "min_setup_score": 0.35,
    }
    # 2.5 clears entry_threshold, but trigger_score is far below min_trigger_score.
    action, reasons = opportunity_service._action_for_opportunity(2.5, weak_technicals, thresholds)
    self.assertEqual(action, "setup")
    self.assertIn("technical trigger quality is not clean enough", reasons[0])
def test_unlock_risk_blocks_add_recommendation(self):
metrics = {
"liquidity": 0.8,