refactor: simplify opportunity actions to entry/watch/avoid with confidence

- Remove dead scoring code (_score_candidate, _action_for, etc.) and
  align action decisions directly with score_opportunity_signal metrics.
- Reduce action surface from trigger/setup/chase/skip to entry/watch/avoid.
- Add confidence field (0..100) mapped from edge_score.
- Update evaluate/optimize ground-truth mapping and tests.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
This commit is contained in:
2026-04-22 01:08:34 +08:00
parent d3408dabba
commit 003212de99
5 changed files with 72 additions and 346 deletions

View File

@@ -317,7 +317,7 @@ class OpportunityServiceTestCase(unittest.TestCase):
spot_client=OpportunityPatternSpotClient(),
)
self.assertEqual([item["symbol"] for item in payload["recommendations"]], ["SETUPUSDT", "CHASEUSDT"])
self.assertEqual([item["action"] for item in payload["recommendations"]], ["trigger", "chase"])
self.assertEqual([item["action"] for item in payload["recommendations"]], ["entry", "avoid"])
self.assertGreater(payload["recommendations"][0]["metrics"]["setup_score"], 0.6)
self.assertGreater(payload["recommendations"][1]["metrics"]["extension_penalty"], 1.0)
@@ -340,9 +340,9 @@ class OpportunityServiceTestCase(unittest.TestCase):
ignored_rec = ignored["recommendations"][0]
included_rec = included["recommendations"][0]
self.assertEqual(ignored_rec["action"], "trigger")
self.assertEqual(ignored_rec["action"], "entry")
self.assertEqual(ignored_rec["metrics"]["position_weight"], 0.0)
self.assertEqual(included_rec["action"], "skip")
self.assertEqual(included_rec["action"], "entry")
self.assertEqual(included_rec["metrics"]["position_weight"], 1.0)
self.assertLess(included_rec["score"], ignored_rec["score"])
@@ -351,57 +351,6 @@ class OpportunityServiceTestCase(unittest.TestCase):
self.assertEqual(score, 0.0)
self.assertEqual(metrics["trend"], 0.0)
def test_overextended_candidate_is_not_an_add(self):
    """A sharply run-up candidate must be demoted to "observe", not bought.

    Feeds a steeply rising close series (~10% per bar) with expanding volume
    and a +35% 24h ticker into the scorer, then checks that even though the
    raw score is strong (> 1.0) the overextension metric trips (> 0.08) and
    the action decision downgrades to "observe" with the matching reason.

    NOTE(review): this test appears in a removed-lines diff hunk — it
    exercises the legacy `_score_candidate` / `_action_for` helpers that the
    enclosing commit deletes; shown here as the pre-refactor behavior.
    """
    # Monotonic ~10%/bar climb: deliberately overextended price action.
    closes = [100, 110, 121, 133, 146, 160, 176]
    # Rising volume confirms participation, so the score itself stays high.
    volumes = [100, 120, 130, 150, 170, 190, 230]
    ticker = {
        "price_change_pct": 35.0,       # large 24h move — key overextension input
        "quote_volume": 20_000_000.0,
        "high_price": 180.0,
        "low_price": 95.0,
    }
    # Same close series reused for both timeframes — presumably only the
    # shape matters for this scenario; TODO confirm against the helper.
    score, metrics = opportunity_service._score_candidate(
        closes, volumes, ticker, self.config["opportunity"]["weights"], 0.0, {"1h": closes, "4h": closes}
    )
    action, reasons = opportunity_service._action_for(score, 0.0, metrics)
    # Strong score alone must not produce a buy when extension is flagged.
    self.assertGreater(score, 1.0)
    self.assertGreater(metrics["overextension"], 0.08)
    self.assertEqual(action, "observe")
    self.assertIn("move looks extended; wait for a cleaner entry", reasons)
def test_external_research_signals_improve_candidate_quality(self):
    """External research signals must raise both score and quality metric.

    Scores the same modest uptrend twice — once without research input and
    once with a dict of 0–100 research sub-scores — and asserts the
    researched run scores strictly higher, that quality defaults to 0.0
    without research, and that strong research lifts quality above 0.7.

    NOTE(review): this test appears in a removed-lines diff hunk — it
    exercises the legacy `_score_candidate` helper that the enclosing
    commit deletes; shown here as the pre-refactor behavior.
    """
    # Gentle ~1%/bar uptrend: healthy but unremarkable technicals, so any
    # score difference is attributable to the research argument.
    closes = [100, 101, 102, 103, 104, 105, 106]
    volumes = [100, 105, 110, 115, 120, 125, 130]
    ticker = {
        "price_change_pct": 4.0,
        "quote_volume": 50_000_000.0,
        "high_price": 110.0,
        "low_price": 95.0,
    }
    # Baseline: no research payload (7th positional argument omitted).
    base_score, base_metrics = opportunity_service._score_candidate(
        closes, volumes, ticker, self.config["opportunity"]["weights"], 0.0, {"1h": closes}
    )
    # Same inputs plus research sub-scores — presumably on a 0–100 scale
    # given the values used; TODO confirm against the scorer's contract.
    researched_score, researched_metrics = opportunity_service._score_candidate(
        closes,
        volumes,
        ticker,
        self.config["opportunity"]["weights"],
        0.0,
        {"1h": closes},
        {
            "fundamental": 85,
            "tokenomics": 80,
            "catalyst": 70,
            "adoption": 90,
            "smart_money": 60,
        },
    )
    self.assertGreater(researched_score, base_score)
    # Without research input the quality metric must stay at its 0.0 default.
    self.assertEqual(base_metrics["quality"], 0.0)
    self.assertGreater(researched_metrics["quality"], 0.7)
def test_scan_uses_automatic_external_research(self):
config = self.config | {
"opportunity": self.config["opportunity"]
@@ -436,15 +385,16 @@ class OpportunityServiceTestCase(unittest.TestCase):
self.assertEqual(sol["metrics"]["fundamental"], 0.9)
self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
def test_research_score_does_not_create_weak_trigger(self):
def test_weak_setup_and_trigger_becomes_avoid(self):
metrics = {
"extension_penalty": 0.0,
"recent_runup": 0.0,
"breakout_pct": -0.01,
"setup_score": 0.12,
"trigger_score": 0.18,
"edge_score": 0.0,
}
action, reasons = opportunity_service._action_for_opportunity(
action, reasons, confidence = opportunity_service._action_for_opportunity(
2.5,
metrics,
{
@@ -455,27 +405,9 @@ class OpportunityServiceTestCase(unittest.TestCase):
},
)
self.assertEqual(action, "setup")
self.assertIn("technical trigger quality is not clean enough", reasons[0])
def test_unlock_risk_blocks_add_recommendation(self):
metrics = {
"liquidity": 0.8,
"overextension": 0.0,
"downside_risk": 0.0,
"unlock_risk": 0.9,
"regulatory_risk": 0.0,
"quality": 0.8,
}
action, reasons = opportunity_service._action_for(
3.0,
0.0,
metrics,
self.config["opportunity"]["risk_limits"],
)
self.assertEqual(action, "observe")
self.assertIn("token unlock or dilution risk is too high", reasons)
self.assertEqual(action, "avoid")
self.assertIn("setup, trigger, or overall quality is too weak", reasons[0])
self.assertEqual(confidence, 50)
class ResearchServiceTestCase(unittest.TestCase):