Add opportunity dataset collection

This commit is contained in:
Carlos Ouyang
2026-04-21 19:41:48 +08:00
parent 50402e4aa7
commit 436bef4814
10 changed files with 1295 additions and 32 deletions

View File

@@ -5,7 +5,12 @@ from __future__ import annotations
import unittest
from unittest.mock import patch
from coinhunter.services import opportunity_service, portfolio_service, signal_service
from coinhunter.services import (
opportunity_service,
portfolio_service,
research_service,
signal_service,
)
class FakeSpotClient:
@@ -253,6 +258,37 @@ class OpportunityServiceTestCase(unittest.TestCase):
"entry_threshold": 1.5,
"watch_threshold": 0.6,
"overlap_penalty": 0.6,
"auto_research": False,
"research_provider": "coingecko",
"research_timeout_seconds": 4.0,
"risk_limits": {
"min_liquidity": 0.0,
"max_overextension": 0.08,
"max_downside_risk": 0.3,
"max_unlock_risk": 0.75,
"max_regulatory_risk": 0.75,
"min_quality_for_add": 0.0,
},
"weights": {
"trend": 1.0,
"momentum": 1.0,
"breakout": 0.8,
"pullback": 0.4,
"volume": 0.7,
"liquidity": 0.3,
"trend_alignment": 0.8,
"fundamental": 0.8,
"tokenomics": 0.7,
"catalyst": 0.5,
"adoption": 0.4,
"smart_money": 0.3,
"volatility_penalty": 0.5,
"overextension_penalty": 0.7,
"downside_penalty": 0.5,
"unlock_penalty": 0.8,
"regulatory_penalty": 0.4,
"position_concentration_penalty": 0.6,
},
},
"portfolio": {
"add_threshold": 1.5,
@@ -314,3 +350,133 @@ class OpportunityServiceTestCase(unittest.TestCase):
score, metrics = signal_service.score_market_signal([], [], {"price_change_pct": 1.0}, {})
self.assertEqual(score, 0.0)
self.assertEqual(metrics["trend"], 0.0)
def test_overextended_candidate_is_not_an_add(self):
    """A steep, fast run-up should be scored well but demoted to 'observe'."""
    close_series = [100, 110, 121, 133, 146, 160, 176]
    volume_series = [100, 120, 130, 150, 170, 190, 230]
    ticker_snapshot = {
        "price_change_pct": 35.0,
        "quote_volume": 20_000_000.0,
        "high_price": 180.0,
        "low_price": 95.0,
    }
    weights = self.config["opportunity"]["weights"]
    timeframes = {"1h": close_series, "4h": close_series}
    score, metrics = opportunity_service._score_candidate(
        close_series, volume_series, ticker_snapshot, weights, 0.0, timeframes
    )
    action, reasons = opportunity_service._action_for(score, 0.0, metrics)
    # Strong score, but the overextension gate should force a wait.
    self.assertGreater(score, 1.0)
    self.assertGreater(metrics["overextension"], 0.08)
    self.assertEqual(action, "observe")
    self.assertIn("move looks extended; wait for a cleaner entry", reasons)
def test_external_research_signals_improve_candidate_quality(self):
    """Supplying external research signals must lift both score and quality."""
    close_series = [100, 101, 102, 103, 104, 105, 106]
    volume_series = [100, 105, 110, 115, 120, 125, 130]
    ticker_snapshot = {
        "price_change_pct": 4.0,
        "quote_volume": 50_000_000.0,
        "high_price": 110.0,
        "low_price": 95.0,
    }
    weights = self.config["opportunity"]["weights"]
    timeframes = {"1h": close_series}

    # Baseline: no research input at all.
    base_score, base_metrics = opportunity_service._score_candidate(
        close_series, volume_series, ticker_snapshot, weights, 0.0, timeframes
    )

    research_signals = {
        "fundamental": 85,
        "tokenomics": 80,
        "catalyst": 70,
        "adoption": 90,
        "smart_money": 60,
    }
    researched_score, researched_metrics = opportunity_service._score_candidate(
        close_series,
        volume_series,
        ticker_snapshot,
        weights,
        0.0,
        timeframes,
        research_signals,
    )

    self.assertGreater(researched_score, base_score)
    self.assertEqual(base_metrics["quality"], 0.0)
    self.assertGreater(researched_metrics["quality"], 0.7)
def test_scan_uses_automatic_external_research(self):
    """With auto_research on, scan_opportunities must fetch research exactly once
    and fold the returned signals into the recommendation metrics."""
    merged_opportunity = {
        **self.config["opportunity"],
        "auto_research": True,
        "top_n": 2,
    }
    scan_config = {**self.config, "opportunity": merged_opportunity}

    stub_research = {
        "SOLUSDT": {
            "fundamental": 0.9,
            "tokenomics": 0.8,
            "catalyst": 0.9,
            "adoption": 0.8,
            "smart_money": 0.7,
            "unlock_risk": 0.1,
            "regulatory_risk": 0.1,
            "research_confidence": 0.9,
        }
    }
    with patch.object(opportunity_service, "audit_event", return_value=None):
        with patch.object(
            opportunity_service,
            "get_external_research",
            return_value=stub_research,
        ) as mock_research:
            payload = opportunity_service.scan_opportunities(scan_config, spot_client=FakeSpotClient())
            mock_research.assert_called_once()
            sol = next(item for item in payload["recommendations"] if item["symbol"] == "SOLUSDT")
            self.assertEqual(sol["metrics"]["fundamental"], 0.9)
            self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
def test_unlock_risk_blocks_add_recommendation(self):
    """An otherwise excellent candidate must be downgraded to 'observe'
    when unlock_risk exceeds the configured limit."""
    candidate_metrics = {
        "liquidity": 0.8,
        "overextension": 0.0,
        "downside_risk": 0.0,
        "unlock_risk": 0.9,
        "regulatory_risk": 0.0,
        "quality": 0.8,
    }
    risk_limits = self.config["opportunity"]["risk_limits"]
    action, reasons = opportunity_service._action_for(3.0, 0.0, candidate_metrics, risk_limits)
    self.assertEqual(action, "observe")
    self.assertIn("token unlock or dilution risk is too high", reasons)
class ResearchServiceTestCase(unittest.TestCase):
    """Unit tests for research_service's CoinGecko signal translation."""

    def test_coingecko_market_data_becomes_research_signals(self):
        """A healthy, trending large-cap row should yield strong fundamental,
        tokenomics, and catalyst signals with low unlock risk."""
        market_row = {
            "id": "solana",
            "symbol": "sol",
            "market_cap": 80_000_000_000,
            "fully_diluted_valuation": 95_000_000_000,
            "total_volume": 5_000_000_000,
            "market_cap_rank": 6,
            "circulating_supply": 550_000_000,
            "total_supply": 600_000_000,
            "max_supply": None,
            "price_change_percentage_7d_in_currency": 12.0,
            "price_change_percentage_30d_in_currency": 35.0,
            "price_change_percentage_200d_in_currency": 80.0,
        }
        signals = research_service._coingecko_market_to_signals(market_row, is_trending=True)
        self.assertGreater(signals["fundamental"], 0.6)
        self.assertGreater(signals["tokenomics"], 0.8)
        self.assertGreater(signals["catalyst"], 0.6)
        self.assertLess(signals["unlock_risk"], 0.2)