Add opportunity dataset collection

This commit is contained in:
Carlos Ouyang
2026-04-21 19:41:48 +08:00
parent 50402e4aa7
commit 436bef4814
10 changed files with 1295 additions and 32 deletions

View File

@@ -248,3 +248,32 @@ class CLITestCase(unittest.TestCase):
content = __import__("pathlib").Path(tmp_path).read_text()
self.assertIn("BINANCE_API_SECRET=test_secret_value", content)
__import__("os").unlink(tmp_path)
def test_opportunity_dataset_dispatches_without_private_client(self):
    """The dataset subcommand must work with public data only — loading a
    private spot client would be a regression, so it is patched to raise."""
    printed = {}

    def record_output(payload, **_kwargs):
        # Keep only the first payload handed to print_output.
        printed.setdefault("payload", payload)

    fake_config = {"market": {"default_quote": "USDT"}, "opportunity": {}}
    argv = ["opportunity", "dataset", "--symbols", "BTCUSDT", "--simulate-days", "3", "--run-days", "7"]
    with (
        patch.object(cli, "load_config", return_value=fake_config),
        patch.object(cli, "_load_spot_client", side_effect=AssertionError("dataset should use public data")),
        patch.object(
            cli.opportunity_dataset_service,
            "collect_opportunity_dataset",
            return_value={"path": "/tmp/dataset.json", "symbols": ["BTCUSDT"]},
        ) as dataset_mock,
        patch.object(cli, "print_output", side_effect=record_output),
    ):
        exit_code = cli.main(argv)
    self.assertEqual(exit_code, 0)
    self.assertEqual(printed["payload"]["path"], "/tmp/dataset.json")
    # CLI flags are parsed into floats before reaching the service layer.
    dataset_mock.assert_called_once_with(
        fake_config,
        symbols=["BTCUSDT"],
        simulate_days=3.0,
        run_days=7.0,
        output_path=None,
    )

View File

@@ -0,0 +1,76 @@
"""Opportunity dataset collection tests."""
from __future__ import annotations
import json
import tempfile
import unittest
from datetime import datetime, timezone
from pathlib import Path
from coinhunter.services import opportunity_dataset_service
class OpportunityDatasetServiceTestCase(unittest.TestCase):
    """Exercise dataset planning and collection against stubbed HTTP transports."""

    def test_default_plan_uses_widest_scan_reference_window(self):
        """With several lookback intervals, the plan sizes its reference window
        from the widest (daily) interval at the default kline limit."""
        config = {"opportunity": {"lookback_intervals": ["1h", "4h", "1d"]}}
        plan = opportunity_dataset_service.build_dataset_plan(
            config,
            now=datetime(2026, 4, 21, tzinfo=timezone.utc),
        )
        self.assertEqual(plan.kline_limit, 48)
        self.assertEqual(plan.reference_days, 48.0)
        self.assertEqual(plan.simulate_days, 7.0)
        self.assertEqual(plan.run_days, 7.0)
        # reference (48) + simulate (7) + run (7) = 62 total days.
        self.assertEqual(plan.total_days, 62.0)

    def test_collect_dataset_writes_klines_and_probe_metadata(self):
        """Collecting a dataset should persist klines per symbol/interval and
        record whether the external research provider is reachable."""
        config = {
            "binance": {"spot_base_url": "https://api.binance.test"},
            "market": {"default_quote": "USDT"},
            "opportunity": {
                "lookback_intervals": ["1d"],
                "kline_limit": 2,
                "simulate_days": 1,
                "run_days": 1,
                "auto_research": True,
                "research_provider": "coingecko",
            },
        }

        def fake_http_get(url, headers, timeout):
            # Synthesize one daily candle per day in the requested window,
            # with closes climbing 100, 101, 102, ...
            params = opportunity_dataset_service.parse_query(url)
            step_ms = 86400 * 1000
            first = int(params["startTime"])
            last = int(params["endTime"])
            candles = []
            for offset, open_time in enumerate(range(first, last + 1, step_ms)):
                close = 100 + offset
                candles.append(
                    [open_time, close - 1, close + 1, close - 2, close, 10, open_time + step_ms - 1, close * 10]
                )
            return candles

        def fake_http_status(url, headers, timeout):
            # Pretend the research provider responds successfully.
            return 200, "{}"

        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "dataset.json"
            payload = opportunity_dataset_service.collect_opportunity_dataset(
                config,
                symbols=["BTCUSDT"],
                output_path=str(target),
                http_get=fake_http_get,
                http_status=fake_http_status,
                now=datetime(2026, 4, 21, tzinfo=timezone.utc),
            )
            written = json.loads(target.read_text(encoding="utf-8"))
            self.assertEqual(payload["plan"]["reference_days"], 2.0)
            self.assertEqual(payload["plan"]["total_days"], 4.0)
            self.assertEqual(payload["external_history"]["status"], "available")
            # 4-day window + inclusive endpoint -> 5 daily candles.
            self.assertEqual(payload["counts"]["BTCUSDT"]["1d"], 5)
            self.assertEqual(len(written["klines"]["BTCUSDT"]["1d"]), 5)

View File

@@ -5,7 +5,12 @@ from __future__ import annotations
import unittest
from unittest.mock import patch
from coinhunter.services import opportunity_service, portfolio_service, signal_service
from coinhunter.services import (
opportunity_service,
portfolio_service,
research_service,
signal_service,
)
class FakeSpotClient:
@@ -253,6 +258,37 @@ class OpportunityServiceTestCase(unittest.TestCase):
"entry_threshold": 1.5,
"watch_threshold": 0.6,
"overlap_penalty": 0.6,
"auto_research": False,
"research_provider": "coingecko",
"research_timeout_seconds": 4.0,
"risk_limits": {
"min_liquidity": 0.0,
"max_overextension": 0.08,
"max_downside_risk": 0.3,
"max_unlock_risk": 0.75,
"max_regulatory_risk": 0.75,
"min_quality_for_add": 0.0,
},
"weights": {
"trend": 1.0,
"momentum": 1.0,
"breakout": 0.8,
"pullback": 0.4,
"volume": 0.7,
"liquidity": 0.3,
"trend_alignment": 0.8,
"fundamental": 0.8,
"tokenomics": 0.7,
"catalyst": 0.5,
"adoption": 0.4,
"smart_money": 0.3,
"volatility_penalty": 0.5,
"overextension_penalty": 0.7,
"downside_penalty": 0.5,
"unlock_penalty": 0.8,
"regulatory_penalty": 0.4,
"position_concentration_penalty": 0.6,
},
},
"portfolio": {
"add_threshold": 1.5,
@@ -314,3 +350,133 @@ class OpportunityServiceTestCase(unittest.TestCase):
score, metrics = signal_service.score_market_signal([], [], {"price_change_pct": 1.0}, {})
self.assertEqual(score, 0.0)
self.assertEqual(metrics["trend"], 0.0)
def test_overextended_candidate_is_not_an_add(self):
    """A strongly trending but over-stretched move should be downgraded
    from an add to an observe recommendation."""
    price_series = [100, 110, 121, 133, 146, 160, 176]
    volume_series = [100, 120, 130, 150, 170, 190, 230]
    ticker = {
        "price_change_pct": 35.0,
        "quote_volume": 20_000_000.0,
        "high_price": 180.0,
        "low_price": 95.0,
    }
    score, metrics = opportunity_service._score_candidate(
        price_series,
        volume_series,
        ticker,
        self.config["opportunity"]["weights"],
        0.0,
        {"1h": price_series, "4h": price_series},
    )
    action, reasons = opportunity_service._action_for(score, 0.0, metrics)
    # The signal itself is strong...
    self.assertGreater(score, 1.0)
    # ...but the overextension gate trips, so no add is issued.
    self.assertGreater(metrics["overextension"], 0.08)
    self.assertEqual(action, "observe")
    self.assertIn("move looks extended; wait for a cleaner entry", reasons)
def test_external_research_signals_improve_candidate_quality(self):
    """Supplying external research should lift both the overall score and
    the quality metric relative to a purely technical evaluation."""
    prices = [100, 101, 102, 103, 104, 105, 106]
    volumes = [100, 105, 110, 115, 120, 125, 130]
    ticker = {
        "price_change_pct": 4.0,
        "quote_volume": 50_000_000.0,
        "high_price": 110.0,
        "low_price": 95.0,
    }
    weights = self.config["opportunity"]["weights"]
    research = {
        "fundamental": 85,
        "tokenomics": 80,
        "catalyst": 70,
        "adoption": 90,
        "smart_money": 60,
    }
    plain_score, plain_metrics = opportunity_service._score_candidate(
        prices, volumes, ticker, weights, 0.0, {"1h": prices}
    )
    boosted_score, boosted_metrics = opportunity_service._score_candidate(
        prices, volumes, ticker, weights, 0.0, {"1h": prices}, research
    )
    self.assertGreater(boosted_score, plain_score)
    # Without research the quality metric stays at its floor.
    self.assertEqual(plain_metrics["quality"], 0.0)
    self.assertGreater(boosted_metrics["quality"], 0.7)
def test_scan_uses_automatic_external_research(self):
    """With auto_research enabled, a scan should fetch external research once
    and merge its signals into the matching recommendation's metrics."""
    scan_config = self.config | {
        "opportunity": self.config["opportunity"] | {"auto_research": True, "top_n": 2}
    }
    sol_signals = {
        "SOLUSDT": {
            "fundamental": 0.9,
            "tokenomics": 0.8,
            "catalyst": 0.9,
            "adoption": 0.8,
            "smart_money": 0.7,
            "unlock_risk": 0.1,
            "regulatory_risk": 0.1,
            "research_confidence": 0.9,
        }
    }
    with (
        patch.object(opportunity_service, "audit_event", return_value=None),
        patch.object(
            opportunity_service, "get_external_research", return_value=sol_signals
        ) as research_mock,
    ):
        payload = opportunity_service.scan_opportunities(scan_config, spot_client=FakeSpotClient())
    research_mock.assert_called_once()
    sol = next(item for item in payload["recommendations"] if item["symbol"] == "SOLUSDT")
    self.assertEqual(sol["metrics"]["fundamental"], 0.9)
    self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
def test_unlock_risk_blocks_add_recommendation(self):
    """A high unlock-risk reading must veto an otherwise strong add signal."""
    risky_metrics = {
        "liquidity": 0.8,
        "overextension": 0.0,
        "downside_risk": 0.0,
        "unlock_risk": 0.9,
        "regulatory_risk": 0.0,
        "quality": 0.8,
    }
    limits = self.config["opportunity"]["risk_limits"]
    # Score 3.0 would normally clear the add threshold; the risk gate wins.
    action, reasons = opportunity_service._action_for(3.0, 0.0, risky_metrics, limits)
    self.assertEqual(action, "observe")
    self.assertIn("token unlock or dilution risk is too high", reasons)
class ResearchServiceTestCase(unittest.TestCase):
    """Tests for converting external market data into research signals."""

    def test_coingecko_market_data_becomes_research_signals(self):
        """A healthy large-cap market entry should translate into strong
        fundamental/tokenomics/catalyst signals and low unlock risk."""
        # Representative CoinGecko /coins/markets row for a top-10 asset;
        # most supply circulating, strong volume, positive multi-window returns.
        signals = research_service._coingecko_market_to_signals(
            {
                "id": "solana",
                "symbol": "sol",
                "market_cap": 80_000_000_000,
                "fully_diluted_valuation": 95_000_000_000,
                "total_volume": 5_000_000_000,
                "market_cap_rank": 6,
                "circulating_supply": 550_000_000,
                "total_supply": 600_000_000,
                "max_supply": None,
                "price_change_percentage_7d_in_currency": 12.0,
                "price_change_percentage_30d_in_currency": 35.0,
                "price_change_percentage_200d_in_currency": 80.0,
            },
            is_trending=True,
        )
        self.assertGreater(signals["fundamental"], 0.6)
        self.assertGreater(signals["tokenomics"], 0.8)
        self.assertGreater(signals["catalyst"], 0.6)
        # Circulating/total supply ratio is high, so dilution risk stays low.
        self.assertLess(signals["unlock_risk"], 0.2)