refactor: simplify CLI to data layer for AI-assisted trading

Transform CoinHunter from an over-engineered auto-trading system into a
lightweight data-layer CLI paired with the coinbuddy AI Skill.

Key changes:
- Remove non-core commands: backtest, strategy, opportunity dataset/evaluate/optimize
- Add scan: rule-based market screening (zero token cost)
- Add analyze: multi-timeframe technical analysis for AI consumption
- Add watch: lightweight portfolio anomaly monitoring (zero token cost)
- Remove services: backtest, dataset, evaluation, research, strategy
- Add analyze_service with RSI, key levels, alerts, and AI-friendly summaries
- Add watch_portfolio with drawdown/spike/concentration/technical triggers
- Simplify config: remove research/dataset settings, add watch thresholds
- Update TUI rendering for analyze and watch outputs
- Update tests and CLAUDE.md for new architecture

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
This commit is contained in:
2026-04-27 16:35:33 +08:00
parent e4b2239bcd
commit 76c4129c8d
18 changed files with 600 additions and 3142 deletions

View File

@@ -8,7 +8,6 @@ from unittest.mock import patch
from coinhunter.services import (
opportunity_service,
portfolio_service,
research_service,
signal_service,
)
@@ -258,37 +257,6 @@ class OpportunityServiceTestCase(unittest.TestCase):
"entry_threshold": 1.5,
"watch_threshold": 0.6,
"overlap_penalty": 0.6,
"auto_research": False,
"research_provider": "coingecko",
"research_timeout_seconds": 4.0,
"risk_limits": {
"min_liquidity": 0.0,
"max_overextension": 0.08,
"max_downside_risk": 0.3,
"max_unlock_risk": 0.75,
"max_regulatory_risk": 0.75,
"min_quality_for_add": 0.0,
},
"weights": {
"trend": 1.0,
"momentum": 1.0,
"breakout": 0.8,
"pullback": 0.4,
"volume": 0.7,
"liquidity": 0.3,
"trend_alignment": 0.8,
"fundamental": 0.8,
"tokenomics": 0.7,
"catalyst": 0.5,
"adoption": 0.4,
"smart_money": 0.3,
"volatility_penalty": 0.5,
"overextension_penalty": 0.7,
"downside_penalty": 0.5,
"unlock_penalty": 0.8,
"regulatory_penalty": 0.4,
"position_concentration_penalty": 0.6,
},
},
"portfolio": {
"add_threshold": 1.5,
@@ -351,40 +319,6 @@ class OpportunityServiceTestCase(unittest.TestCase):
self.assertEqual(score, 0.0)
self.assertEqual(metrics["trend"], 0.0)
def test_scan_uses_automatic_external_research(self):
    """Verify scan_opportunities consults external research exactly once when enabled.

    NOTE(review): reconstructed from a diff view with stripped indentation —
    assumes ``self.config`` and ``FakeSpotClient`` come from the enclosing
    test case (not visible here); confirm against the full file.
    """
    # Turn on auto research and cap the scan at the top two candidates.
    config = self.config | {
        "opportunity": self.config["opportunity"]
        | {
            "auto_research": True,
            "top_n": 2,
        }
    }
    with (
        # Silence audit logging side effects for the duration of the scan.
        patch.object(opportunity_service, "audit_event", return_value=None),
        # Stub the research provider with fixed SOLUSDT signal scores so the
        # test can check they flow through to the recommendation payload.
        patch.object(
            opportunity_service,
            "get_external_research",
            return_value={
                "SOLUSDT": {
                    "fundamental": 0.9,
                    "tokenomics": 0.8,
                    "catalyst": 0.9,
                    "adoption": 0.8,
                    "smart_money": 0.7,
                    "unlock_risk": 0.1,
                    "regulatory_risk": 0.1,
                    "research_confidence": 0.9,
                }
            },
        ) as research_mock,
    ):
        payload = opportunity_service.scan_opportunities(config, spot_client=FakeSpotClient())
    # One scan should trigger exactly one external research call.
    research_mock.assert_called_once()
    sol = next(item for item in payload["recommendations"] if item["symbol"] == "SOLUSDT")
    # Stubbed research metrics must appear unchanged in the recommendation.
    self.assertEqual(sol["metrics"]["fundamental"], 0.9)
    self.assertEqual(sol["metrics"]["research_confidence"], 0.9)
def test_weak_setup_and_trigger_becomes_avoid(self):
metrics = {
"extension_penalty": 0.0,
@@ -409,28 +343,18 @@ class OpportunityServiceTestCase(unittest.TestCase):
self.assertIn("setup, trigger, or overall quality is too weak", reasons[0])
self.assertEqual(confidence, 50)
class ResearchServiceTestCase(unittest.TestCase):
def test_coingecko_market_data_becomes_research_signals(self):
    """Check that a CoinGecko market row maps onto bounded research-signal scores.

    NOTE(review): reconstructed from a diff view with stripped indentation;
    ``_coingecko_market_to_signals`` is a private helper of the (removed)
    research service — verify against the pre-refactor source.
    """
    # Representative large-cap, trending asset with SOL-like figures and
    # near-fully-circulating supply (550M of 600M, no hard max).
    signals = research_service._coingecko_market_to_signals(
        {
            "id": "solana",
            "symbol": "sol",
            "market_cap": 80_000_000_000,
            "fully_diluted_valuation": 95_000_000_000,
            "total_volume": 5_000_000_000,
            "market_cap_rank": 6,
            "circulating_supply": 550_000_000,
            "total_supply": 600_000_000,
            "max_supply": None,
            "price_change_percentage_7d_in_currency": 12.0,
            "price_change_percentage_30d_in_currency": 35.0,
            "price_change_percentage_200d_in_currency": 80.0,
        },
        is_trending=True,
    )
    # Strong cap/volume/rank should score high on fundamental and tokenomics,
    # trending status should lift catalyst, and the near-complete circulation
    # should keep unlock risk low.
    self.assertGreater(signals["fundamental"], 0.6)
    self.assertGreater(signals["tokenomics"], 0.8)
    self.assertGreater(signals["catalyst"], 0.6)
    self.assertLess(signals["unlock_risk"], 0.2)
def test_watch_flags_anomalies(self):
config = self.config | {
"watch": {
"alert_drawdown_1h_pct": -5.0,
"alert_drawdown_24h_pct": -10.0,
"alert_spike_1h_pct": 8.0,
"max_position_weight": 0.5,
}
}
with patch.object(portfolio_service, "audit_event", return_value=None):
payload = portfolio_service.watch_portfolio(config, spot_client=FakeSpotClient())
# FakeSpotClient BTC is +5% 24h, ETH is +3% — both should be healthy
self.assertGreaterEqual(payload["healthy_count"], 1)
for result in payload["watch_results"]:
self.assertIn(result["status"], {"healthy", "need_review"})