From 315d60694587962d083f3451b373396931eaeb19 Mon Sep 17 00:00:00 2001 From: zzlgreat Date: Fri, 7 Nov 2025 19:11:58 +0800 Subject: [PATCH] =?UTF-8?q?agent=E5=8A=9F=E8=83=BD=E5=BC=80=E5=8F=91?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0MCP=E5=90=8E=E7=AB=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CLAUDE.md | 28 ++ __pycache__/mcp_server.cpython-310.pyc | Bin 21050 -> 32953 bytes docs/AGENT_DEPLOYMENT.md | 381 ++++++++++++++ docs/MCP_ARCHITECTURE.md | 309 ++++++++++++ kimi_integration.py | 2 +- mcp_agent_system.py | 470 ++++++++++++++++++ mcp_chat_endpoint.py | 295 +++++++++++ mcp_server.py | 471 ++++++++++++++++++ mcp_server_agent_integration.py | 492 ++++++++++++++++++ src/components/ChatBot/ChatInterfaceV2.js | 576 ++++++++++++++++++++++ src/components/ChatBot/PlanCard.js | 145 ++++++ src/components/ChatBot/StepResultCard.js | 186 +++++++ src/components/ChatBot/index.js | 6 +- src/services/llmService.js | 278 +++++++++++ src/views/AgentChat/index.js | 4 +- 15 files changed, 3639 insertions(+), 4 deletions(-) create mode 100644 docs/AGENT_DEPLOYMENT.md create mode 100644 docs/MCP_ARCHITECTURE.md create mode 100644 mcp_agent_system.py create mode 100644 mcp_chat_endpoint.py create mode 100644 mcp_server_agent_integration.py create mode 100644 src/components/ChatBot/ChatInterfaceV2.js create mode 100644 src/components/ChatBot/PlanCard.js create mode 100644 src/components/ChatBot/StepResultCard.js create mode 100644 src/services/llmService.js diff --git a/CLAUDE.md b/CLAUDE.md index 07e104f4..5bda7fa7 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2956,8 +2956,36 @@ refactor(components): 将 EventCard 拆分为原子组件 > **📝 页面级变更历史**: 特定页面的详细变更历史和技术文档已迁移到各自的文档中: > - **Community 页面**: [docs/Community.md](./docs/Community.md) - 页面架构、组件结构、数据流、变更历史 +> - **Agent 系统**: [AGENT_INTEGRATION_COMPLETE.md](./AGENT_INTEGRATION_COMPLETE.md) - Agent 系统集成完整说明 > - **其他页面**: 根据需要创建独立的页面文档 +### 2025-11-07: Agent 系统集成到 mcp_server.py + +**影响范围**: 后端 MCP 服务器 + 前端 Agent 聊天功能 + +**集成成果**: +- 将独立的 Agent 系统完全集成到 `mcp_server.py` 中 +- 使用 **Kimi (kimi-k2-thinking)** 进行计划制定和推理 +- 使用 **DeepMoney (本地部署)** 进行新闻总结 +- 实现三阶段智能分析流程(计划→执行→总结) +- 前端使用 ChatInterfaceV2 + 可折叠卡片展示执行过程 +- **无需运行多个脚本**,所有功能集成在单一服务中 + +**技术要点**: +- 新增 `MCPAgentIntegrated` 类(991-1367行) +- 新增 `/agent/chat` API 端点 +- 新增特殊工具 `summarize_news`(使用 DeepMoney) +- Kimi 使用 `reasoning_content` 字段记录思考过程 +- 自动替换占位符("前面的新闻数据" → 实际数据) + +**前端组件**: +- `ChatInterfaceV2.js` - 新版聊天界面 +- `PlanCard.js` - 执行计划展示 +- `StepResultCard.js` - 步骤结果卡片(可折叠) +- 路由:`/agent-chat` + +**详细文档**: 参见 [AGENT_INTEGRATION_COMPLETE.md](./AGENT_INTEGRATION_COMPLETE.md) + ### 2025-10-30: EventList.js 组件化重构 **影响范围**: Community 页面核心组件 diff --git a/__pycache__/mcp_server.cpython-310.pyc b/__pycache__/mcp_server.cpython-310.pyc index deadef6fc9fa02967ea3fff97732edecb2fc5df8..a4af35fb75bd2824b71864a8057fce7408d5b34b 100644 GIT binary patch literal 32953 zcmc(IeSB2ang7h4JFj^mgb0GDBUmx0A%L$bz9273g+K+hUbWL9=K>QZnRsUgWt!OV z{vrWXe1jl~1(mjfRs{qRciY|V?*6vB+kL;g+udz5lcd{S+uFB9!Qc0J&b>2tk_oo^ z$M2VX=FGk4oO|v$&w0-Ce$FH+Dnc&&{qP4Bv4h`txqeJH&cBJcxdWf%v7pO!pG&wj zmv9Ru?ru>urNyIpT2xJSQ=TX8ZSiS7zEN8N*mQurB$_z)<$!gKVIEZtyQ;-(Z=w7AYP-@pzT2XBJCo42jgQ~#%bfMvWvCx z$P2|Mv|OTHV&zTLCh9fKm*R7oDC=^&U3yK=<$Ac+tzDrjqFhvnN-?U-qfOE$HBZ)k zqH3jEjOLWCs=K^y*Ih0#M$~L{iJA?HcBOt%^XJ4xVr-XNyGo1`7mM*?LbpeoA}$dV z#ie|GHJ-RkT#hF$*FzgTS}pFw;tJef!S_>fKS@l+{bats2KQHr&*A=ae19$OuM$&m zKZWnF!~NBw7WcJ$Ux)jt;u_pv!}rs0f33I<_t)`#J?`toG~7?)`|0{rQ7@*88Dgfm 
z_b@)EyO(DIe>>eh3H=!k(>@7!l$)ikT~NtZx;&$HBp{ARkS4ck~DE4IW065 z_T>a7@^6Qicjt`b|)bgT%M1Ghz#Pn<%^#T)6rOmHZC1`bFQr!r0? z>P_s=6gOp7lwk-QW+~ycck@6uSLP8(<2f?m!jFlWi(2(yQt$(&3Ia8NjrZHKaR zb-6W9C$B|r9YQ_AqM^^GeoZo^+yqactK04Gb$J4m88Snb?1LnIDTq2OhIcqjR?xl9 zD-WD?#PNkge=6`3hxF}ckl>izaFWVALGIO}W{BM8q5abXByl6Qb^A}x_(CBr@t2uz z5ubR?aU+|EyvYDg^XjHGd=j)EA~N9VNVP#qU}jJf=vM34B_;EZ>nd#Ffq=F`F&37x zTd^nvzt;cb-C{?{2I3T#OLA{e(N>3Pr)i^g7uiXUn+8lrlx@mdeX%-7*h0M?&7p*q zIdgd#$d0lo?bvI5URA@C)~`crVp(ZPr9WCQ;*}kRT^f|M+K$pz5L9+2TP$C5O*~%u zK`G7@3Y5bmOteA5gCnY>xrEvd=4acxdHrqT>@2!WS~8=|HhrX&k2ty=S#qSUgXxyn zqhVYv%I*xa1$G+P1NRJVm|aaO`%roZ!cK%;0Ilud4(bah;J&D8?yhcF0u{)Y3$jwY zK4(Ug?8_C2i_wuBw)z4c9gK%9UjOER9y#10!b$CIZL3_%8e5mFYQAx8*bH8A>3_MX zD4V`$WKl!b?*e&ee5cz_eV_;mTn+lPFkxd;kl=Cqd@`sWaevuDa!9;Y_I$1^%CXB3 zy{60gpkUeSYH%+R3O2>aGB=^1OoUW;`g(|lNDl(#zZ4B|HdgcFywx36(E zty4*i_VRI5eWBBrcSvll|gy zc`i8)UQQOf0tsSD#jzqeBV1uE2!HIy?(dL&+bqk6a(|}$$<3p&t*38;XHBm z$S+3C82NwN!!x>vznQ;c+BG~B4VAg%l(-%6v0+aX{-`)snXNms1P$vu4z}cmD4l6S zXDa^5aJ?*@0R?@f;#gI>XsF6rD7|vT^O17k-$e^}+u*qf{RodBjE$=}T9uV}9(A5T z_9?^j1o3{=;gHOA#LpEd(E{)Z#g^R6@w8}nSb0PgrJRXIj#O_SlY-9pAk^VjzhB?} zPl6EaE9g?|u;b`+*wP(<>x}d~4^Qfd>qgBwS^RPQB_?? zo)pgN2R5um9-cOR31yyu-^04KGA?5^Xw_pC2~!@*j)V!;y<8ts11^&B##YwkD!)>M zuO^2)B@Wb_DQH768LU=p$>`uY*bW<&B(D@37GyR{f8xvJ$ygR~Ow}FZKY$U#)}_YS z)?ZkVqkN=@@Pg^&G^8v;q%Rz*y#eh;3hYMc_9cQPxD%?H*E8D0_&TuG7ASvFL`iJ{ zIU|d}UHkZ4nbn(7EuPMN9YPHNOew;W)D&dhBC6{OmOcT^sHE~iWXhc8V{0VL{QKA) zl|tx-vyD|Q2J+KLoOrITn4FakeNlJc0+}#b<}uWPS2AnTmvPncT>W&CA>OY~C+DQP zghdzi0*}lOw>5q%!bs6WE+R9Pe92mmLXl9P#IC*y$5qytDUV)&78q41RyP!qe6g*e zm^>|0d9J~mvJn)vwht=C5homt<>e!%8(swHCcXh=hNq13{2;=pXo|+AY5AMns@LPf z^KU+!aPoq$?6xVX@akbF6BT}5OlZmyA2jX=Nga_nxlr3;0PthfQ5%l)1cH7I;v&=V zLzKD=AZ+$AKddX6fmo`>dWU23j9lZbKwMrth17}97iW_f!KZBUj3{Yxw8T>7mR|8^ z;SuG>?LgqqBfNkxCKTdKlRa@I1cCnuxi1+;e%CZ3rV$Sa!C5-a$R->d+eqt@!o*ug z47_X@czDUbWIzV0B~Xm&6!7)%oZdVyKF>ytU;*Y zyv+SDVKo`#v2G_o&BBLR|IRPUcfu0is!-ShhYNQmxK$N)I=5d7Th(3_-W!~70r#G(7+ed3GkIvk4gM~QiiZ})rEo+ zfcf#B`~GlZ@Z&SjUO9J{(*~DS)!o6=7H~;}zXZBk9SJdgNEo)FJMt6kRw2al<`OVY zM|+b>xN^xfbgrv`xWe&HPergh%)qD~V_o!7NC(IEqnS3wsoHqR5 z!|P$H!>->)h>((MI??F-#<@ok5KT?(pk2CR)VN-Yn%klX$kbl-w;o>Mb(gz_UeS z-vB8VPxYlcr2a&Csx<}to4p-Q%ktKiHfI8yA*Hi?91K;s0U0j5eyheovFn zi+@eX8$=O$Qi?DK0soxgGC(ERl6BgREe%2L345?Lh;RtuIKlwJV+f}ZotJ!UokS zQ^ns!#(Vgm9$dU(5eB$FZ014#s0_b+=DDfqsd2WrRANiB6I(94x+lUf1=@*rVo$OW3+&IsU;F=B GzWN8@?tlLP diff --git a/docs/AGENT_DEPLOYMENT.md b/docs/AGENT_DEPLOYMENT.md new file mode 100644 index 00000000..c16ac10a --- /dev/null +++ b/docs/AGENT_DEPLOYMENT.md @@ -0,0 +1,381 @@ +# AI Agent 系统部署指南 + +## 🎯 系统架构 + +### 三阶段流程 + +``` +用户输入 + ↓ +[阶段1: 计划制定 Planning] + - LLM 分析用户需求 + - 确定需要哪些工具 + - 制定执行计划(steps) + ↓ +[阶段2: 工具执行 Execution] + - 按计划顺序调用 MCP 工具 + - 收集数据 + - 异常处理和重试 + ↓ +[阶段3: 结果总结 Summarization] + - LLM 综合分析所有数据 + - 生成自然语言报告 + ↓ +输出给用户 +``` + +## 📦 文件清单 + +### 后端文件 + +``` +mcp_server.py # MCP 工具服务器(已有) +mcp_agent_system.py # Agent 系统核心逻辑(新增) +mcp_config.py # 配置文件(已有) +mcp_database.py # 数据库操作(已有) +``` + +### 前端文件 + +``` +src/components/ChatBot/ +├── ChatInterfaceV2.js # 新版聊天界面(漂亮) +├── PlanCard.js # 执行计划卡片 +├── StepResultCard.js # 步骤结果卡片(可折叠) +├── ChatInterface.js # 旧版聊天界面(保留) +├── MessageBubble.js # 消息气泡组件(保留) +└── index.js # 统一导出 + +src/views/AgentChat/ +└── index.js # Agent 聊天页面 +``` + +## 🚀 部署步骤 + +### 1. 安装依赖 + +```bash +# 进入项目目录 +cd /home/ubuntu/vf_react + +# 安装 OpenAI SDK(支持多个LLM提供商) +pip install openai +``` + +### 2. 获取 LLM API Key + +**推荐:通义千问(便宜且中文能力强)** + +1. 访问 https://dashscope.console.aliyun.com/ +2. 注册/登录阿里云账号 +3. 开通 DashScope 服务 +4. 
创建 API Key +5. 复制 API Key(格式:`sk-xxx...`) + +**其他选择**: +- DeepSeek: https://platform.deepseek.com/ (最便宜) +- OpenAI: https://platform.openai.com/ (需要翻墙) + +### 3. 配置环境变量 + +```bash +# 编辑环境变量 +sudo nano /etc/environment + +# 添加以下内容(选择一个) +# 方式1: 通义千问(推荐) +DASHSCOPE_API_KEY="sk-your-key-here" + +# 方式2: DeepSeek(更便宜) +DEEPSEEK_API_KEY="sk-your-key-here" + +# 方式3: OpenAI +OPENAI_API_KEY="sk-your-key-here" + +# 保存并退出,然后重新加载 +source /etc/environment + +# 验证环境变量 +echo $DASHSCOPE_API_KEY +``` + +### 4. 修改 mcp_server.py + +在文件末尾(`if __name__ == "__main__":` 之前)添加: + +```python +# ==================== Agent 端点 ==================== + +from mcp_agent_system import MCPAgent, ChatRequest, AgentResponse + +# 创建 Agent 实例 +agent = MCPAgent(provider="qwen") # 或 "deepseek", "openai" + +@app.post("/agent/chat", response_model=AgentResponse) +async def agent_chat(request: ChatRequest): + """智能代理对话端点""" + logger.info(f"Agent chat: {request.message}") + + # 获取工具列表和处理器 + tools = [tool.dict() for tool in TOOLS] + + # 处理查询 + response = await agent.process_query( + user_query=request.message, + tools=tools, + tool_handlers=TOOL_HANDLERS, + ) + + return response +``` + +### 5. 重启 MCP 服务 + +```bash +# 如果使用 systemd +sudo systemctl restart mcp-server + +# 或者手动重启 +pkill -f mcp_server +nohup uvicorn mcp_server:app --host 0.0.0.0 --port 8900 > mcp_server.log 2>&1 & + +# 查看日志 +tail -f mcp_server.log +``` + +### 6. 测试 Agent API + +```bash +# 测试 Agent 端点 +curl -X POST http://localhost:8900/agent/chat \ + -H "Content-Type: application/json" \ + -d '{ + "message": "全面分析贵州茅台这只股票", + "conversation_history": [] + }' + +# 应该返回类似这样的JSON: +# { +# "success": true, +# "message": "根据分析,贵州茅台...", +# "plan": { +# "goal": "全面分析贵州茅台", +# "steps": [...] +# }, +# "step_results": [...], +# "metadata": {...} +# } +``` + +### 7. 部署前端 + +```bash +# 在本地构建 +npm run build + +# 上传到服务器 +scp -r build/* ubuntu@your-server:/var/www/valuefrontier.cn/ + +# 或者在服务器上构建 +cd /home/ubuntu/vf_react +npm run build +sudo cp -r build/* /var/www/valuefrontier.cn/ +``` + +### 8. 重启 Nginx + +```bash +sudo systemctl reload nginx +``` + +## ✅ 验证部署 + +### 1. 测试后端 API + +```bash +# 测试工具列表 +curl https://valuefrontier.cn/mcp/tools + +# 测试 Agent +curl -X POST https://valuefrontier.cn/mcp/agent/chat \ + -H "Content-Type: application/json" \ + -d '{ + "message": "今日涨停股票有哪些", + "conversation_history": [] + }' +``` + +### 2. 测试前端 + +1. 访问 https://valuefrontier.cn/agent-chat +2. 输入问题:"全面分析贵州茅台这只股票" +3. 观察: + - ✓ 是否显示执行计划卡片 + - ✓ 是否显示步骤执行过程 + - ✓ 是否显示最终总结 + - ✓ 步骤结果卡片是否可折叠 + +### 3. 测试用例 + +``` +测试1: 简单查询 +输入:查询贵州茅台的股票信息 +预期:调用 get_stock_basic_info,返回基本信息 + +测试2: 深度分析(推荐) +输入:全面分析贵州茅台这只股票 +预期: + - 步骤1: get_stock_basic_info + - 步骤2: get_stock_financial_index + - 步骤3: get_stock_trade_data + - 步骤4: search_china_news + - 步骤5: summarize_with_llm + +测试3: 市场热点 +输入:今日涨停股票有哪些亮点 +预期: + - 步骤1: search_limit_up_stocks + - 步骤2: get_concept_statistics + - 步骤3: summarize_with_llm + +测试4: 概念分析 +输入:新能源概念板块的投资机会 +预期: + - 步骤1: search_concepts(新能源) + - 步骤2: search_china_news(新能源) + - 步骤3: summarize_with_llm +``` + +## 🐛 故障排查 + +### 问题1: Agent 返回 "Provider not configured" + +**原因**: 环境变量未设置 + +**解决**: +```bash +# 检查环境变量 +echo $DASHSCOPE_API_KEY + +# 如果为空,重新设置 +export DASHSCOPE_API_KEY="sk-xxx..." + +# 重启服务 +sudo systemctl restart mcp-server +``` + +### 问题2: Agent 返回 JSON 解析错误 + +**原因**: LLM 没有返回正确的 JSON 格式 + +**解决**: 在 `mcp_agent_system.py` 中已经处理了代码块标记清理,如果还有问题: +1. 检查 LLM 的 temperature 参数(建议 0.3) +2. 检查 prompt 是否清晰 +3. 
尝试不同的 LLM 提供商 + +### 问题3: 前端显示 "查询失败" + +**原因**: 后端 API 未正确配置或 Nginx 代理问题 + +**解决**: +```bash +# 1. 检查 MCP 服务是否运行 +ps aux | grep mcp_server + +# 2. 检查 Nginx 配置 +sudo nginx -t + +# 3. 查看错误日志 +sudo tail -f /var/log/nginx/error.log +tail -f /home/ubuntu/vf_react/mcp_server.log +``` + +### 问题4: 执行步骤失败 + +**原因**: 某个 MCP 工具调用失败 + +**解决**: 查看步骤结果卡片中的错误信息,通常是: +- API 超时:增加 timeout +- 参数错误:检查工具定义 +- 数据库连接失败:检查数据库连接 + +## 💰 成本估算 + +### 使用通义千问(qwen-plus) + +**价格**: ¥0.004/1000 tokens + +**典型对话消耗**: +- 简单查询(1步): ~500 tokens = ¥0.002 +- 深度分析(5步): ~3000 tokens = ¥0.012 +- 平均每次对话: ¥0.005 + +**月度成本**(1000次深度分析): +- 1000次 × ¥0.012 = ¥12 + +**结论**: 非常便宜!1000次深度分析只需要12元。 + +### 使用 DeepSeek(更便宜) + +**价格**: ¥0.001/1000 tokens(比通义千问便宜4倍) + +**月度成本**(1000次深度分析): +- 1000次 × ¥0.003 = ¥3 + +## 📊 监控和优化 + +### 1. 添加日志监控 + +```bash +# 实时查看 Agent 日志 +tail -f mcp_server.log | grep -E "\[Agent\]|\[Planning\]|\[Execution\]|\[Summary\]" +``` + +### 2. 性能优化建议 + +1. **缓存计划**: 相似的问题可以复用执行计划 +2. **并行执行**: 独立的工具调用可以并行执行 +3. **流式输出**: 使用 Server-Sent Events 实时返回进度 +4. **结果缓存**: 相同的工具调用结果可以缓存 + +### 3. 添加统计分析 + +在 `mcp_server.py` 中添加: + +```python +from datetime import datetime +import json + +# 记录每次 Agent 调用 +@app.post("/agent/chat") +async def agent_chat(request: ChatRequest): + start_time = datetime.now() + + response = await agent.process_query(...) + + duration = (datetime.now() - start_time).total_seconds() + + # 记录到日志 + logger.info(f"Agent query completed in {duration:.2f}s", extra={ + "query": request.message, + "steps": len(response.plan.steps) if response.plan else 0, + "success": response.success, + "duration": duration, + }) + + return response +``` + +## 🎉 完成! + +现在你的 AI Agent 系统已经部署完成! + +访问 https://valuefrontier.cn/agent-chat 开始使用。 + +**特点**: +- ✅ 三阶段智能分析(计划-执行-总结) +- ✅ 漂亮的UI界面(卡片式展示) +- ✅ 步骤结果可折叠查看 +- ✅ 实时进度反馈 +- ✅ 异常处理和重试 +- ✅ 成本低廉(¥3-12/月) diff --git a/docs/MCP_ARCHITECTURE.md b/docs/MCP_ARCHITECTURE.md new file mode 100644 index 00000000..23ea3b6b --- /dev/null +++ b/docs/MCP_ARCHITECTURE.md @@ -0,0 +1,309 @@ +# MCP 架构说明 + +## 🎯 MCP 是什么? + +**MCP (Model Context Protocol)** 是一个**工具调用协议**,它的核心职责是: + +1. ✅ **定义工具接口**:告诉 LLM 有哪些工具可用,每个工具需要什么参数 +2. ✅ **执行工具调用**:根据请求调用对应的后端 API +3. ✅ **返回结构化数据**:将 API 结果返回给调用方 + +**MCP 不负责**: +- ❌ 自然语言理解(NLU) +- ❌ 意图识别 +- ❌ 结果总结 +- ❌ 对话管理 + +## 📊 当前架构 + +### 方案 1:简单关键词匹配(已实现) + +``` +用户输入:"查询贵州茅台的股票信息" + ↓ +前端 ChatInterface (关键词匹配) + ↓ +MCP 工具层 (search_china_news) + ↓ +返回 JSON 数据 + ↓ +前端显示原始数据 +``` + +**问题**: +- ✗ 只能识别简单关键词 +- ✗ 无法理解复杂意图 +- ✗ 返回的是原始 JSON,用户体验差 + +### 方案 2:集成 LLM(推荐) + +``` +用户输入:"查询贵州茅台的股票信息" + ↓ +LLM (Claude/GPT-4/通义千问) + ↓ 理解意图:需要查询股票代码 600519 的基本信息 + ↓ 选择工具:get_stock_basic_info + ↓ 提取参数:{"seccode": "600519"} +MCP 工具层 + ↓ 调用 API,获取数据 +返回结构化数据 + ↓ +LLM 总结结果 + ↓ "贵州茅台(600519)是中国知名的白酒生产企业, + 当前股价 1650.00 元,市值 2.07 万亿..." +前端显示自然语言回复 +``` + +**优势**: +- ✓ 理解复杂意图 +- ✓ 自动选择合适的工具 +- ✓ 自然语言总结,用户体验好 +- ✓ 支持多轮对话 + +## 🔧 实现方案 + +### 选项 A:前端集成 LLM(快速实现) + +**适用场景**:快速原型、小规模应用 + +**优点**: +- 实现简单 +- 无需修改后端 + +**缺点**: +- API Key 暴露在前端(安全风险) +- 每个用户都消耗 API 额度 +- 无法统一管理和监控 + +**实现步骤**: + +1. 修改 `src/components/ChatBot/ChatInterface.js`: + +```javascript +import { llmService } from '../../services/llmService'; + +const handleSendMessage = async () => { + // ... + + // 使用 LLM 服务替代简单的 mcpService.chat + const response = await llmService.chat(inputValue, messages); + + // ... +}; +``` + +2. 配置 API Key(在 `.env.local`): + +```bash +REACT_APP_OPENAI_API_KEY=sk-xxx... +# 或者使用通义千问(更便宜) +REACT_APP_DASHSCOPE_API_KEY=sk-xxx... 
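+# 或者使用 DeepSeek(最便宜;此变量名为示意,假设 llmService.js 按同样方式读取)
+REACT_APP_DEEPSEEK_API_KEY=sk-xxx...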
+``` + +### 选项 B:后端集成 LLM(生产推荐)⭐ + +**适用场景**:生产环境、需要安全和性能 + +**优点**: +- ✓ API Key 安全(不暴露给前端) +- ✓ 统一管理和监控 +- ✓ 可以做缓存优化 +- ✓ 可以做速率限制 + +**缺点**: +- 需要修改后端 +- 增加服务器成本 + +**实现步骤**: + +#### 1. 安装依赖 + +```bash +pip install openai +``` + +#### 2. 修改 `mcp_server.py`,添加聊天端点 + +在文件末尾添加: + +```python +from mcp_chat_endpoint import MCPChatAssistant, ChatRequest, ChatResponse + +# 创建聊天助手实例 +chat_assistant = MCPChatAssistant(provider="qwen") # 推荐使用通义千问 + +@app.post("/chat", response_model=ChatResponse) +async def chat_endpoint(request: ChatRequest): + """智能对话端点 - 使用LLM理解意图并调用工具""" + logger.info(f"Chat request: {request.message}") + + # 获取可用工具列表 + tools = [tool.dict() for tool in TOOLS] + + # 调用聊天助手 + response = await chat_assistant.chat( + user_message=request.message, + conversation_history=request.conversation_history, + tools=tools, + ) + + return response +``` + +#### 3. 配置环境变量 + +在服务器上设置: + +```bash +# 方式1:使用通义千问(推荐,价格便宜) +export DASHSCOPE_API_KEY="sk-xxx..." + +# 方式2:使用 OpenAI +export OPENAI_API_KEY="sk-xxx..." + +# 方式3:使用 DeepSeek(最便宜) +export DEEPSEEK_API_KEY="sk-xxx..." +``` + +#### 4. 修改前端 `mcpService.js` + +```javascript +/** + * 智能对话 - 使用后端LLM处理 + */ +async chat(userMessage, conversationHistory = []) { + try { + const response = await this.client.post('/chat', { + message: userMessage, + conversation_history: conversationHistory, + }); + + return { + success: true, + data: response, + }; + } catch (error) { + return { + success: false, + error: error.message || '对话处理失败', + }; + } +} +``` + +#### 5. 修改前端 `ChatInterface.js` + +```javascript +const handleSendMessage = async () => { + // ... + + try { + // 调用后端聊天API + const response = await mcpService.chat(inputValue, messages); + + if (response.success) { + const botMessage = { + id: Date.now() + 1, + content: response.data.message, // LLM总结的自然语言 + isUser: false, + type: 'text', + timestamp: new Date().toISOString(), + toolUsed: response.data.tool_used, // 可选:显示使用了哪个工具 + rawData: response.data.raw_data, // 可选:原始数据(折叠显示) + }; + setMessages((prev) => [...prev, botMessage]); + } + } catch (error) { + // ... + } +}; +``` + +## 💰 LLM 选择和成本 + +### 推荐:通义千问(阿里云) + +**优点**: +- 价格便宜(1000次对话约 ¥1-2) +- 中文理解能力强 +- 国内访问稳定 + +**价格**: +- qwen-plus: ¥0.004/1000 tokens(约 ¥0.001/次对话) +- qwen-turbo: ¥0.002/1000 tokens(更便宜) + +**获取 API Key**: +1. 访问 https://dashscope.console.aliyun.com/ +2. 创建 API Key +3. 设置环境变量 `DASHSCOPE_API_KEY` + +### 其他选择 + +| 提供商 | 模型 | 价格 | 优点 | 缺点 | +|--------|------|------|------|------| +| **通义千问** | qwen-plus | ¥0.001/次 | 便宜、中文好 | - | +| **DeepSeek** | deepseek-chat | ¥0.0005/次 | 最便宜 | 新公司 | +| **OpenAI** | gpt-4o-mini | $0.15/1M tokens | 能力强 | 贵、需翻墙 | +| **Claude** | claude-3-haiku | $0.25/1M tokens | 理解力强 | 贵、需翻墙 | + +## 🚀 部署步骤 + +### 1. 后端部署 + +```bash +# 安装依赖 +pip install openai + +# 设置 API Key +export DASHSCOPE_API_KEY="sk-xxx..." + +# 重启服务 +sudo systemctl restart mcp-server + +# 测试聊天端点 +curl -X POST https://valuefrontier.cn/mcp/chat \ + -H "Content-Type: application/json" \ + -d '{"message": "查询贵州茅台的股票信息"}' +``` + +### 2. 前端部署 + +```bash +# 构建 +npm run build + +# 部署 +scp -r build/* user@server:/var/www/valuefrontier.cn/ +``` + +### 3. 验证 + +访问 https://valuefrontier.cn/agent-chat,测试对话: + +**测试用例**: +1. "查询贵州茅台的股票信息" → 应返回自然语言总结 +2. "今日涨停的股票有哪些" → 应返回涨停股票列表并总结 +3. 
"新能源概念板块表现如何" → 应搜索概念并分析 + +## 📊 对比总结 + +| 特性 | 简单匹配 | 前端LLM | 后端LLM ⭐ | +|------|---------|---------|-----------| +| 实现难度 | 简单 | 中等 | 中等 | +| 用户体验 | 差 | 好 | 好 | +| 安全性 | 高 | 低 | 高 | +| 成本 | 无 | 用户承担 | 服务器承担 | +| 可维护性 | 差 | 中 | 好 | +| **推荐指数** | ⭐ | ⭐⭐ | ⭐⭐⭐⭐⭐ | + +## 🎯 最终推荐 + +**生产环境:后端集成 LLM (方案 B)** +- 使用通义千问(qwen-plus) +- 成本低(约 ¥50/月,10000次对话) +- 安全可靠 + +**快速原型:前端集成 LLM (方案 A)** +- 适合演示 +- 快速验证可行性 +- 后续再迁移到后端 diff --git a/kimi_integration.py b/kimi_integration.py index 3f74c99a..80969666 100644 --- a/kimi_integration.py +++ b/kimi_integration.py @@ -11,7 +11,7 @@ from mcp_client_example import MCPClient # Kimi API配置 KIMI_API_KEY = "sk-TzB4VYJfCoXGcGrGMiewukVRzjuDsbVCkaZXi2LvkS8s60E5" KIMI_BASE_URL = "https://api.moonshot.cn/v1" -KIMI_MODEL = "kimi-k2-turbo-preview" +KIMI_MODEL = "kimi-k2-turbpreview" # 初始化Kimi客户端 kimi_client = OpenAI( diff --git a/mcp_agent_system.py b/mcp_agent_system.py new file mode 100644 index 00000000..2dfa836d --- /dev/null +++ b/mcp_agent_system.py @@ -0,0 +1,470 @@ +""" +MCP Agent System - 基于 DeepResearch 逻辑的智能代理系统 +三阶段流程:计划制定 → 工具执行 → 结果总结 +""" + +from pydantic import BaseModel +from typing import List, Dict, Any, Optional, Literal +from datetime import datetime +import json +import logging +from openai import OpenAI +import asyncio +import os + +logger = logging.getLogger(__name__) + +# ==================== 数据模型 ==================== + +class ToolCall(BaseModel): + """工具调用""" + tool: str + arguments: Dict[str, Any] + reason: str # 为什么要调用这个工具 + +class ExecutionPlan(BaseModel): + """执行计划""" + goal: str # 用户的目标 + steps: List[ToolCall] # 执行步骤 + reasoning: str # 规划reasoning + +class StepResult(BaseModel): + """单步执行结果""" + step_index: int + tool: str + arguments: Dict[str, Any] + status: Literal["success", "failed", "skipped"] + result: Optional[Any] = None + error: Optional[str] = None + execution_time: float = 0 + +class AgentResponse(BaseModel): + """Agent响应""" + success: bool + message: str # 自然语言总结 + plan: Optional[ExecutionPlan] = None # 执行计划 + step_results: List[StepResult] = [] # 每步的结果 + final_summary: Optional[str] = None # 最终总结 + metadata: Optional[Dict[str, Any]] = None + +class ChatRequest(BaseModel): + """聊天请求""" + message: str + conversation_history: List[Dict[str, str]] = [] + stream: bool = False # 是否流式输出 + +# ==================== Agent 系统 ==================== + +class MCPAgent: + """MCP 智能代理 - 三阶段执行""" + + def __init__(self, provider: str = "qwen"): + self.provider = provider + + # LLM 配置 + config = { + "qwen": { + "api_key": os.getenv("DASHSCOPE_API_KEY", ""), + "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "model": "qwen-plus", + }, + "deepseek": { + "api_key": os.getenv("DEEPSEEK_API_KEY", ""), + "base_url": "https://api.deepseek.com/v1", + "model": "deepseek-chat", + }, + "openai": { + "api_key": os.getenv("OPENAI_API_KEY", ""), + "base_url": "https://api.openai.com/v1", + "model": "gpt-4o-mini", + }, + }.get(provider) + + if not config or not config["api_key"]: + raise ValueError(f"Provider '{provider}' not configured. 
Please set API key.") + + self.client = OpenAI( + api_key=config["api_key"], + base_url=config["base_url"], + ) + self.model = config["model"] + + # ==================== 阶段 1: 计划制定 ==================== + + def get_planning_prompt(self, tools: List[dict]) -> str: + """获取计划制定的系统提示词""" + tools_desc = "\n\n".join([ + f"**{tool['name']}**\n" + f"描述:{tool['description']}\n" + f"参数:{json.dumps(tool['parameters'], ensure_ascii=False, indent=2)}" + for tool in tools + ]) + + return f"""你是一个专业的金融研究助手。你需要根据用户的问题,制定一个详细的执行计划。 + +## 可用工具 + +{tools_desc} + +## 重要知识 +- 贵州茅台股票代码: 600519 +- 涨停: 股价单日涨幅约10% +- 概念板块: 相同题材的股票分类 + +## 特殊工具说明 +- **summarize_with_llm**: 这是一个特殊工具,用于让你总结和分析收集到的数据 + - 当需要对多个数据源进行综合分析时使用 + - 当需要生成研究报告时使用 + - 参数: {{"data": "要分析的数据", "task": "分析任务描述"}} + +## 任务 +分析用户问题,制定执行计划。返回 JSON 格式: + +```json +{{ + "goal": "用户的目标(一句话概括)", + "reasoning": "你的分析思路(为什么这样规划)", + "steps": [ + {{ + "tool": "工具名称", + "arguments": {{"参数名": "参数值"}}, + "reason": "为什么要执行这一步" + }} + ] +}} +``` + +## 规划原则 +1. **从简到繁**: 先获取基础信息,再深入分析 +2. **数据先行**: 先收集数据,再总结分析 +3. **合理组合**: 可以调用多个工具,但不要超过5个 +4. **包含总结**: 最后一步通常是 summarize_with_llm + +## 示例 + +用户:"帮我全面分析一下贵州茅台这只股票" + +你的计划: +```json +{{ + "goal": "全面分析贵州茅台股票", + "reasoning": "需要获取基本信息、财务指标、交易数据,然后综合分析", + "steps": [ + {{ + "tool": "get_stock_basic_info", + "arguments": {{"seccode": "600519"}}, + "reason": "获取股票基本信息(公司名称、行业、市值等)" + }}, + {{ + "tool": "get_stock_financial_index", + "arguments": {{"seccode": "600519", "limit": 5}}, + "reason": "获取最近5期财务指标(营收、利润、ROE等)" + }}, + {{ + "tool": "get_stock_trade_data", + "arguments": {{"seccode": "600519", "limit": 30}}, + "reason": "获取最近30天交易数据(价格走势、成交量)" + }}, + {{ + "tool": "search_china_news", + "arguments": {{"query": "贵州茅台", "top_k": 5}}, + "reason": "获取最新新闻,了解市场动态" + }}, + {{ + "tool": "summarize_with_llm", + "arguments": {{ + "data": "前面收集的所有数据", + "task": "综合分析贵州茅台的投资价值,包括基本面、财务状况、股价走势、市场情绪" + }}, + "reason": "综合所有数据,生成投资分析报告" + }} + ] +}} +``` + +只返回JSON,不要额外解释。""" + + async def create_plan(self, user_query: str, tools: List[dict]) -> ExecutionPlan: + """阶段1: 创建执行计划""" + logger.info(f"[Planning] Creating plan for: {user_query}") + + messages = [ + {"role": "system", "content": self.get_planning_prompt(tools)}, + {"role": "user", "content": user_query}, + ] + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=0.3, + max_tokens=1500, + ) + + plan_json = response.choices[0].message.content.strip() + logger.info(f"[Planning] Raw response: {plan_json}") + + # 清理可能的代码块标记 + if "```json" in plan_json: + plan_json = plan_json.split("```json")[1].split("```")[0].strip() + elif "```" in plan_json: + plan_json = plan_json.split("```")[1].split("```")[0].strip() + + plan_data = json.loads(plan_json) + + plan = ExecutionPlan( + goal=plan_data["goal"], + reasoning=plan_data.get("reasoning", ""), + steps=[ + ToolCall(**step) for step in plan_data["steps"] + ], + ) + + logger.info(f"[Planning] Plan created: {len(plan.steps)} steps") + return plan + + # ==================== 阶段 2: 工具执行 ==================== + + async def execute_tool( + self, + tool_name: str, + arguments: Dict[str, Any], + tool_handlers: Dict[str, Any], + ) -> Dict[str, Any]: + """执行单个工具""" + + # 特殊处理:summarize_with_llm + if tool_name == "summarize_with_llm": + return await self.summarize_with_llm( + data=arguments.get("data", ""), + task=arguments.get("task", "总结数据"), + ) + + # 调用 MCP 工具 + handler = tool_handlers.get(tool_name) + if not handler: + raise ValueError(f"Tool '{tool_name}' not found") + + result = 
await handler(arguments) + return result + + async def execute_plan( + self, + plan: ExecutionPlan, + tool_handlers: Dict[str, Any], + ) -> List[StepResult]: + """阶段2: 执行计划中的所有步骤""" + logger.info(f"[Execution] Starting execution: {len(plan.steps)} steps") + + results = [] + collected_data = {} # 收集的数据,供后续步骤使用 + + for i, step in enumerate(plan.steps): + logger.info(f"[Execution] Step {i+1}/{len(plan.steps)}: {step.tool}") + + start_time = datetime.now() + + try: + # 替换 arguments 中的占位符 + arguments = step.arguments.copy() + if step.tool == "summarize_with_llm" and arguments.get("data") == "前面收集的所有数据": + # 将收集的数据传递给总结工具 + arguments["data"] = json.dumps(collected_data, ensure_ascii=False, indent=2) + + # 执行工具 + result = await self.execute_tool(step.tool, arguments, tool_handlers) + + execution_time = (datetime.now() - start_time).total_seconds() + + # 保存结果 + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=arguments, + status="success", + result=result, + execution_time=execution_time, + ) + results.append(step_result) + + # 收集数据 + collected_data[f"step_{i+1}_{step.tool}"] = result + + logger.info(f"[Execution] Step {i+1} completed in {execution_time:.2f}s") + + except Exception as e: + logger.error(f"[Execution] Step {i+1} failed: {str(e)}") + + execution_time = (datetime.now() - start_time).total_seconds() + + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=step.arguments, + status="failed", + error=str(e), + execution_time=execution_time, + ) + results.append(step_result) + + # 根据错误类型决定是否继续 + if "not found" in str(e).lower(): + logger.warning(f"[Execution] Stopping due to critical error") + break + else: + logger.warning(f"[Execution] Continuing despite error") + continue + + logger.info(f"[Execution] Execution completed: {len(results)} steps") + return results + + async def summarize_with_llm(self, data: str, task: str) -> str: + """特殊工具:使用 LLM 总结数据""" + logger.info(f"[LLM Summary] Task: {task}") + + messages = [ + { + "role": "system", + "content": "你是一个专业的金融分析师。根据提供的数据,完成指定的分析任务。" + }, + { + "role": "user", + "content": f"## 任务\n{task}\n\n## 数据\n{data}\n\n请根据数据完成分析任务,用专业且易懂的语言呈现。" + }, + ] + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=0.7, + max_tokens=2000, + ) + + summary = response.choices[0].message.content + return summary + + # ==================== 阶段 3: 结果总结 ==================== + + async def generate_final_summary( + self, + user_query: str, + plan: ExecutionPlan, + step_results: List[StepResult], + ) -> str: + """阶段3: 生成最终总结""" + logger.info("[Summary] Generating final summary") + + # 收集所有成功的结果 + successful_results = [r for r in step_results if r.status == "success"] + + if not successful_results: + return "很抱歉,所有步骤都执行失败,无法生成分析报告。" + + # 构建总结提示 + results_text = "\n\n".join([ + f"**步骤 {r.step_index + 1}: {r.tool}**\n" + f"结果: {json.dumps(r.result, ensure_ascii=False, indent=2)[:1000]}..." 
+ for r in successful_results + ]) + + messages = [ + { + "role": "system", + "content": "你是一个专业的金融研究助手。根据执行结果,生成一份简洁清晰的报告。" + }, + { + "role": "user", + "content": f""" +用户问题:{user_query} + +执行计划:{plan.goal} + +执行结果: +{results_text} + +请根据以上信息,生成一份专业的分析报告(300字以内)。 +""" + }, + ] + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=0.7, + max_tokens=1000, + ) + + summary = response.choices[0].message.content + logger.info("[Summary] Final summary generated") + return summary + + # ==================== 主流程 ==================== + + async def process_query( + self, + user_query: str, + tools: List[dict], + tool_handlers: Dict[str, Any], + ) -> AgentResponse: + """主流程:处理用户查询""" + logger.info(f"[Agent] Processing query: {user_query}") + + try: + # 阶段 1: 创建计划 + plan = await self.create_plan(user_query, tools) + + # 阶段 2: 执行计划 + step_results = await self.execute_plan(plan, tool_handlers) + + # 阶段 3: 生成总结 + final_summary = await self.generate_final_summary( + user_query, plan, step_results + ) + + return AgentResponse( + success=True, + message=final_summary, + plan=plan, + step_results=step_results, + final_summary=final_summary, + metadata={ + "total_steps": len(plan.steps), + "successful_steps": len([r for r in step_results if r.status == "success"]), + "failed_steps": len([r for r in step_results if r.status == "failed"]), + "total_execution_time": sum(r.execution_time for r in step_results), + }, + ) + + except Exception as e: + logger.error(f"[Agent] Error: {str(e)}", exc_info=True) + return AgentResponse( + success=False, + message=f"处理失败: {str(e)}", + ) + +# ==================== FastAPI 端点 ==================== + +""" +在 mcp_server.py 中添加: + +from mcp_agent_system import MCPAgent, ChatRequest, AgentResponse + +# 创建 Agent 实例 +agent = MCPAgent(provider="qwen") + +@app.post("/agent/chat", response_model=AgentResponse) +async def agent_chat(request: ChatRequest): + \"\"\"智能代理对话端点\"\"\" + logger.info(f"Agent chat: {request.message}") + + # 获取工具列表和处理器 + tools = [tool.dict() for tool in TOOLS] + + # 处理查询 + response = await agent.process_query( + user_query=request.message, + tools=tools, + tool_handlers=TOOL_HANDLERS, + ) + + return response +""" diff --git a/mcp_chat_endpoint.py b/mcp_chat_endpoint.py new file mode 100644 index 00000000..11da0ad8 --- /dev/null +++ b/mcp_chat_endpoint.py @@ -0,0 +1,295 @@ +""" +MCP Chat Endpoint - 添加到 mcp_server.py +集成LLM实现智能对话,自动调用MCP工具并总结结果 +""" + +from pydantic import BaseModel +from typing import List, Dict, Any, Optional +import os +import json +from openai import OpenAI +import logging + +logger = logging.getLogger(__name__) + +# ==================== LLM配置 ==================== + +# 支持多种LLM提供商 +LLM_PROVIDERS = { + "openai": { + "api_key": os.getenv("OPENAI_API_KEY", ""), + "base_url": "https://api.openai.com/v1", + "model": "gpt-4o-mini", # 便宜且快速 + }, + "qwen": { + "api_key": os.getenv("DASHSCOPE_API_KEY", ""), + "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "model": "qwen-plus", + }, + "deepseek": { + "api_key": os.getenv("DEEPSEEK_API_KEY", ""), + "base_url": "https://api.deepseek.com/v1", + "model": "deepseek-chat", + }, +} + +# 默认使用的LLM提供商 +DEFAULT_PROVIDER = "qwen" # 推荐使用通义千问,价格便宜 + +# ==================== 数据模型 ==================== + +class Message(BaseModel): + """消息""" + role: str # system, user, assistant + content: str + +class ChatRequest(BaseModel): + """聊天请求""" + message: str + conversation_history: List[Dict[str, str]] = [] + provider: Optional[str] = DEFAULT_PROVIDER + 
+class ChatResponse(BaseModel): + """聊天响应""" + success: bool + message: str + tool_used: Optional[str] = None + raw_data: Optional[Any] = None + error: Optional[str] = None + +# ==================== LLM助手类 ==================== + +class MCPChatAssistant: + """MCP聊天助手 - 集成LLM和工具调用""" + + def __init__(self, provider: str = DEFAULT_PROVIDER): + self.provider = provider + config = LLM_PROVIDERS.get(provider) + + if not config or not config["api_key"]: + logger.warning(f"LLM provider '{provider}' not configured, using fallback mode") + self.client = None + else: + self.client = OpenAI( + api_key=config["api_key"], + base_url=config["base_url"], + ) + self.model = config["model"] + + def get_system_prompt(self, tools: List[dict]) -> str: + """构建系统提示词""" + tools_desc = "\n\n".join([ + f"**{tool['name']}**\n描述:{tool['description']}\n参数:{json.dumps(tool['parameters'], ensure_ascii=False, indent=2)}" + for tool in tools + ]) + + return f"""你是一个专业的金融投资助手。你可以使用以下工具来帮助用户查询信息: + +{tools_desc} + +## 工作流程 +1. **理解用户意图**:分析用户问题,确定需要什么信息 +2. **选择工具**:从上面的工具中选择最合适的一个或多个 +3. **提取参数**:从用户输入中提取工具需要的参数 +4. **返回工具调用指令**(JSON格式): + {{"tool": "工具名", "arguments": {{...}}}} + +## 重要规则 +- 贵州茅台的股票代码是 **600519** +- 如果用户提到股票名称,尝试推断股票代码 +- 如果不确定需要什么信息,使用 search_china_news 搜索相关新闻 +- 涨停是指股票当日涨幅达到10%左右 +- 只返回工具调用指令,不要额外解释 + +## 示例 +用户:"查询贵州茅台的股票信息" +你:{{"tool": "get_stock_basic_info", "arguments": {{"seccode": "600519"}}}} + +用户:"今日涨停的股票有哪些" +你:{{"tool": "search_limit_up_stocks", "arguments": {{"query": "", "mode": "hybrid", "page_size": 10}}}} + +用户:"新能源概念板块表现如何" +你:{{"tool": "search_concepts", "arguments": {{"query": "新能源", "size": 10, "sort_by": "change_pct"}}}} +""" + + async def chat(self, user_message: str, conversation_history: List[Dict[str, str]], tools: List[dict]) -> ChatResponse: + """智能对话""" + try: + if not self.client: + # 降级到简单匹配 + return await self.fallback_chat(user_message) + + # 1. 构建消息历史 + messages = [ + {"role": "system", "content": self.get_system_prompt(tools)}, + ] + + # 添加历史对话(最多保留最近10轮) + for msg in conversation_history[-20:]: + messages.append({ + "role": "user" if msg.get("isUser") else "assistant", + "content": msg.get("content", ""), + }) + + messages.append({"role": "user", "content": user_message}) + + # 2. 调用LLM获取工具调用指令 + logger.info(f"Calling LLM with {len(messages)} messages") + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=0.3, # 低温度,更确定性 + max_tokens=500, + ) + + tool_call_instruction = response.choices[0].message.content.strip() + logger.info(f"LLM response: {tool_call_instruction}") + + # 3. 解析工具调用指令 + try: + tool_call = json.loads(tool_call_instruction) + tool_name = tool_call.get("tool") + tool_args = tool_call.get("arguments", {}) + + if not tool_name: + raise ValueError("No tool specified") + + # 4. 调用工具(这里需要导入 mcp_server 的工具处理器) + from mcp_server import TOOL_HANDLERS + + handler = TOOL_HANDLERS.get(tool_name) + if not handler: + raise ValueError(f"Tool '{tool_name}' not found") + + tool_result = await handler(tool_args) + + # 5. 
让LLM总结结果 + summary_messages = messages + [ + {"role": "assistant", "content": tool_call_instruction}, + {"role": "system", "content": f"工具 {tool_name} 返回的数据:\n{json.dumps(tool_result, ensure_ascii=False, indent=2)}\n\n请用自然语言总结这些数据,给用户一个简洁清晰的回复(不超过200字)。"} + ] + + summary_response = self.client.chat.completions.create( + model=self.model, + messages=summary_messages, + temperature=0.7, + max_tokens=300, + ) + + summary = summary_response.choices[0].message.content + + return ChatResponse( + success=True, + message=summary, + tool_used=tool_name, + raw_data=tool_result, + ) + + except json.JSONDecodeError: + # LLM没有返回JSON格式,直接返回其回复 + return ChatResponse( + success=True, + message=tool_call_instruction, + ) + except Exception as tool_error: + logger.error(f"Tool execution error: {str(tool_error)}") + return ChatResponse( + success=False, + message="工具调用失败", + error=str(tool_error), + ) + + except Exception as e: + logger.error(f"Chat error: {str(e)}", exc_info=True) + return ChatResponse( + success=False, + message="对话处理失败", + error=str(e), + ) + + async def fallback_chat(self, user_message: str) -> ChatResponse: + """降级方案:简单关键词匹配""" + from mcp_server import TOOL_HANDLERS + + try: + # 茅台特殊处理 + if "茅台" in user_message or "贵州茅台" in user_message: + handler = TOOL_HANDLERS.get("get_stock_basic_info") + result = await handler({"seccode": "600519"}) + return ChatResponse( + success=True, + message="已为您查询贵州茅台(600519)的股票信息:", + tool_used="get_stock_basic_info", + raw_data=result, + ) + + # 涨停分析 + elif "涨停" in user_message: + handler = TOOL_HANDLERS.get("search_limit_up_stocks") + query = user_message.replace("涨停", "").strip() + result = await handler({"query": query, "mode": "hybrid", "page_size": 10}) + return ChatResponse( + success=True, + message="已为您查询涨停股票信息:", + tool_used="search_limit_up_stocks", + raw_data=result, + ) + + # 概念板块 + elif "概念" in user_message or "板块" in user_message: + handler = TOOL_HANDLERS.get("search_concepts") + query = user_message.replace("概念", "").replace("板块", "").strip() + result = await handler({"query": query, "size": 10, "sort_by": "change_pct"}) + return ChatResponse( + success=True, + message=f"已为您查询'{query}'相关概念板块:", + tool_used="search_concepts", + raw_data=result, + ) + + # 默认:搜索新闻 + else: + handler = TOOL_HANDLERS.get("search_china_news") + result = await handler({"query": user_message, "top_k": 5}) + return ChatResponse( + success=True, + message="已为您搜索相关新闻:", + tool_used="search_china_news", + raw_data=result, + ) + + except Exception as e: + logger.error(f"Fallback chat error: {str(e)}") + return ChatResponse( + success=False, + message="查询失败", + error=str(e), + ) + +# ==================== FastAPI端点 ==================== + +# 在 mcp_server.py 中添加以下代码: + +""" +from mcp_chat_endpoint import MCPChatAssistant, ChatRequest, ChatResponse + +# 创建聊天助手实例 +chat_assistant = MCPChatAssistant(provider="qwen") # 或 "openai", "deepseek" + +@app.post("/chat", response_model=ChatResponse) +async def chat_endpoint(request: ChatRequest): + \"\"\"智能对话端点 - 使用LLM理解意图并调用工具\"\"\" + logger.info(f"Chat request: {request.message}") + + # 获取可用工具列表 + tools = [tool.dict() for tool in TOOLS] + + # 调用聊天助手 + response = await chat_assistant.chat( + user_message=request.message, + conversation_history=request.conversation_history, + tools=tools, + ) + + return response +""" diff --git a/mcp_server.py b/mcp_server.py index 5dc2da2e..c1dbea41 100644 --- a/mcp_server.py +++ b/mcp_server.py @@ -14,6 +14,8 @@ import logging import httpx from enum import Enum import mcp_database as db +from openai 
import OpenAI +import json # 配置日志 logging.basicConfig(level=logging.INFO) @@ -47,6 +49,22 @@ class ServiceEndpoints: # HTTP客户端配置 HTTP_CLIENT = httpx.AsyncClient(timeout=60.0) +# ==================== Agent系统配置 ==================== + +# Kimi 配置 - 用于计划制定和深度推理 +KIMI_CONFIG = { + "api_key": "sk-TzB4VYJfCoXGcGrGMiewukVRzjuDsbVCkaZXi2LvkS8s60E5", + "base_url": "https://api.moonshot.cn/v1", + "model": "kimi-k2-thinking", # 思考模型 +} + +# DeepMoney 配置 - 用于新闻总结 +DEEPMONEY_CONFIG = { + "api_key": "", # 空值 + "base_url": "http://111.62.35.50:8000/v1", + "model": "deepmoney", +} + # ==================== MCP协议数据模型 ==================== class ToolParameter(BaseModel): @@ -74,6 +92,44 @@ class ToolCallResponse(BaseModel): error: Optional[str] = None metadata: Optional[Dict[str, Any]] = None +# ==================== Agent系统数据模型 ==================== + +class ToolCall(BaseModel): + """工具调用""" + tool: str + arguments: Dict[str, Any] + reason: str + +class ExecutionPlan(BaseModel): + """执行计划""" + goal: str + steps: List[ToolCall] + reasoning: str + +class StepResult(BaseModel): + """单步执行结果""" + step_index: int + tool: str + arguments: Dict[str, Any] + status: Literal["success", "failed", "skipped"] + result: Optional[Any] = None + error: Optional[str] = None + execution_time: float = 0 + +class AgentResponse(BaseModel): + """Agent响应""" + success: bool + message: str + plan: Optional[ExecutionPlan] = None + step_results: List[StepResult] = [] + final_summary: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + +class AgentChatRequest(BaseModel): + """聊天请求""" + message: str + conversation_history: List[Dict[str, str]] = [] + # ==================== MCP工具定义 ==================== TOOLS: List[ToolDefinition] = [ @@ -932,6 +988,384 @@ TOOL_HANDLERS = { "get_stock_comparison": handle_get_stock_comparison, } +# ==================== Agent系统实现 ==================== + +class MCPAgentIntegrated: + """集成版 MCP Agent - 使用 Kimi 和 DeepMoney""" + + def __init__(self): + # 初始化 Kimi 客户端(计划制定) + self.kimi_client = OpenAI( + api_key=KIMI_CONFIG["api_key"], + base_url=KIMI_CONFIG["base_url"], + ) + self.kimi_model = KIMI_CONFIG["model"] + + # 初始化 DeepMoney 客户端(新闻总结) + self.deepmoney_client = OpenAI( + api_key=DEEPMONEY_CONFIG["api_key"], + base_url=DEEPMONEY_CONFIG["base_url"], + ) + self.deepmoney_model = DEEPMONEY_CONFIG["model"] + + def get_planning_prompt(self, tools: List[dict]) -> str: + """获取计划制定的系统提示词""" + tools_desc = "\n\n".join([ + f"**{tool['name']}**\n" + f"描述:{tool['description']}\n" + f"参数:{json.dumps(tool['parameters'], ensure_ascii=False, indent=2)}" + for tool in tools + ]) + + return f"""你是一个专业的金融研究助手。根据用户问题,制定详细的执行计划。 + +## 可用工具 + +{tools_desc} + +## 特殊工具 +- **summarize_news**: 使用 DeepMoney 模型总结新闻数据 + - 参数: {{"data": "新闻列表JSON", "focus": "关注点"}} + - 适用场景: 当需要总结新闻、研报等文本数据时 + +## 重要知识 +- 贵州茅台: 600519 +- 涨停: 涨幅约10% +- 概念板块: 相同题材股票分类 + +## 任务 +分析用户问题,制定执行计划。返回 JSON: + +```json +{{ + "goal": "用户目标", + "reasoning": "分析思路", + "steps": [ + {{ + "tool": "工具名", + "arguments": {{"参数": "值"}}, + "reason": "原因" + }} + ] +}} +``` + +## 规划原则 +1. 先收集数据,再分析总结 +2. 使用 summarize_news 总结新闻类数据 +3. 不超过5个步骤 +4. 
最后一步通常是总结 + +## 示例 + +用户:"贵州茅台最近有什么新闻" + +计划: +```json +{{ + "goal": "查询并总结贵州茅台最新新闻", + "reasoning": "先搜索新闻,再用 DeepMoney 总结", + "steps": [ + {{ + "tool": "search_china_news", + "arguments": {{"query": "贵州茅台", "top_k": 10}}, + "reason": "搜索贵州茅台相关新闻" + }}, + {{ + "tool": "summarize_news", + "arguments": {{ + "data": "前面的新闻数据", + "focus": "贵州茅台的重要动态和市场影响" + }}, + "reason": "使用DeepMoney总结新闻要点" + }} + ] +}} +``` + +只返回JSON,不要其他内容。""" + + async def create_plan(self, user_query: str, tools: List[dict]) -> ExecutionPlan: + """阶段1: 使用 Kimi 创建执行计划(带思考过程)""" + logger.info(f"[Planning] Kimi开始制定计划: {user_query}") + + messages = [ + {"role": "system", "content": self.get_planning_prompt(tools)}, + {"role": "user", "content": user_query}, + ] + + # 使用 Kimi 思考模型 + response = self.kimi_client.chat.completions.create( + model=self.kimi_model, + messages=messages, + temperature=1.0, # Kimi 推荐 + max_tokens=16000, # 足够容纳 reasoning_content + ) + + choice = response.choices[0] + message = choice.message + + # 提取思考过程 + reasoning_content = "" + if hasattr(message, "reasoning_content"): + reasoning_content = getattr(message, "reasoning_content") + logger.info(f"[Planning] Kimi思考过程: {reasoning_content[:200]}...") + + # 提取计划内容 + plan_json = message.content.strip() + + # 清理可能的代码块标记 + if "```json" in plan_json: + plan_json = plan_json.split("```json")[1].split("```")[0].strip() + elif "```" in plan_json: + plan_json = plan_json.split("```")[1].split("```")[0].strip() + + plan_data = json.loads(plan_json) + + plan = ExecutionPlan( + goal=plan_data["goal"], + reasoning=plan_data.get("reasoning", "") + "\n\n" + (reasoning_content[:500] if reasoning_content else ""), + steps=[ToolCall(**step) for step in plan_data["steps"]], + ) + + logger.info(f"[Planning] 计划制定完成: {len(plan.steps)} 步") + return plan + + async def execute_tool( + self, + tool_name: str, + arguments: Dict[str, Any], + tool_handlers: Dict[str, Any], + ) -> Dict[str, Any]: + """执行单个工具""" + + # 特殊工具:summarize_news(使用 DeepMoney) + if tool_name == "summarize_news": + return await self.summarize_news_with_deepmoney( + data=arguments.get("data", ""), + focus=arguments.get("focus", "关键信息"), + ) + + # 调用 MCP 工具 + handler = tool_handlers.get(tool_name) + if not handler: + raise ValueError(f"Tool '{tool_name}' not found") + + result = await handler(arguments) + return result + + async def summarize_news_with_deepmoney(self, data: str, focus: str) -> str: + """使用 DeepMoney 模型总结新闻""" + logger.info(f"[DeepMoney] 总结新闻,关注点: {focus}") + + messages = [ + { + "role": "system", + "content": "你是一个专业的金融新闻分析师,擅长提取关键信息并进行总结。" + }, + { + "role": "user", + "content": f"请总结以下新闻数据,关注点:{focus}\n\n数据:\n{data[:3000]}" + }, + ] + + try: + response = self.deepmoney_client.chat.completions.create( + model=self.deepmoney_model, + messages=messages, + temperature=0.7, + max_tokens=1000, + ) + + summary = response.choices[0].message.content + logger.info(f"[DeepMoney] 总结完成") + return summary + + except Exception as e: + logger.error(f"[DeepMoney] 总结失败: {str(e)}") + # 降级:返回简化摘要 + return f"新闻总结失败,原始数据:{data[:500]}..." 
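+
+    # 说明:summarize_news 是 /agent/chat 端点额外注册的特殊工具,约定参数为
+    #   {"data": "<新闻数据JSON,或占位符'前面的新闻数据'>", "focus": "<关注点>"}
+    # 其中占位符会在 execute_plan 中被替换为已收集的各步骤结果(collected_data)。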
+ + async def execute_plan( + self, + plan: ExecutionPlan, + tool_handlers: Dict[str, Any], + ) -> List[StepResult]: + """阶段2: 执行计划""" + logger.info(f"[Execution] 开始执行: {len(plan.steps)} 步") + + results = [] + collected_data = {} + + for i, step in enumerate(plan.steps): + logger.info(f"[Execution] 步骤 {i+1}/{len(plan.steps)}: {step.tool}") + + start_time = datetime.now() + + try: + # 替换占位符 + arguments = step.arguments.copy() + + # 如果参数值是 "前面的新闻数据" 或 "前面收集的所有数据" + if step.tool == "summarize_news": + if arguments.get("data") in ["前面的新闻数据", "前面收集的所有数据"]: + # 将收集的数据传递 + arguments["data"] = json.dumps(collected_data, ensure_ascii=False, indent=2) + + # 执行工具 + result = await self.execute_tool(step.tool, arguments, tool_handlers) + + execution_time = (datetime.now() - start_time).total_seconds() + + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=arguments, + status="success", + result=result, + execution_time=execution_time, + ) + results.append(step_result) + + # 收集数据 + collected_data[f"step_{i+1}_{step.tool}"] = result + + logger.info(f"[Execution] 步骤 {i+1} 完成: {execution_time:.2f}s") + + except Exception as e: + logger.error(f"[Execution] 步骤 {i+1} 失败: {str(e)}") + + execution_time = (datetime.now() - start_time).total_seconds() + + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=step.arguments, + status="failed", + error=str(e), + execution_time=execution_time, + ) + results.append(step_result) + + # 继续执行其他步骤 + continue + + logger.info(f"[Execution] 执行完成") + return results + + async def generate_final_summary( + self, + user_query: str, + plan: ExecutionPlan, + step_results: List[StepResult], + ) -> str: + """阶段3: 使用 Kimi 生成最终总结""" + logger.info("[Summary] Kimi生成最终总结") + + # 收集成功的结果 + successful_results = [r for r in step_results if r.status == "success"] + + if not successful_results: + return "很抱歉,所有步骤都执行失败,无法生成分析报告。" + + # 构建结果文本(精简版) + results_text = "\n\n".join([ + f"**步骤 {r.step_index + 1}: {r.tool}**\n" + f"结果: {str(r.result)[:800]}..." 
+ for r in successful_results[:3] # 只取前3个,避免超长 + ]) + + messages = [ + { + "role": "system", + "content": "你是专业的金融研究助手。根据执行结果,生成简洁清晰的报告。" + }, + { + "role": "user", + "content": f"""用户问题:{user_query} + +执行计划:{plan.goal} + +执行结果: +{results_text} + +请生成专业的分析报告(300字以内)。""" + }, + ] + + try: + response = self.kimi_client.chat.completions.create( + model="kimi-k2-turbpreview", # 使用非思考模型,更快 + messages=messages, + temperature=0.7, + max_tokens=1000, + ) + + summary = response.choices[0].message.content + logger.info("[Summary] 总结完成") + return summary + + except Exception as e: + logger.error(f"[Summary] 总结失败: {str(e)}") + # 降级:返回最后一步的结果 + if successful_results: + last_result = successful_results[-1] + if isinstance(last_result.result, str): + return last_result.result + else: + return json.dumps(last_result.result, ensure_ascii=False, indent=2) + return "总结生成失败" + + async def process_query( + self, + user_query: str, + tools: List[dict], + tool_handlers: Dict[str, Any], + ) -> AgentResponse: + """主流程""" + logger.info(f"[Agent] 处理查询: {user_query}") + + try: + # 阶段1: Kimi 制定计划 + plan = await self.create_plan(user_query, tools) + + # 阶段2: 执行工具 + step_results = await self.execute_plan(plan, tool_handlers) + + # 阶段3: Kimi 生成总结 + final_summary = await self.generate_final_summary( + user_query, plan, step_results + ) + + return AgentResponse( + success=True, + message=final_summary, + plan=plan, + step_results=step_results, + final_summary=final_summary, + metadata={ + "total_steps": len(plan.steps), + "successful_steps": len([r for r in step_results if r.status == "success"]), + "failed_steps": len([r for r in step_results if r.status == "failed"]), + "total_execution_time": sum(r.execution_time for r in step_results), + "model_used": { + "planning": self.kimi_model, + "summarization": "kimi-k2-turbpreview", + "news_summary": self.deepmoney_model, + }, + }, + ) + + except Exception as e: + logger.error(f"[Agent] 错误: {str(e)}", exc_info=True) + return AgentResponse( + success=False, + message=f"处理失败: {str(e)}", + ) + +# 创建 Agent 实例(全局) +agent = MCPAgentIntegrated() + # ==================== Web聊天接口 ==================== class ChatMessage(BaseModel): @@ -965,6 +1399,43 @@ async def chat(request: ChatRequest): "hint": "Use POST /tools/call to invoke tools" } +@app.post("/agent/chat", response_model=AgentResponse) +async def agent_chat(request: AgentChatRequest): + """智能代理对话端点""" + logger.info(f"Agent chat: {request.message}") + + # 获取工具列表 + tools = [tool.dict() for tool in TOOLS] + + # 添加特殊工具:summarize_news + tools.append({ + "name": "summarize_news", + "description": "使用 DeepMoney 模型总结新闻数据,提取关键信息", + "parameters": { + "type": "object", + "properties": { + "data": { + "type": "string", + "description": "要总结的新闻数据(JSON格式)" + }, + "focus": { + "type": "string", + "description": "关注点,例如:'市场影响'、'投资机会'等" + } + }, + "required": ["data"] + } + }) + + # 处理查询 + response = await agent.process_query( + user_query=request.message, + tools=tools, + tool_handlers=TOOL_HANDLERS, + ) + + return response + # ==================== 健康检查 ==================== @app.get("/health") diff --git a/mcp_server_agent_integration.py b/mcp_server_agent_integration.py new file mode 100644 index 00000000..fbb47f88 --- /dev/null +++ b/mcp_server_agent_integration.py @@ -0,0 +1,492 @@ +""" +集成到 mcp_server.py 的 Agent 系统 +使用 Kimi (kimi-k2-thinking) 和 DeepMoney 两个模型 +""" + +from openai import OpenAI +from pydantic import BaseModel +from typing import List, Dict, Any, Optional, Literal +from datetime import datetime +import json +import logging + 
+logger = logging.getLogger(__name__) + +# ==================== 模型配置 ==================== + +# Kimi 配置 - 用于计划制定和深度推理 +KIMI_CONFIG = { + "api_key": "sk-TzB4VYJfCoXGcGrGMiewukVRzjuDsbVCkaZXi2LvkS8s60E5", + "base_url": "https://api.moonshot.cn/v1", + "model": "kimi-k2-thinking", # 思考模型 +} + +# DeepMoney 配置 - 用于新闻总结 +DEEPMONEY_CONFIG = { + "api_key": "", # 空值 + "base_url": "http://111.62.35.50:8000/v1", + "model": "deepmoney", +} + +# ==================== 数据模型 ==================== + +class ToolCall(BaseModel): + """工具调用""" + tool: str + arguments: Dict[str, Any] + reason: str + +class ExecutionPlan(BaseModel): + """执行计划""" + goal: str + steps: List[ToolCall] + reasoning: str + +class StepResult(BaseModel): + """单步执行结果""" + step_index: int + tool: str + arguments: Dict[str, Any] + status: Literal["success", "failed", "skipped"] + result: Optional[Any] = None + error: Optional[str] = None + execution_time: float = 0 + +class AgentResponse(BaseModel): + """Agent响应""" + success: bool + message: str + plan: Optional[ExecutionPlan] = None + step_results: List[StepResult] = [] + final_summary: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + +class ChatRequest(BaseModel): + """聊天请求""" + message: str + conversation_history: List[Dict[str, str]] = [] + +# ==================== Agent 系统 ==================== + +class MCPAgentIntegrated: + """集成版 MCP Agent - 使用 Kimi 和 DeepMoney""" + + def __init__(self): + # 初始化 Kimi 客户端(计划制定) + self.kimi_client = OpenAI( + api_key=KIMI_CONFIG["api_key"], + base_url=KIMI_CONFIG["base_url"], + ) + self.kimi_model = KIMI_CONFIG["model"] + + # 初始化 DeepMoney 客户端(新闻总结) + self.deepmoney_client = OpenAI( + api_key=DEEPMONEY_CONFIG["api_key"], + base_url=DEEPMONEY_CONFIG["base_url"], + ) + self.deepmoney_model = DEEPMONEY_CONFIG["model"] + + def get_planning_prompt(self, tools: List[dict]) -> str: + """获取计划制定的系统提示词""" + tools_desc = "\n\n".join([ + f"**{tool['name']}**\n" + f"描述:{tool['description']}\n" + f"参数:{json.dumps(tool['parameters'], ensure_ascii=False, indent=2)}" + for tool in tools + ]) + + return f"""你是一个专业的金融研究助手。根据用户问题,制定详细的执行计划。 + +## 可用工具 + +{tools_desc} + +## 特殊工具 +- **summarize_news**: 使用 DeepMoney 模型总结新闻数据 + - 参数: {{"data": "新闻列表JSON", "focus": "关注点"}} + - 适用场景: 当需要总结新闻、研报等文本数据时 + +## 重要知识 +- 贵州茅台: 600519 +- 涨停: 涨幅约10% +- 概念板块: 相同题材股票分类 + +## 任务 +分析用户问题,制定执行计划。返回 JSON: + +```json +{{ + "goal": "用户目标", + "reasoning": "分析思路", + "steps": [ + {{ + "tool": "工具名", + "arguments": {{"参数": "值"}}, + "reason": "原因" + }} + ] +}} +``` + +## 规划原则 +1. 先收集数据,再分析总结 +2. 使用 summarize_news 总结新闻类数据 +3. 不超过5个步骤 +4. 
最后一步通常是总结 + +## 示例 + +用户:"贵州茅台最近有什么新闻" + +计划: +```json +{{ + "goal": "查询并总结贵州茅台最新新闻", + "reasoning": "先搜索新闻,再用 DeepMoney 总结", + "steps": [ + {{ + "tool": "search_china_news", + "arguments": {{"query": "贵州茅台", "top_k": 10}}, + "reason": "搜索贵州茅台相关新闻" + }}, + {{ + "tool": "summarize_news", + "arguments": {{ + "data": "前面的新闻数据", + "focus": "贵州茅台的重要动态和市场影响" + }}, + "reason": "使用DeepMoney总结新闻要点" + }} + ] +}} +``` + +只返回JSON,不要其他内容。""" + + async def create_plan(self, user_query: str, tools: List[dict]) -> ExecutionPlan: + """阶段1: 使用 Kimi 创建执行计划(带思考过程)""" + logger.info(f"[Planning] Kimi开始制定计划: {user_query}") + + messages = [ + {"role": "system", "content": self.get_planning_prompt(tools)}, + {"role": "user", "content": user_query}, + ] + + # 使用 Kimi 思考模型 + response = self.kimi_client.chat.completions.create( + model=self.kimi_model, + messages=messages, + temperature=1.0, # Kimi 推荐 + max_tokens=16000, # 足够容纳 reasoning_content + ) + + choice = response.choices[0] + message = choice.message + + # 提取思考过程 + reasoning_content = "" + if hasattr(message, "reasoning_content"): + reasoning_content = getattr(message, "reasoning_content") + logger.info(f"[Planning] Kimi思考过程: {reasoning_content[:200]}...") + + # 提取计划内容 + plan_json = message.content.strip() + + # 清理可能的代码块标记 + if "```json" in plan_json: + plan_json = plan_json.split("```json")[1].split("```")[0].strip() + elif "```" in plan_json: + plan_json = plan_json.split("```")[1].split("```")[0].strip() + + plan_data = json.loads(plan_json) + + plan = ExecutionPlan( + goal=plan_data["goal"], + reasoning=plan_data.get("reasoning", "") + "\n\n" + (reasoning_content[:500] if reasoning_content else ""), + steps=[ToolCall(**step) for step in plan_data["steps"]], + ) + + logger.info(f"[Planning] 计划制定完成: {len(plan.steps)} 步") + return plan + + async def execute_tool( + self, + tool_name: str, + arguments: Dict[str, Any], + tool_handlers: Dict[str, Any], + ) -> Dict[str, Any]: + """执行单个工具""" + + # 特殊工具:summarize_news(使用 DeepMoney) + if tool_name == "summarize_news": + return await self.summarize_news_with_deepmoney( + data=arguments.get("data", ""), + focus=arguments.get("focus", "关键信息"), + ) + + # 调用 MCP 工具 + handler = tool_handlers.get(tool_name) + if not handler: + raise ValueError(f"Tool '{tool_name}' not found") + + result = await handler(arguments) + return result + + async def summarize_news_with_deepmoney(self, data: str, focus: str) -> str: + """使用 DeepMoney 模型总结新闻""" + logger.info(f"[DeepMoney] 总结新闻,关注点: {focus}") + + messages = [ + { + "role": "system", + "content": "你是一个专业的金融新闻分析师,擅长提取关键信息并进行总结。" + }, + { + "role": "user", + "content": f"请总结以下新闻数据,关注点:{focus}\n\n数据:\n{data[:3000]}" + }, + ] + + try: + response = self.deepmoney_client.chat.completions.create( + model=self.deepmoney_model, + messages=messages, + temperature=0.7, + max_tokens=1000, + ) + + summary = response.choices[0].message.content + logger.info(f"[DeepMoney] 总结完成") + return summary + + except Exception as e: + logger.error(f"[DeepMoney] 总结失败: {str(e)}") + # 降级:返回简化摘要 + return f"新闻总结失败,原始数据:{data[:500]}..." 
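The only contract `execute_tool` places on `tool_handlers` is a dict mapping tool names to async callables that accept an arguments dict; everything except `summarize_news` is dispatched straight through. A minimal sketch of that contract, using a hypothetical in-memory handler instead of a real MCP tool:

```python
# Sketch of the tool_handlers contract expected by execute_tool().
import asyncio

from mcp_server_agent_integration import MCPAgentIntegrated


async def fake_search_china_news(arguments: dict) -> dict:
    # Hypothetical stand-in for a real MCP tool handler.
    return {"query": arguments["query"], "total": 0, "results": []}


async def main():
    agent = MCPAgentIntegrated()
    result = await agent.execute_tool(
        tool_name="search_china_news",
        arguments={"query": "贵州茅台", "top_k": 5},
        tool_handlers={"search_china_news": fake_search_china_news},
    )
    print(result)


asyncio.run(main())
```

In the integrated server the same dict is simply the existing `TOOL_HANDLERS` from `mcp_server.py`, which the `/agent/chat` endpoint passes through unchanged.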
+ + async def execute_plan( + self, + plan: ExecutionPlan, + tool_handlers: Dict[str, Any], + ) -> List[StepResult]: + """阶段2: 执行计划""" + logger.info(f"[Execution] 开始执行: {len(plan.steps)} 步") + + results = [] + collected_data = {} + + for i, step in enumerate(plan.steps): + logger.info(f"[Execution] 步骤 {i+1}/{len(plan.steps)}: {step.tool}") + + start_time = datetime.now() + + try: + # 替换占位符 + arguments = step.arguments.copy() + + # 如果参数值是 "前面的新闻数据" 或 "前面收集的所有数据" + if step.tool == "summarize_news": + if arguments.get("data") in ["前面的新闻数据", "前面收集的所有数据"]: + # 将收集的数据传递 + arguments["data"] = json.dumps(collected_data, ensure_ascii=False, indent=2) + + # 执行工具 + result = await self.execute_tool(step.tool, arguments, tool_handlers) + + execution_time = (datetime.now() - start_time).total_seconds() + + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=arguments, + status="success", + result=result, + execution_time=execution_time, + ) + results.append(step_result) + + # 收集数据 + collected_data[f"step_{i+1}_{step.tool}"] = result + + logger.info(f"[Execution] 步骤 {i+1} 完成: {execution_time:.2f}s") + + except Exception as e: + logger.error(f"[Execution] 步骤 {i+1} 失败: {str(e)}") + + execution_time = (datetime.now() - start_time).total_seconds() + + step_result = StepResult( + step_index=i, + tool=step.tool, + arguments=step.arguments, + status="failed", + error=str(e), + execution_time=execution_time, + ) + results.append(step_result) + + # 继续执行其他步骤 + continue + + logger.info(f"[Execution] 执行完成") + return results + + async def generate_final_summary( + self, + user_query: str, + plan: ExecutionPlan, + step_results: List[StepResult], + ) -> str: + """阶段3: 使用 Kimi 生成最终总结""" + logger.info("[Summary] Kimi生成最终总结") + + # 收集成功的结果 + successful_results = [r for r in step_results if r.status == "success"] + + if not successful_results: + return "很抱歉,所有步骤都执行失败,无法生成分析报告。" + + # 构建结果文本(精简版) + results_text = "\n\n".join([ + f"**步骤 {r.step_index + 1}: {r.tool}**\n" + f"结果: {str(r.result)[:800]}..." 
+ for r in successful_results[:3] # 只取前3个,避免超长 + ]) + + messages = [ + { + "role": "system", + "content": "你是专业的金融研究助手。根据执行结果,生成简洁清晰的报告。" + }, + { + "role": "user", + "content": f"""用户问题:{user_query} + +执行计划:{plan.goal} + +执行结果: +{results_text} + +请生成专业的分析报告(300字以内)。""" + }, + ] + + try: + response = self.kimi_client.chat.completions.create( + model="kimi-k2-turbpreview", # 使用非思考模型,更快 + messages=messages, + temperature=0.7, + max_tokens=1000, + ) + + summary = response.choices[0].message.content + logger.info("[Summary] 总结完成") + return summary + + except Exception as e: + logger.error(f"[Summary] 总结失败: {str(e)}") + # 降级:返回最后一步的结果 + if successful_results: + last_result = successful_results[-1] + if isinstance(last_result.result, str): + return last_result.result + else: + return json.dumps(last_result.result, ensure_ascii=False, indent=2) + return "总结生成失败" + + async def process_query( + self, + user_query: str, + tools: List[dict], + tool_handlers: Dict[str, Any], + ) -> AgentResponse: + """主流程""" + logger.info(f"[Agent] 处理查询: {user_query}") + + try: + # 阶段1: Kimi 制定计划 + plan = await self.create_plan(user_query, tools) + + # 阶段2: 执行工具 + step_results = await self.execute_plan(plan, tool_handlers) + + # 阶段3: Kimi 生成总结 + final_summary = await self.generate_final_summary( + user_query, plan, step_results + ) + + return AgentResponse( + success=True, + message=final_summary, + plan=plan, + step_results=step_results, + final_summary=final_summary, + metadata={ + "total_steps": len(plan.steps), + "successful_steps": len([r for r in step_results if r.status == "success"]), + "failed_steps": len([r for r in step_results if r.status == "failed"]), + "total_execution_time": sum(r.execution_time for r in step_results), + "model_used": { + "planning": self.kimi_model, + "summarization": "kimi-k2-turbpreview", + "news_summary": self.deepmoney_model, + }, + }, + ) + + except Exception as e: + logger.error(f"[Agent] 错误: {str(e)}", exc_info=True) + return AgentResponse( + success=False, + message=f"处理失败: {str(e)}", + ) + +# ==================== 添加到 mcp_server.py ==================== + +""" +在 mcp_server.py 中添加以下代码: + +# 导入 Agent 系统 +from mcp_server_agent_integration import MCPAgentIntegrated, ChatRequest, AgentResponse + +# 创建 Agent 实例(全局) +agent = MCPAgentIntegrated() + +# 添加端点 +@app.post("/agent/chat", response_model=AgentResponse) +async def agent_chat(request: ChatRequest): + \"\"\"智能代理对话端点\"\"\" + logger.info(f"Agent chat: {request.message}") + + # 获取工具列表 + tools = [tool.dict() for tool in TOOLS] + + # 添加特殊工具:summarize_news + tools.append({ + "name": "summarize_news", + "description": "使用 DeepMoney 模型总结新闻数据,提取关键信息", + "parameters": { + "type": "object", + "properties": { + "data": { + "type": "string", + "description": "要总结的新闻数据(JSON格式)" + }, + "focus": { + "type": "string", + "description": "关注点,例如:'市场影响'、'投资机会'等" + } + }, + "required": ["data"] + } + }) + + # 处理查询 + response = await agent.process_query( + user_query=request.message, + tools=tools, + tool_handlers=TOOL_HANDLERS, + ) + + return response +""" diff --git a/src/components/ChatBot/ChatInterfaceV2.js b/src/components/ChatBot/ChatInterfaceV2.js new file mode 100644 index 00000000..d617d729 --- /dev/null +++ b/src/components/ChatBot/ChatInterfaceV2.js @@ -0,0 +1,576 @@ +// src/components/ChatBot/ChatInterfaceV2.js +// 重新设计的聊天界面 - 更漂亮、支持Agent模式 + +import React, { useState, useRef, useEffect } from 'react'; +import { + Box, + Flex, + Input, + IconButton, + VStack, + HStack, + Text, + Spinner, + useColorModeValue, + useToast, + Divider, + Badge, 
+ Button, + Avatar, + Heading, + Progress, + Fade, +} from '@chakra-ui/react'; +import { FiSend, FiRefreshCw, FiDownload, FiCpu, FiUser, FiZap } from 'react-icons/fi'; +import { PlanCard } from './PlanCard'; +import { StepResultCard } from './StepResultCard'; +import { mcpService } from '../../services/mcpService'; +import { logger } from '../../utils/logger'; + +/** + * Agent消息类型 + */ +const MessageTypes = { + USER: 'user', + AGENT_THINKING: 'agent_thinking', + AGENT_PLAN: 'agent_plan', + AGENT_EXECUTING: 'agent_executing', + AGENT_RESPONSE: 'agent_response', + ERROR: 'error', +}; + +/** + * 聊天界面V2组件 - Agent模式 + */ +export const ChatInterfaceV2 = () => { + const [messages, setMessages] = useState([ + { + id: 1, + type: MessageTypes.AGENT_RESPONSE, + content: '你好!我是AI投资研究助手。我会通过多步骤分析来帮助你深入了解金融市场。\n\n你可以问我:\n• 全面分析某只股票\n• 某个行业的投资机会\n• 今日市场热点\n• 某个概念板块的表现', + timestamp: new Date().toISOString(), + }, + ]); + const [inputValue, setInputValue] = useState(''); + const [isProcessing, setIsProcessing] = useState(false); + const [currentProgress, setCurrentProgress] = useState(0); + + const messagesEndRef = useRef(null); + const inputRef = useRef(null); + const toast = useToast(); + + // 颜色主题 + const bgColor = useColorModeValue('gray.50', 'gray.900'); + const chatBg = useColorModeValue('white', 'gray.800'); + const inputBg = useColorModeValue('white', 'gray.700'); + const userBubbleBg = useColorModeValue('blue.500', 'blue.600'); + const agentBubbleBg = useColorModeValue('white', 'gray.700'); + const borderColor = useColorModeValue('gray.200', 'gray.600'); + + // 自动滚动到底部 + const scrollToBottom = () => { + messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }); + }; + + useEffect(() => { + scrollToBottom(); + }, [messages]); + + // 添加消息 + const addMessage = (message) => { + setMessages((prev) => [...prev, { ...message, id: Date.now() }]); + }; + + // 更新最后一条消息 + const updateLastMessage = (updates) => { + setMessages((prev) => { + const newMessages = [...prev]; + if (newMessages.length > 0) { + newMessages[newMessages.length - 1] = { + ...newMessages[newMessages.length - 1], + ...updates, + }; + } + return newMessages; + }); + }; + + // 发送消息(Agent模式) + const handleSendMessage = async () => { + if (!inputValue.trim() || isProcessing) return; + + const userMessage = { + type: MessageTypes.USER, + content: inputValue, + timestamp: new Date().toISOString(), + }; + + addMessage(userMessage); + setInputValue(''); + setIsProcessing(true); + setCurrentProgress(0); + + try { + // 1. 显示思考状态 + addMessage({ + type: MessageTypes.AGENT_THINKING, + content: '正在分析你的问题...', + timestamp: new Date().toISOString(), + }); + + setCurrentProgress(10); + + // 调用 Agent API + const response = await fetch(`${mcpService.baseURL.replace('/mcp', '')}/mcp/agent/chat`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + message: inputValue, + conversation_history: messages.filter(m => m.type === MessageTypes.USER || m.type === MessageTypes.AGENT_RESPONSE).map(m => ({ + isUser: m.type === MessageTypes.USER, + content: m.content, + })), + }), + }); + + if (!response.ok) { + throw new Error('Agent请求失败'); + } + + const agentResponse = await response.json(); + logger.info('Agent response', agentResponse); + + // 移除思考消息 + setMessages(prev => prev.filter(m => m.type !== MessageTypes.AGENT_THINKING)); + + if (!agentResponse.success) { + throw new Error(agentResponse.message || '处理失败'); + } + + setCurrentProgress(30); + + // 2. 
显示执行计划 + if (agentResponse.plan) { + addMessage({ + type: MessageTypes.AGENT_PLAN, + content: '已制定执行计划', + plan: agentResponse.plan, + timestamp: new Date().toISOString(), + }); + } + + setCurrentProgress(40); + + // 3. 显示执行过程 + if (agentResponse.step_results && agentResponse.step_results.length > 0) { + addMessage({ + type: MessageTypes.AGENT_EXECUTING, + content: '正在执行步骤...', + plan: agentResponse.plan, + stepResults: agentResponse.step_results, + timestamp: new Date().toISOString(), + }); + + // 模拟进度更新 + for (let i = 0; i < agentResponse.step_results.length; i++) { + setCurrentProgress(40 + (i + 1) / agentResponse.step_results.length * 50); + await new Promise(resolve => setTimeout(resolve, 100)); + } + } + + setCurrentProgress(100); + + // 移除执行中消息 + setMessages(prev => prev.filter(m => m.type !== MessageTypes.AGENT_EXECUTING)); + + // 4. 显示最终结果 + addMessage({ + type: MessageTypes.AGENT_RESPONSE, + content: agentResponse.message || agentResponse.final_summary, + plan: agentResponse.plan, + stepResults: agentResponse.step_results, + metadata: agentResponse.metadata, + timestamp: new Date().toISOString(), + }); + + } catch (error) { + logger.error('Agent chat error', error); + + // 移除思考/执行中消息 + setMessages(prev => prev.filter( + m => m.type !== MessageTypes.AGENT_THINKING && m.type !== MessageTypes.AGENT_EXECUTING + )); + + addMessage({ + type: MessageTypes.ERROR, + content: `处理失败:${error.message}`, + timestamp: new Date().toISOString(), + }); + + toast({ + title: '处理失败', + description: error.message, + status: 'error', + duration: 3000, + isClosable: true, + }); + } finally { + setIsProcessing(false); + setCurrentProgress(0); + inputRef.current?.focus(); + } + }; + + // 处理键盘事件 + const handleKeyPress = (e) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + handleSendMessage(); + } + }; + + // 清空对话 + const handleClearChat = () => { + setMessages([ + { + id: 1, + type: MessageTypes.AGENT_RESPONSE, + content: '对话已清空。有什么可以帮到你的?', + timestamp: new Date().toISOString(), + }, + ]); + }; + + // 导出对话 + const handleExportChat = () => { + const chatText = messages + .filter(m => m.type === MessageTypes.USER || m.type === MessageTypes.AGENT_RESPONSE) + .map((msg) => `[${msg.type === MessageTypes.USER ? '用户' : 'AI助手'}] ${msg.content}`) + .join('\n\n'); + + const blob = new Blob([chatText], { type: 'text/plain' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `chat_${new Date().toISOString().slice(0, 10)}.txt`; + a.click(); + URL.revokeObjectURL(url); + }; + + // 快捷问题 + const quickQuestions = [ + '全面分析贵州茅台这只股票', + '今日涨停股票有哪些亮点', + '新能源概念板块的投资机会', + '半导体行业最新动态', + ]; + + return ( + + {/* 头部 */} + + + + } + /> + + AI投资研究助手 + + + + + 智能分析 + + + + 多步骤深度研究 + + + + + + + } + size="sm" + variant="ghost" + aria-label="清空对话" + onClick={handleClearChat} + /> + } + size="sm" + variant="ghost" + aria-label="导出对话" + onClick={handleExportChat} + /> + + + + {/* 进度条 */} + {isProcessing && ( + + )} + + + {/* 消息列表 */} + + + {messages.map((message) => ( + + + + ))} +
+ + + + {/* 快捷问题 */} + {messages.length <= 2 && !isProcessing && ( + + 💡 试试这些问题: + + {quickQuestions.map((question, idx) => ( + + ))} + + + )} + + {/* 输入框 */} + + + setInputValue(e.target.value)} + onKeyPress={handleKeyPress} + placeholder="输入你的问题,我会进行深度分析..." + bg={inputBg} + border="1px" + borderColor={borderColor} + _focus={{ borderColor: 'blue.500', boxShadow: '0 0 0 1px #3182CE' }} + mr={2} + disabled={isProcessing} + size="lg" + /> + : } + colorScheme="blue" + aria-label="发送" + onClick={handleSendMessage} + isLoading={isProcessing} + disabled={!inputValue.trim() || isProcessing} + size="lg" + /> + + + + ); +}; + +/** + * 消息渲染器 + */ +const MessageRenderer = ({ message }) => { + const userBubbleBg = useColorModeValue('blue.500', 'blue.600'); + const agentBubbleBg = useColorModeValue('white', 'gray.700'); + const borderColor = useColorModeValue('gray.200', 'gray.600'); + + switch (message.type) { + case MessageTypes.USER: + return ( + + + + + {message.content} + + + } /> + + + ); + + case MessageTypes.AGENT_THINKING: + return ( + + + } /> + + + + + {message.content} + + + + + + ); + + case MessageTypes.AGENT_PLAN: + return ( + + + } /> + + + + + + ); + + case MessageTypes.AGENT_EXECUTING: + return ( + + + } /> + + + {message.stepResults?.map((result, idx) => ( + + ))} + + + + ); + + case MessageTypes.AGENT_RESPONSE: + return ( + + + } /> + + {/* 最终总结 */} + + + {message.content} + + + {/* 元数据 */} + {message.metadata && ( + + 总步骤: {message.metadata.total_steps} + ✓ {message.metadata.successful_steps} + {message.metadata.failed_steps > 0 && ( + ✗ {message.metadata.failed_steps} + )} + 耗时: {message.metadata.total_execution_time?.toFixed(1)}s + + )} + + + {/* 执行详情(可选) */} + {message.plan && message.stepResults && message.stepResults.length > 0 && ( + + + + 📊 执行详情(点击展开查看) + + {message.stepResults.map((result, idx) => ( + + ))} + + )} + + + + ); + + case MessageTypes.ERROR: + return ( + + + } /> + + {message.content} + + + + ); + + default: + return null; + } +}; + +export default ChatInterfaceV2; diff --git a/src/components/ChatBot/PlanCard.js b/src/components/ChatBot/PlanCard.js new file mode 100644 index 00000000..53714598 --- /dev/null +++ b/src/components/ChatBot/PlanCard.js @@ -0,0 +1,145 @@ +// src/components/ChatBot/PlanCard.js +// 执行计划展示卡片 + +import React from 'react'; +import { + Box, + VStack, + HStack, + Text, + Badge, + Accordion, + AccordionItem, + AccordionButton, + AccordionPanel, + AccordionIcon, + Icon, + useColorModeValue, + Divider, +} from '@chakra-ui/react'; +import { FiTarget, FiCheckCircle, FiXCircle, FiClock, FiTool } from 'react-icons/fi'; + +/** + * 执行计划卡片组件 + */ +export const PlanCard = ({ plan, stepResults }) => { + const cardBg = useColorModeValue('blue.50', 'blue.900'); + const borderColor = useColorModeValue('blue.200', 'blue.700'); + const successColor = useColorModeValue('green.500', 'green.300'); + const errorColor = useColorModeValue('red.500', 'red.300'); + const pendingColor = useColorModeValue('gray.400', 'gray.500'); + + const getStepStatus = (stepIndex) => { + if (!stepResults || stepResults.length === 0) return 'pending'; + const result = stepResults.find(r => r.step_index === stepIndex); + return result ? 
result.status : 'pending'; + }; + + const getStepIcon = (status) => { + switch (status) { + case 'success': + return FiCheckCircle; + case 'failed': + return FiXCircle; + default: + return FiClock; + } + }; + + const getStepColor = (status) => { + switch (status) { + case 'success': + return successColor; + case 'failed': + return errorColor; + default: + return pendingColor; + } + }; + + return ( + + + {/* 目标 */} + + + 执行目标 + + + {plan.goal} + + + + + {/* 规划思路 */} + {plan.reasoning && ( + <> + 规划思路: + + {plan.reasoning} + + + + )} + + {/* 执行步骤 */} + + 执行步骤 + {plan.steps.length} 步 + + + + {plan.steps.map((step, index) => { + const status = getStepStatus(index); + const StepIcon = getStepIcon(status); + const stepColor = getStepColor(status); + + return ( + + + + + + 步骤 {index + 1}: {step.tool} + + + {status === 'success' ? '✓ 完成' : + status === 'failed' ? '✗ 失败' : '⏳ 等待'} + + + + {step.reason} + + + + ); + })} + + + + ); +}; + +export default PlanCard; diff --git a/src/components/ChatBot/StepResultCard.js b/src/components/ChatBot/StepResultCard.js new file mode 100644 index 00000000..282acaf9 --- /dev/null +++ b/src/components/ChatBot/StepResultCard.js @@ -0,0 +1,186 @@ +// src/components/ChatBot/StepResultCard.js +// 步骤结果展示卡片(可折叠) + +import React, { useState } from 'react'; +import { + Box, + VStack, + HStack, + Text, + Badge, + Collapse, + Icon, + IconButton, + Code, + useColorModeValue, + Divider, +} from '@chakra-ui/react'; +import { FiChevronDown, FiChevronUp, FiCheckCircle, FiXCircle, FiClock, FiDatabase } from 'react-icons/fi'; + +/** + * 步骤结果卡片组件 + */ +export const StepResultCard = ({ stepResult }) => { + const [isExpanded, setIsExpanded] = useState(false); + + const cardBg = useColorModeValue('white', 'gray.700'); + const borderColor = useColorModeValue('gray.200', 'gray.600'); + const successColor = useColorModeValue('green.500', 'green.300'); + const errorColor = useColorModeValue('red.500', 'red.300'); + + const getStatusIcon = () => { + switch (stepResult.status) { + case 'success': + return FiCheckCircle; + case 'failed': + return FiXCircle; + default: + return FiClock; + } + }; + + const getStatusColor = () => { + switch (stepResult.status) { + case 'success': + return 'green'; + case 'failed': + return 'red'; + default: + return 'gray'; + } + }; + + const StatusIcon = getStatusIcon(); + const statusColorScheme = getStatusColor(); + + // 格式化数据以便展示 + const formatResult = (data) => { + if (typeof data === 'string') return data; + if (Array.isArray(data)) { + return `找到 ${data.length} 条记录`; + } + if (typeof data === 'object') { + return JSON.stringify(data, null, 2); + } + return String(data); + }; + + return ( + + {/* 头部 - 始终可见 */} + setIsExpanded(!isExpanded)} + _hover={{ bg: useColorModeValue('gray.50', 'gray.600') }} + > + + + + + + 步骤 {stepResult.step_index + 1}: {stepResult.tool} + + + {stepResult.status === 'success' ? '成功' : + stepResult.status === 'failed' ? '失败' : '执行中'} + + + + 耗时: {stepResult.execution_time?.toFixed(2)}s + + + + + } + size="sm" + variant="ghost" + aria-label={isExpanded ? "收起" : "展开"} + /> + + + {/* 内容 - 可折叠 */} + + + + + {/* 参数 */} + {stepResult.arguments && Object.keys(stepResult.arguments).length > 0 && ( + + + + 请求参数: + + + {JSON.stringify(stepResult.arguments, null, 2)} + + + )} + + {/* 结果或错误 */} + {stepResult.status === 'success' && stepResult.result && ( + + 执行结果: + + {typeof stepResult.result === 'string' ? ( + {stepResult.result} + ) : Array.isArray(stepResult.result) ? 
( + + 找到 {stepResult.result.length} 条记录: + {stepResult.result.slice(0, 3).map((item, idx) => ( + + {JSON.stringify(item, null, 2)} + + ))} + {stepResult.result.length > 3 && ( + + ...还有 {stepResult.result.length - 3} 条记录 + + )} + + ) : ( + + {JSON.stringify(stepResult.result, null, 2)} + + )} + + + )} + + {stepResult.status === 'failed' && stepResult.error && ( + + 错误信息: + + {stepResult.error} + + + )} + + + + ); +}; + +export default StepResultCard; diff --git a/src/components/ChatBot/index.js b/src/components/ChatBot/index.js index 59a0a5f6..4b1f4dee 100644 --- a/src/components/ChatBot/index.js +++ b/src/components/ChatBot/index.js @@ -2,6 +2,10 @@ // 聊天机器人组件统一导出 export { ChatInterface } from './ChatInterface'; +export { ChatInterfaceV2 } from './ChatInterfaceV2'; export { MessageBubble } from './MessageBubble'; +export { PlanCard } from './PlanCard'; +export { StepResultCard } from './StepResultCard'; -export { ChatInterface as default } from './ChatInterface'; +// 默认导出新版本 +export { ChatInterfaceV2 as default } from './ChatInterfaceV2'; diff --git a/src/services/llmService.js b/src/services/llmService.js new file mode 100644 index 00000000..22183f47 --- /dev/null +++ b/src/services/llmService.js @@ -0,0 +1,278 @@ +// src/services/llmService.js +// LLM服务层 - 集成AI模型进行对话和工具调用 + +import axios from 'axios'; +import { mcpService } from './mcpService'; +import { logger } from '../utils/logger'; + +/** + * LLM服务配置 + */ +const LLM_CONFIG = { + // 可以使用 OpenAI、Claude、通义千问等 + provider: 'openai', // 或 'claude', 'qwen' + apiKey: process.env.REACT_APP_OPENAI_API_KEY || '', + apiUrl: 'https://api.openai.com/v1/chat/completions', + model: 'gpt-4o-mini', // 更便宜的模型 +}; + +/** + * LLM服务类 + */ +class LLMService { + constructor() { + this.conversationHistory = []; + } + + /** + * 构建系统提示词 + */ + getSystemPrompt(availableTools) { + return `你是一个专业的金融投资助手。你可以使用以下工具来帮助用户查询信息: + +${availableTools.map(tool => ` +**${tool.name}** +描述:${tool.description} +参数:${JSON.stringify(tool.parameters, null, 2)} +`).join('\n')} + +用户提问时,请按照以下步骤: +1. 理解用户的意图 +2. 选择合适的工具(可以多个) +3. 提取工具需要的参数 +4. 调用工具后,用自然语言总结结果 + +回复格式: +- 如果需要调用工具,返回JSON格式:{"tool": "工具名", "arguments": {...}} +- 如果不需要工具,直接回复自然语言 + +注意: +- 贵州茅台的股票代码是 600519 +- 涨停是指股票当日涨幅达到10% +- 概念板块是指相同题材的股票分类`; + } + + /** + * 智能对话 - 使用LLM理解意图并调用工具 + */ + async chat(userMessage, conversationHistory = []) { + try { + // 1. 获取可用工具列表 + const toolsResult = await mcpService.listTools(); + if (!toolsResult.success) { + throw new Error('获取工具列表失败'); + } + + const availableTools = toolsResult.data; + + // 2. 构建对话历史 + const messages = [ + { + role: 'system', + content: this.getSystemPrompt(availableTools), + }, + ...conversationHistory.map(msg => ({ + role: msg.isUser ? 'user' : 'assistant', + content: msg.content, + })), + { + role: 'user', + content: userMessage, + }, + ]; + + // 3. 调用LLM + logger.info('LLMService', '调用LLM', { messageCount: messages.length }); + + // 注意:这里需要配置API密钥 + if (!LLM_CONFIG.apiKey) { + // 如果没有配置LLM,使用简单的关键词匹配 + logger.warn('LLMService', '未配置LLM API密钥,使用简单匹配'); + return await this.fallbackChat(userMessage); + } + + const response = await axios.post( + LLM_CONFIG.apiUrl, + { + model: LLM_CONFIG.model, + messages: messages, + temperature: 0.7, + max_tokens: 1000, + }, + { + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${LLM_CONFIG.apiKey}`, + }, + timeout: 30000, + } + ); + + const aiResponse = response.data.choices[0].message.content; + logger.info('LLMService', 'LLM响应', { response: aiResponse }); + + // 4. 
解析LLM响应 + // 如果LLM返回工具调用指令 + try { + const toolCall = JSON.parse(aiResponse); + if (toolCall.tool && toolCall.arguments) { + // 调用MCP工具 + const toolResult = await mcpService.callTool(toolCall.tool, toolCall.arguments); + + if (!toolResult.success) { + return { + success: false, + error: toolResult.error, + }; + } + + // 5. 让LLM总结工具结果 + const summaryMessages = [ + ...messages, + { + role: 'assistant', + content: aiResponse, + }, + { + role: 'system', + content: `工具 ${toolCall.tool} 返回的数据:\n${JSON.stringify(toolResult.data, null, 2)}\n\n请用自然语言总结这些数据,给用户一个简洁清晰的回复。`, + }, + ]; + + const summaryResponse = await axios.post( + LLM_CONFIG.apiUrl, + { + model: LLM_CONFIG.model, + messages: summaryMessages, + temperature: 0.7, + max_tokens: 500, + }, + { + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${LLM_CONFIG.apiKey}`, + }, + timeout: 30000, + } + ); + + const summary = summaryResponse.data.choices[0].message.content; + + return { + success: true, + data: { + message: summary, + rawData: toolResult.data, + toolUsed: toolCall.tool, + }, + }; + } + } catch (parseError) { + // 不是JSON格式,说明是直接回复 + return { + success: true, + data: { + message: aiResponse, + }, + }; + } + + // 默认返回LLM的直接回复 + return { + success: true, + data: { + message: aiResponse, + }, + }; + } catch (error) { + logger.error('LLMService', 'chat error', error); + return { + success: false, + error: error.message || '对话处理失败', + }; + } + } + + /** + * 降级方案:简单的关键词匹配(当没有配置LLM时) + */ + async fallbackChat(userMessage) { + logger.info('LLMService', '使用降级方案', { message: userMessage }); + + // 使用原有的简单匹配逻辑 + if (userMessage.includes('新闻') || userMessage.includes('资讯')) { + const result = await mcpService.callTool('search_china_news', { + query: userMessage.replace(/新闻|资讯/g, '').trim(), + top_k: 5, + }); + return this.formatFallbackResponse(result, '新闻搜索'); + } else if (userMessage.includes('概念') || userMessage.includes('板块')) { + const query = userMessage.replace(/概念|板块/g, '').trim(); + const result = await mcpService.callTool('search_concepts', { + query, + size: 5, + sort_by: 'change_pct', + }); + return this.formatFallbackResponse(result, '概念搜索'); + } else if (userMessage.includes('涨停')) { + const query = userMessage.replace(/涨停/g, '').trim(); + const result = await mcpService.callTool('search_limit_up_stocks', { + query, + mode: 'hybrid', + page_size: 5, + }); + return this.formatFallbackResponse(result, '涨停分析'); + } else if (/^[0-9]{6}$/.test(userMessage.trim())) { + // 6位数字 = 股票代码 + const result = await mcpService.callTool('get_stock_basic_info', { + seccode: userMessage.trim(), + }); + return this.formatFallbackResponse(result, '股票信息'); + } else if (userMessage.includes('茅台') || userMessage.includes('贵州茅台')) { + // 特殊处理茅台 + const result = await mcpService.callTool('get_stock_basic_info', { + seccode: '600519', + }); + return this.formatFallbackResponse(result, '贵州茅台股票信息'); + } else { + // 默认:搜索新闻 + const result = await mcpService.callTool('search_china_news', { + query: userMessage, + top_k: 5, + }); + return this.formatFallbackResponse(result, '新闻搜索'); + } + } + + /** + * 格式化降级响应 + */ + formatFallbackResponse(result, action) { + if (!result.success) { + return { + success: false, + error: result.error, + }; + } + + return { + success: true, + data: { + message: `已为您完成${action},找到以下结果:`, + rawData: result.data, + }, + }; + } + + /** + * 清除对话历史 + */ + clearHistory() { + this.conversationHistory = []; + } +} + +// 导出单例 +export const llmService = new LLMService(); + +export default LLMService; diff --git 
a/src/views/AgentChat/index.js b/src/views/AgentChat/index.js index 1d2fe93e..5bb0ebd5 100644 --- a/src/views/AgentChat/index.js +++ b/src/views/AgentChat/index.js @@ -10,7 +10,7 @@ import { VStack, useColorModeValue, } from '@chakra-ui/react'; -import ChatInterface from '../../components/ChatBot'; +import { ChatInterfaceV2 } from '../../components/ChatBot'; /** * Agent聊天页面 @@ -42,7 +42,7 @@ const AgentChat = () => { h="calc(100vh - 300px)" minH="600px" > - <ChatInterface /> + <ChatInterfaceV2 />
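Once the backend side of this patch is running, the new endpoint can be exercised without the frontend. A minimal sketch, assuming the FastAPI app from `mcp_server.py` is reachable at `http://localhost:8000` with no reverse-proxy prefix (the frontend goes through `/mcp/agent/chat`); adjust host, port, and prefix to the actual deployment:

```python
# Hedged end-to-end check of the new POST /agent/chat endpoint.
import requests

resp = requests.post(
    "http://localhost:8000/agent/chat",  # assumption: adjust to real host/port/prefix
    json={"message": "贵州茅台最近有什么新闻", "conversation_history": []},
    timeout=300,
)
resp.raise_for_status()
data = resp.json()

print("success:", data["success"])
print("goal:", (data.get("plan") or {}).get("goal"))
for step in data.get("step_results", []):
    print(f"step {step['step_index'] + 1}: {step['tool']} -> {step['status']}")
print(data.get("final_summary") or data["message"])
```

The response mirrors the `AgentResponse` model: the Kimi-generated `plan`, per-step `step_results`, and the `final_summary` that the frontend renders in `ChatInterfaceV2`.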