From da7ccd2d26ddf343ba6ce47debc018ba04fafd43 Mon Sep 17 00:00:00 2001
From: xyz <123456@gmail.com>
Date: Wed, 7 Jan 2026 13:44:46 +0800
Subject: [PATCH] feat: implement council v3 round-table mode

---
 agents/research_agent.py         |  24 ++--
 app.py                           | 101 ++++++++-------
 config.py                        |  58 ++++++++-
 orchestrator/research_manager.py | 115 +++++++++++-------
 utils/llm_client.py              |  25 ++--
 5 files changed, 215 insertions(+), 108 deletions(-)
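The new flow is easiest to see end to end. Below is a minimal headless driver, a sketch only: the credentials, base URL, and model names are placeholders, and it assumes the package layout shown in the diffstat. collaborate() yields the same step_start / content / step_end events that app.py renders.

```python
# Hypothetical smoke-test driver for the round-table mode (not part of this patch).
from orchestrator.research_manager import ResearchManager, ResearchConfig

manager = ResearchManager(
    api_key="sk-...",                    # placeholder credential
    base_url="https://aihubmix.com/v1",  # any base_url from config.LLM_PROVIDERS works
    provider="aihubmix",
)
config_obj = ResearchConfig(
    topic="Should we migrate the billing service to Rust?",
    context="Small team, latency-sensitive workload.",
    experts=[
        {"name": "Expert 1", "model": "gpt-4o"},
        {"name": "Expert 2", "model": "gemini-1.5-pro"},
        {"name": "Expert 3 (Synthesizer)", "model": "claude-3-5-sonnet-20241022"},
    ],
)
manager.create_agents(config_obj)

# The event protocol matches what app.py consumes in its rendering loop.
for event in manager.collaborate(config_obj.topic, config_obj.context, max_rounds=2):
    if event["type"] == "step_start":
        print(f"\n--- {event['step']} [{event['agent']} / {event['model']}] ---")
    elif event["type"] == "content":
        print(event["content"], end="", flush=True)
```

The last step_end event carries the synthesizer's output, which is what app.py treats as the final plan.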
z-aLpGnTvmdcP}2qvqvw&=uHIisO+tW#c6S6AI$LmVCKDVn2*}ms&%BQWl7ZR=fmL3 z8A-!YwPWOsDKJps6ic;;T)`h209q9>L^r!2CaZeRX7i2xByKj4=rfJ z+JX>Vg!Kh&IC)xyORx!-VGAs$Kx;$Uw%Lo^r`(L)mN(dQ*=V<}h5qz_AkXO|h@*(4 z$l8n25sfFoz-Hxqi-q}yYwnYA5Cy0!j@=0TWQ45!I39+?&W!7XOn{Kb=&pyXlVG#8 zf~jjaLx1dx<{sMVTt(I`lKM957ELxSHgsE|CGqS`8BS8%i}8HIjJ=ne4~)UW3VS%7 zOcU&O++N3ZcW2e%LIVp8JfU$`Ka~&XTA6wvY!YFE2O~etmj6de{3MPDWgiOWw*KTP ziLfiW?6{9`5G5&{4$@2;2ZH<%hdvz=v8NUkzkt#xzy8v1g^j?UGbOe8uMLd%V5NjVrw2aUFBB%iBHP>hos*&uT@JKTGxBZT9`_y~3ur F{1@h~nR@^L diff --git a/agents/__pycache__/research_agent.cpython-313.pyc b/agents/__pycache__/research_agent.cpython-313.pyc index fcaafbd0fbc7d943c3bf5ec81dc8f990ec1b6f08..71186bd710a1877476a88b2a506c4e04fd10d168 100644 GIT binary patch delta 1644 zcmbtU&u<$=6yCLW6MHvy(kQfPh>}rLIdMqiLefCPA!-u@MM@4`f)Xi}nXJe5l=Y4~ zvzs3VVNMm5K&6b3a%?Y9Bo3{(^nj{zKnMyE|~j?R-#f*Ve^W2~8uxz^!y zteMjtPUnDj(YUDd&b5@20#Cw8E6*tv3>r^6J&pxC)SBVgPtX>4yTBW-*kC1{{3d@3U zQP|pUG#Z5kw&{Bemqei!dI2rs3ZzurWc*!GW%Yu*otV>?CTVgWT->-0Yp1d(%J6eK)r||JH79S-z57)xVOTB|os#aS?^P@}1=K6OKN+qG`BS z)11Vzc0K{C|7uyyNiOSIZOV|BjH9*@;Xw$;lxK`#yW{V9r^ zQH5a-QGo)6>X--ArF9$-M16#thy%2ABEah&K_x2OP(%>`E10)IqBBAedQ_k~XMn0r zT4xDznZE(*;{cAs-UI{_v{DVqA=ub2sdW+X&<*GYfej(`N+_&DY7CO*VieI5!2-R` zIO0UqiL17H+bRWI;e?<~8dT8+_Gk$JSTLXZl}T%!`2pDqW>G6ew(5ijMU00)7x{%T z+KgoShaqf;IQ|F4gW_OdvfYR&2dqxrhzYQ+GeMLbp<*kZ$-hK-6G+756(axwb)|X4 z7{%#|Nn|ZBXW`0YDQ<5fu?^rO$y2a_~-| z`6~82;#WWeA;MZc(iT{9kkQ-z=3K`uy)O~}{n2}n;D_K>A%n!APcK6lO zS5E)ZKelU+Jv=ge-S{$nBfaxpTQgp;+9}QM{pe^rZS)-mN9J(5OUw3NUA?lpGrpTS z-cFePR@;g(wy7{CV6snR48S}ij=Yf?k>96&&L2}LsmG~hyHy+%;m#9W%Nud7$D@Q* zmy_S7r}Qb=PG9IzY+_#B%IcRZ7Va5PW)w}mHastXv`*#T2lKM_t8r}4FwD309S3;8 zc$3EC9r34{%RzgnZ`ge4=15zE;TCvd*gLDsMcdV<kkb&{u~7U E3mc2#>Hq)$ delta 1015 zcmZ`%&u2K@bEA03D{nBr@Zff=bHIT3{5;nGkgKgnrr{Q6@k*mAdJB(Ick#7`muHn^f zoHu;VB+;K3!X;eLTH|x^knycr!q>OHzTNi(`V`=x?sz()p2Nitsv-`B7q)}yu$r=F zt36lnnD+cQ?p$rgj3#N*85G$E6Kwe2AL_QX;-E{u+Oeio-JZ|&+=O(f6885HXhqfO zxst{$QP4yTX^8|S8P>)1izP4$yI>FGYOtqyn-fwOQKA!4{|__#OV7DDY@J6|m<^$PSx zmx}Z%+5A&wx#?R1+ML9|B%~a=g0HiP0)dFq?r~`GEQv_Uc%mgl!-mvC-hy<6LMEx0 zHep@Xtob+EN#n2M^M_uP) z4Y)AA%+#y-i#$o`jdTG%wR~-}ryl2LK5x0jk_cKcxh{``U~)|xqHj7(N*V$0cDYD1 zQ;JGm5-tp;hqJozkMq?0a}=69k%I}?I)!Uls&lou;F+w!d7ezCu$svfNLx@kbrMJv z!8(aEE!}|T3W;Nyw9T|c%B<5h1IVYp7Iv&*Yw+MzTO2U&{dkHoZE4CjtYef$Nzawq$)>z%i@ za32}F&E6QR9)(B0Jm{U+{-%$#*^zIJd0kU~ht0z$OqPQNMl|#ar#d{!VtQK4sf(rY MaK`fgLmCE>_x2kWWB>pF diff --git a/agents/research_agent.py b/agents/research_agent.py index 5244528..9d16a9f 100644 --- a/agents/research_agent.py +++ b/agents/research_agent.py @@ -5,27 +5,37 @@ import config class ResearchAgent: """研究模式专用 Agent""" - def __init__(self, role: str, llm_client: LLMClient): + def __init__(self, role: str, llm_client: LLMClient, name: str = None): self.role = role self.llm_client = llm_client self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {}) - self.name = self.role_config.get("name", role.capitalize()) + self.name = name if name else self.role_config.get("name", role.capitalize()) @property def model_name(self) -> str: return self.llm_client.model def _get_system_prompt(self, context: str = "") -> str: - if self.role == "expert_a": + if self.role == "council_member": + return f"""You are {self.name}, a member of the Multi-Model Decision Council. +Your goal is to participate in a round-table discussion to solve the user's problem. +Be conversational, insightful, and constructive. +Build upon others' ideas or respectfully disagree with valid reasoning. 
diff --git a/app.py b/app.py
index a7070fc..56a6a56 100644
--- a/app.py
+++ b/app.py
@@ -131,20 +131,8 @@ with st.sidebar:
 
     st.divider()
 
-    if mode == "Deep Research":
-        st.subheader("🧪 Research Model Configuration")
-        
-        # Model configuration for the three roles
-        roles_config = {}
-        for role_key, role_info in config.RESEARCH_MODEL_ROLES.items():
-            roles_config[role_key] = st.selectbox(
-                f"{role_info['name']} ({role_info['description']})",
-                options=list(AVAILABLE_MODELS.keys()),
-                index=list(AVAILABLE_MODELS.keys()).index(role_info['default_model']) if role_info['default_model'] in AVAILABLE_MODELS else 0,
-                key=f"model_{role_key}"
-            )
-    
-    else:  # Debate Workshop
+    
+    if mode == "Debate Workshop":  # Debate Workshop settings
         # Model selection
         model = st.selectbox(
             "🤖 Select a general model",
@@ -205,11 +193,39 @@
 # ==================== Main UI logic ====================
 
 if mode == "Deep Research":
-    st.title("🧪 Multi-Model Council V2")
-    st.markdown("*Multi-model council: Analysis (Expert A) -> Critique (Expert B) -> Refinement (Expert A) -> Decision (Expert C)*")
+    st.title("🧪 Multi-Model Council V3")
+    st.markdown("*Multi-model council: N user-defined experts discuss over multiple rounds, and the last expert makes the final decision*")
 
-    # Input
-    research_topic = st.text_area("Research / decision topic", placeholder="Enter the topic you want to research or decide on...", height=100)
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        research_topic = st.text_area("Research / decision topic", placeholder="Enter the topic you want to research or decide on...", height=100)
+    with col2:
+        max_rounds = st.number_input("Discussion rounds", min_value=1, max_value=5, value=2, help="Number of conversation rounds the experts hold")
+
+    # Expert configuration
+    st.subheader("👥 Expert Configuration")
+    num_experts = st.number_input("Number of experts", min_value=2, max_value=5, value=3)
+
+    experts_config = []
+    cols = st.columns(num_experts)
+
+    for i in range(num_experts):
+        with cols[i]:
+            default_model_key = list(AVAILABLE_MODELS.keys())[i % len(AVAILABLE_MODELS)]
+            st.markdown(f"**Expert {i+1}**")
+            # Default names; the last expert doubles as the synthesizer
+            default_name = f"Expert {i+1}"
+            if i == num_experts - 1:
+                default_name = f"Expert {i+1} (Synthesizer)"
+
+            expert_name = st.text_input(f"Name #{i+1}", value=default_name, key=f"expert_name_{i}")
+            expert_model = st.selectbox(f"Model #{i+1}", options=list(AVAILABLE_MODELS.keys()), index=list(AVAILABLE_MODELS.keys()).index(default_model_key), key=f"expert_model_{i}")
+
+            experts_config.append({
+                "name": expert_name,
+                "model": expert_model
+            })
 
     research_context = st.text_area("Additional background (optional)", placeholder="Any extra background information...", height=80)
     
     start_research_btn = st.button("🚀 Start multi-model collaboration", type="primary", disabled=not research_topic)
@@ -219,49 +235,48 @@ if mode == "Deep Research":
         st.session_state.research_output = ""
         st.session_state.research_steps_output = []
 
-        manager = ResearchManager(api_key=api_key)
+        manager = ResearchManager(
+            api_key=api_key,
+            base_url=base_url,
+            provider=provider_id
+        )
         
         config_obj = ResearchConfig(
            topic=research_topic,
            context=research_context,
-            expert_a_model=roles_config['expert_a'],
-            expert_b_model=roles_config['expert_b'],
-            expert_c_model=roles_config['expert_c']
+            experts=experts_config
         )
         manager.create_agents(config_obj)
 
         st.divider()
-        st.subheader("🧠 Council thinking...")
+        st.subheader("🗣️ Council in discussion...")
 
-        # Collaborative Execution
-        current_step_name = ""
-        current_step_content = ""
-        step_placeholder = st.empty()
-        status_container = st.status("Initializing...", expanded=True)
+        chat_container = st.container()
 
         try:
-            for event in manager.collaborate(research_topic, research_context):
+            for event in manager.collaborate(research_topic, research_context, max_rounds=max_rounds):
                 if event["type"] == "step_start":
                     current_step_name = event["step"]
                     current_agent = event["agent"]
                     current_model = event["model"]
-                    status_container.update(label=f"🔄 {current_step_name} [{current_agent}] ({current_model})", state="running")
-                    step_placeholder = st.empty()
-                    current_step_content = ""
+                    
+                    # Create a chat message block
+                    with chat_container:
+                        st.markdown(f"#### {current_step_name}")
+                        st.caption(f"🤖 {current_agent} ({current_model})")
+                        message_placeholder = st.empty()
+                        current_content = ""
                 
                 elif event["type"] == "content":
-                    current_step_content += event["content"]
-                    step_placeholder.markdown(f"**Thinking...**\n\n{current_step_content}")
+                    current_content += event["content"]
+                    message_placeholder.markdown(current_content)
                 
                 elif event["type"] == "step_end":
-                    # Save step result
+                    # Save step result for history
                     st.session_state.research_steps_output.append({
                         "step": current_step_name,
                         "output": event["output"]
                     })
-                    status_container.write(f"### {current_step_name}\n{event['output']}")
-                    status_container.update(label=f"✅ {current_step_name} done", state="running")
-            
-            status_container.update(label="✅ All steps complete", state="complete", expanded=False)
+                    st.divider()  # Separator between turns
 
             # The last step output is the final plan
             if st.session_state.research_steps_output:
@@ -423,9 +438,9 @@ elif mode == "Debate Workshop":
         try:
             # Initialize the default client
             llm_client = LLMClient(
-                provider="aihubmix",
+                provider=provider_id,
                 api_key=api_key,
-                base_url="https://aihubmix.com/v1",
+                base_url=base_url,
                 model=model
             )
@@ -434,9 +449,9 @@ elif mode == "Debate Workshop":
             for ag_id, ag_model in agent_model_map.items():
                 if ag_model != model:  # Only create a new client if different from the default
                     agent_clients[ag_id] = LLMClient(
-                        provider="aihubmix",
+                        provider=provider_id,
                         api_key=api_key,
-                        base_url="https://aihubmix.com/v1",
+                        base_url=base_url,
                         model=ag_model
                     )
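Each finished turn now lands in st.session_state.research_steps_output, so a rerun can replay the whole round table. A sketch of such a replay block, illustrative and not part of this patch; the key names match what the loop above stores:

```python
# Hypothetical replay of saved turns on a Streamlit rerun.
import streamlit as st

for step in st.session_state.get("research_steps_output", []):
    st.markdown(f"#### {step['step']}")  # e.g. "Round 1: Expert 1"
    st.markdown(step["output"])          # the full text of that turn
    st.divider()
```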
diff --git a/config.py b/config.py
index 15db778..78909a1 100644
--- a/config.py
+++ b/config.py
@@ -10,8 +10,39 @@ load_dotenv()
 ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
 AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
+SILICONFLOW_API_KEY = os.getenv("SILICONFLOW_API_KEY", "")
 
-# AIHubMix configuration
+# LLM Providers Configuration
+LLM_PROVIDERS = {
+    "AIHubMix": {
+        "base_url": "https://aihubmix.com/v1",
+        "api_key_var": "AIHUBMIX_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "DeepSeek": {
+        "base_url": "https://api.deepseek.com",
+        "api_key_var": "DEEPSEEK_API_KEY",
+        "default_model": "deepseek-chat"
+    },
+    "SiliconFlow": {
+        "base_url": "https://api.siliconflow.cn/v1",
+        "api_key_var": "SILICONFLOW_API_KEY",
+        "default_model": "deepseek-ai/DeepSeek-V3"  # SiliconFlow uses fully qualified model paths
+    },
+    "OpenAI": {
+        "base_url": "https://api.openai.com/v1",
+        "api_key_var": "OPENAI_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "Custom": {
+        "base_url": "http://localhost:8000/v1",
+        "api_key_var": "CUSTOM_API_KEY",
+        "default_model": "local-model"
+    }
+}
+
+# AIHubMix configuration (legacy; kept for backward compatibility, main logic now uses LLM_PROVIDERS)
 AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"
 
 # Model configuration
@@ -20,17 +51,42 @@
 LLM_PROVIDER = "aihubmix"  # Defaults to AIHubMix
 
 # Supported models
 AVAILABLE_MODELS = {
+    # OpenAI
     "gpt-4o": "GPT-4o (OpenAI)",
     "gpt-4o-mini": "GPT-4o Mini (OpenAI)",
+    "gpt-4-turbo": "GPT-4 Turbo (OpenAI)",
+    
+    # Anthropic
     "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)",
     "claude-3-opus-20240229": "Claude 3 Opus (Anthropic)",
+    "claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)",
+    
+    # Google
     "gemini-1.5-pro": "Gemini 1.5 Pro (Google)",
     "gemini-1.5-flash": "Gemini 1.5 Flash (Google)",
+    "gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)",
+    
+    # DeepSeek
     "deepseek-chat": "DeepSeek V3 (DeepSeek)",
     "deepseek-reasoner": "DeepSeek R1 (DeepSeek)",
+    "deepseek-coder": "DeepSeek Coder (DeepSeek)",
+    "deepseek-v2.5": "DeepSeek V2.5 (DeepSeek)",
+    "deepseek-coder-v2": "DeepSeek Coder V2 (DeepSeek)",
+    
+    # Meta
     "llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)",
+    "llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)",
+    
+    # Alibaba
     "qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)",
+    "qwen-plus": "Qwen Plus (Alibaba)",
+    "qwen-turbo": "Qwen Turbo (Alibaba)",
+    
+    # Mistral
     "mistral-large-latest": "Mistral Large (Mistral)",
+    
+    # Perplexity
+    "llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)",
 }
 
 # Debate configuration
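The api_key_var indirection means callers resolve keys from the environment rather than importing the module constants directly. A sketch of the intended lookup; resolve_provider is a hypothetical helper, not defined in this patch:

```python
# Hypothetical helper showing the intended LLM_PROVIDERS lookup pattern.
import os
import config

def resolve_provider(name: str) -> dict:
    """Return the base URL, API key, and default model for a named provider."""
    info = config.LLM_PROVIDERS[name]
    return {
        "base_url": info["base_url"],
        # api_key_var names the environment variable that holds the key.
        "api_key": os.getenv(info["api_key_var"], ""),
        "model": info["default_model"],
    }

print(resolve_provider("DeepSeek"))
# {'base_url': 'https://api.deepseek.com', 'api_key': '', 'model': 'deepseek-chat'}
```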
"https://api.siliconflow.cn/v1", + "api_key_var": "SILICONFLOW_API_KEY", + "default_model": "deepseek-ai/DeepSeek-V3" # SiliconFlow often uses full path + }, + "OpenAI": { + "base_url": "https://api.openai.com/v1", + "api_key_var": "OPENAI_API_KEY", + "default_model": "gpt-4o" + }, + "Custom": { + "base_url": "http://localhost:8000/v1", + "api_key_var": "CUSTOM_API_KEY", + "default_model": "local-model" + } +} + +# AIHubMix 配置 (Legacy, keeping for backward compatibility if needed, but main logic will use LLM_PROVIDERS) AIHUBMIX_BASE_URL = "https://aihubmix.com/v1" # 模型配置 @@ -20,17 +51,42 @@ LLM_PROVIDER = "aihubmix" # 默认使用 AIHubMix # 支持的模型列表 AVAILABLE_MODELS = { + # OpenAI "gpt-4o": "GPT-4o (OpenAI)", "gpt-4o-mini": "GPT-4o Mini (OpenAI)", + "gpt-4-turbo": "GPT-4 Turbo (OpenAI)", + + # Anthropic "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)", "claude-3-opus-20240229": "Claude 3 Opus (Anthropic)", + "claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)", + + # Google "gemini-1.5-pro": "Gemini 1.5 Pro (Google)", "gemini-1.5-flash": "Gemini 1.5 Flash (Google)", + "gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)", + + # DeepSeek "deepseek-chat": "DeepSeek V3 (DeepSeek)", "deepseek-reasoner": "DeepSeek R1 (DeepSeek)", + "deepseek-coder": "DeepSeek Coder (DeepSeek)", + "deepseek-v2.5": "DeepSeek V2.5 (DeepSeek)", + "deepseek-coder-v2": "DeepSeek Coder V2 (DeepSeek)", + + # Meta "llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)", + "llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)", + + # Alibaba "qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)", + "qwen-plus": "Qwen Plus (Alibaba)", + "qwen-turbo": "Qwen Turbo (Alibaba)", + + # Mistral "mistral-large-latest": "Mistral Large (Mistral)", + + # Perplexity + "llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)", } # 辩论配置 diff --git a/orchestrator/__pycache__/research_manager.cpython-313.pyc b/orchestrator/__pycache__/research_manager.cpython-313.pyc index 7c0fd17a75b35c42d30acc77e7fa7425199824b0..700302d473f166b6358ef8ef6af1f41d447f360f 100644 GIT binary patch literal 5522 zcmaJ_TWl2989sB_+w9t28!x`a@nA0YlCTb7N^w#L0_JLy;9&`1!!X_*n}OMx<;*OY zP1MjVtQ=|1 z^`C#v`TooK|5>Q15eSs{zt3n+10nyyLACfQorM|b+#nK>m?PvcLz%-YWe;yi1S*XBnn@>-{H;U^>|j0KJ1U=`VRtPEYoY2I zjYz^MGs=$EjMntBxEXw;FpPL9(n>Dyw06Q5uX&0*;+(?|YL@Nr`?Qqpgbt{NN)_9r zamERy6~dUg{IE$nNSTIR|f}&_Ual8j;4@PweXeB!XR{RkWoSziBMKz zD7TS}@}oY9?Sd7Fqkeb9yCWYBj0Q(TFvBnH8EF0PYJdu`PJ>~blvDJ|N}Vi^ zYt*uJ%}@;!mO0C&4$LZsVcLqVnT93Hv=+Wc>#*Xm!lODdS+@%+zx~ZhCo=i@+HCaJ8^oD6`X7BXm^+d5| z-6Ou6HdkL=A+HF`Vp;eYs+IDRSjtNc63WbnvKaOC`lSGB&;^G2r6A0OMgyQ#f~!?Q z(6LZ2ThTE=S|il}MJ+saqhX+ofJOozPrec-alIUBW^Yi6R(RG*^|Wq+i#IxTRY?zd zM}LaY=exa;CEDjzaR~9-GGwMzUEFKtjFhH}h~v0!UI7(QvYLERz3K$V6ibzJRCj_| zYF^gTDos^&6nGM}D3s>H51_h1PDXWTy>e32nZ^Dadd2n#=^;s?<2!DdAaUksoTu$D zqOW4r0hQx-C2PfbhqqLH{Dox;w;)2HEl3PHsu`LMir4(Y!4uV$dZ4pVgx5%^>D5mn zrDpMy8n0U0JjMSi?8?{ifrhKP!^`Pl6${KR z$H|+-S%0OwtVuEwvy0zA;w&P8=U;Z-jWC4SAkMB_inFv6XOY6fEmZLc%M+?Cr(E~q zw7mq$(qa?7i9X^Q`MR#T6l=M8=K7gZWAn|y>x0!%L(_c77Zu9H7ZN;>{NO3Tf2=?! 
z3`|^f8UU9Yo{VA2D@kTGgwXo1C?+8zsLF0xopwhB?KR|Jl8_u6HoIP841NTq;=RWM zwxQ~1sZDZ{>&2@alH4%v^@RfV#3xzFw*}^F$#%AS8%X|LoZ$n`FVN#~xaQ@_q_6rN zkqo_0NSKIZA}|q%2lLwwfr@I1uH{wHQ*QB!W}g?WteVoswUp?po+XY`Gb4HkJGCj# zGUbe#8B?h%X@^Izn-2`QPFj45$lyIhOaUC&emPw^~A;{m!3TG#D5A=Sub@Pbk) z;zm&d4hQN0=9bN>Mmp~IG-|WEprt3Buy+^8xFIKm-;nLAS=I4*M?hspJFIG6O^T|( z-t)_r)Bd9Ftyo5FwtfQDHSz?F@RM*UT>okRr~O}V*fKZ%g>l<>wBbj^s5s4)Bcy%H zbm&=-v}`K1be1~SKkn!$cJ$l{-#t)#W8Z_0fl|wc&pK{(%&on%^X~S$m;SutZdYOJ zI}cj+&IkCGhG}0}Aoa1E;p^eY(avJDb9Tq0X#Cp_va!F+kw~lO+|if2my{!9lu4Na z_GxQ{w?iXb56{A1Km*r&vXiZ6eRl8diqhv^`Pq5WpKup*GGiw$({?X$&^ks$f zs`8K;(KQ)ikVh;TUR@H>+U?xxYcTu0vXGiwT$@)%O=1> zxW*x-H7k{~EQs&=gslmPBaF)stGNN6IB1$#v3rlMi=L;l#9=fjsK+!TE^JGP`!z$+ zMd_*m@rb2aV)vk8*%j)zkPjZm$h1%F%CGGb-ceE)C%lisS1?ZP6aUK!0!@H)9Ko{H zEC!O)&ilR3zEqX*Yp@wiTR0;%gl^TAVQPE@EW<~Jq8YTtcbGbUUlmQE&`25dp< zN{H`y0g!mr%ux{o4)Eq81X%i2+`R&4gNRAYnwnu-3Gp2jz4#&?KnlUFXywM>I|kyL z$p0=O4y%_n^~#H%v`leap}1;gP2g*rAOoN@bJimNed;)PY;QBj!xBvjA{&Po9NA?O z;t^HNzQ{QPF&rMzyrR%FY+AHTJ%<5$LJ;;XN>e-tNsURb_6Za(xj5swMK48w{Dr0c zh~;ivV?A84?*v^r;6>t5IDRu{XLI(`|3S&uza(&niRbrM6#(u<3``-Q*FZAM1ORxN zuv0i0fS58f0L6LLuuw&_x?+gqaPY3Y*p(2xt4I`AJxczeARHQc@7>|!dyXFO6H#G@ zRS*$GzTTJ9+W5Fi;rk-u>B-T%jup`2N>Y?S&8=#Jf+B`H4a`SLA&TyX62f**tA>)& zaZr?$o=XFPIHb~yqNNiTEI6GqL6wZyowHCJ=gysjdl?sjA<&NLX6j-b#eP|{AkV^> z0kNTw`ec+WO1ff`3Ao`MA5{!UaNLU?ZyLD2FpPB*H%P+(pNwM5=RKJ^LCBfBAYePn zuhzTr7cW2Bha4K0>+a&K4NFqNhycwHg90#X86pc;5gb44ER(gJAh-`uUJGJ{)cKro z(Ft1>OV6v+5j-jx=9~yfHVrUXII=KRsmjr7y&ylla~cPO3*3)6$b(OFAK~Z}x0}b8_VCmDpR~142TSdppPjvRc5eU7*$3_KP7jnC zUnw?joIY4u|N7lnvGwhTjc?EU$gA%$m@i8Qs{ymU+CF=ukG=kW5qqk3cH63 z!%|_(@q&Eev1}G)6SnjLv!4Y#K46X^9+jqU81V=Vvrk&&!a21dkCzEI!7zY2TVJT( z_<-3|AYG7Hbn8qx5L|0AQ$o3sG`CJgN-^Mhof@D?-OR% zlnFF*EzhtiN7wi^&mMh7pqby?<9idAp}`lSxd&{Zxqql|;&fr;Y~ft0Fp?^sNEgO0 z6!dK2M7B71={ZukWZvFzTTY{17XE8(R81 z8z={B1gbK}Fw8$mcad~|L)LvmdcGl>z9KbG7~wg~GjUKFCceNCHeANx^GK3m-kc{` hmkpSS%^aWI05I&Go%}peY~PCXu{$YHGA8Dp{{Ih5Q40V7 literal 5581 zcmcIoO>7(25q`VeC8;I#V_CFixv@90W0|f+|451*#b_N zmwCICWui%p25D6wMUnz;uD%pVZ$?i&x;N((GA<%@o1g)ji*It2!sx9tZ%K-hB_l!8 z1^9ku=Dj!H%o*ZaS_foi`2FYZ{>zWx8v#xZpKqbXPa>y5qp4`6_j&&WzkpmRc@y z$Hymz^CnCtjp|vv3sfg-(=fPAQiKT_VWK9mz;h%erDRR)0ge`6K|YiCOlBcI3-Vcr zh50PZXMmN@6h4bEB^!-5do4AEhHY!k%svdkrk?ED1=H||7cy+irHgJUm@T-y{kGRa z7YmfRX+1q}XK23ElBM&eW%iy(4D=S*I)Apx?*ib(4DIdf9q4sz%c5@Y$>S&cPaHpa zvc8w$%l?T>#v@)howoFOnofI48tKJ6j-%=Hn?*fe<+P^Lb0%}#ylGL(26n)4nFmze zvTRp(P1|zPY1RTzSSvOVIE3v$WU+MGK?x(RyDl?li!MdccHphSk9evL&Y`d$dhZyk zdMiDzTY8qVMBygGc;pS5$~gwjZ^+k?mUl;2cE)avENKs<*h*LTt;r9EA4uIRT}S-U zk(I9JarA}sJnO3Mu)*U^K*eY6ZD=-nToak3?a!mxj(3dy2$I&>jDdEVW2go zLj97aAdQ8!NUEt{)S`T*XwATH;WCj_GzB~Ki}>nj0WHuUz!B`$%4M6iHrA32#M`~r zT3k%{$RDC1ONafLqq;e&P89R5**k%8pbpzb%P{lm4V%3&m$z?BdLg}Frr)49z3{B= z&~%aIy)Z<`f|;SrkYKaggKru}``Yu++$Pg)`5K2zt9hX|nLeJczLSI;CrOg8y&)t^ z;=*KHV*5dqsn|RVjTZ!mS*{b8JjtQ?xlL>bRZ!t!dr=s|4#L=lSnAvaJW=Cdj5wXp z+#)Nx_q`uo=~Ulu_M2UuOVaNm+^?R@UkoqEs9R)~QP&w3V!&{u*I{s*%+#?Z$<;nP?CPm){~^wsx+>_I&*6N3X7QbbdVk(RgjvzWY&=+@`D&xk>Sj6oh*P zZkr9(K~&s2$H6X**3ksOT=zCeRiC7=m3;=PR;}IATiZ($Q0H|IXyUeKpxHJ(-}4N9 zLpPie*!B$mjoWz0_dFwjII>b)E*-iA0c`4dvqV)t^wk@tn^TzS+@Lzl;PQR5NR_@i zb$C$KZd#Bjfv|c=eaVD{>Qr906b3IN@f63U1x!boTiQ`Or@lQ)fD9%t?#%*)6b58H znP0B_Yh9X{%2P*C#7?8WVXiN=GF7S4Hgb z)NR<~tSew!aOh}HqP|2m-J|N93Cd>mf^(Fs?G&?F$i-C?fY}Rl9`m^bco0hvbqKce zeWK?FmD0`{S-nbO8!lWrT_v7VbGoA%RUc*4n|6^sVH7YxRbWsp%_>UOz>II_FR9zl zwN*Wnfr#O$FuK7uQ8`s}8MU(Dbjx3%vuu&;%>m7si`?u8F;VKSYB#(gPOh5{lsaaK z*D>rk8uF*LIKodm8VNkO(c1+GEAG^uQeTgX8Fxkn6Jwh0>z}EcKB4#q$~0OpR>zL! 
zqkzGnVAKy3<umn#T=+GLD{@+W)iPEYR1&FOrKW|I~2knmYw$r zN5PMl3qFNN!-~MjL7c}`Fx)~Fp#KgsThGkv1r!78;(Mp+E>|2PO||EIGcsDRnVZ1m z_5jABgr%UT@RWuZj9wVBeXO}@JnHAP^$)zKU|5+})=BHU2AMBF0qe;|u4ui{z=Sji zG(hI8*S@|aT{WQBTIK3hO2bm4zSMB=Tr4bVk6&O!*6$knWJ2A*JOs;~Zy*x$>-8HW$h_-Yze^11md*#HBFi>N6kB-07;sjxNUrDzSmP zxi4ZDmoBVCcU7YMmqu6i9=sc?w4eH0^wcAn?7AeZ21#f4C$CgmPJbRg4U->P(f+&1 zyCb08nTsN{mqZP_i{drxI+8z%#{1NAxQpBN-2H2x&3rmj{)JJQ$ShBol?k)#E-t(8 zR9v*skT{0cEDGaj+oJHYKjU5NG`H=vxbl-^`TA?+qz(&ag_i=*?Hvg$wQgZq`-|9R zwCui0w5J^1x5>0qzG=(7Kd-c${XBfO(Y9C0sn?cM*-9!~&aq0$SxzlhQj2iZdGR9Z zc3!-Ux?u;zDe)D5HskZ={2A|Bvs~{i#KhTqv3sHN$S+~V+rktsd`q~3-4!v1ohdrl z~J0Cy~$3SIc?dI9R{=e`Rsd8x7%w z&%n(Jf4%*TT?RR@~ diff --git a/orchestrator/research_manager.py b/orchestrator/research_manager.py index 79f3c67..34df206 100644 --- a/orchestrator/research_manager.py +++ b/orchestrator/research_manager.py @@ -8,9 +8,8 @@ import config class ResearchConfig: topic: str context: str = "" - expert_a_model: str = "gpt-4o" - expert_b_model: str = "gemini-1.5-pro" - expert_c_model: str = "claude-3-5-sonnet-20241022" + # Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...] + experts: List[Dict[str, str]] = None class ResearchManager: """Manages the Multi-Model Council workflow""" @@ -19,62 +18,88 @@ class ResearchManager: self.api_key = api_key self.base_url = base_url self.provider = provider - self.agents = {} + self.agents = [] def _get_client(self, model: str) -> LLMClient: return LLMClient( - provider=self.provider, + provider=self.provider, # Configured to respect provider or default to aihubmix logic inside client api_key=self.api_key, base_url=self.base_url, model=model ) def create_agents(self, config: ResearchConfig): - """Initialize agents with specific models""" - self.agents["expert_a"] = ResearchAgent("expert_a", self._get_client(config.expert_a_model)) - self.agents["expert_b"] = ResearchAgent("expert_b", self._get_client(config.expert_b_model)) - self.agents["expert_c"] = ResearchAgent("expert_c", self._get_client(config.expert_c_model)) + """Initialize agents with specific models from config""" + self.agents = [] + if config.experts: + for idx, expert_conf in enumerate(config.experts): + # Assign role based on position or config + # First agents are discussion members, last one is Synthesizer usually, + # but for equality we treat them all as members until the end. + # We'll assign a generic "member" role or specific if provided. + + role_type = "council_member" + # If it's the last one, maybe give them synthesizer duty? + # For now, all are members, and we explicitly pick one for synthesis. + + agent = ResearchAgent( + role=role_type, + llm_client=self._get_client(expert_conf["model"]), + name=expert_conf.get("name", f"Expert {idx+1}") + ) + self.agents.append(agent) - def collaborate(self, topic: str, context: str) -> Generator[Dict[str, str], None, None]: + def collaborate(self, topic: str, context: str, max_rounds: int = 3) -> Generator[Dict[str, str], None, None]: """ - Execute the collaborative research process: - 1. Expert A: Propose Analysis - 2. Expert B: Critique - 3. Expert C: Synthesis & Final Plan + Execute the collaborative research process with multi-round discussion: + 1. Conversation Loop (All Experts Round Robin) + 2. 
Final Synthesis (Last Expert) """ - # Step 1: Expert A Analysis - findings_a = "" - yield {"type": "step_start", "step": "Expert A Analysis", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name} - prompt_a = f"Please provide a comprehensive analysis and initial proposal for the topic: '{topic}'.\nContext: {context}" - for chunk in self.agents["expert_a"].generate(prompt_a, context): - findings_a += chunk - yield {"type": "content", "content": chunk} - yield {"type": "step_end", "output": findings_a} + conversation_history = [] + discussion_context = f"Topic: '{topic}'\nBackground Context: {context}\n\n" + + # Round-Robin Discussion + for round_num in range(1, max_rounds + 1): + for agent in self.agents: + yield {"type": "step_start", "step": f"Round {round_num}: {agent.name}", "agent": agent.name, "model": agent.model_name} + + # Construct prompt + if round_num == 1 and not conversation_history: + prompt = f"You are {agent.name}. You are starting the discussion on '{topic}'. Provide your initial analysis and key points. Be conversational but substantive." + else: + prompt = f"You are {agent.name}. Review the discussion so far. Respond to previous points. Defend your views or refine them. Keep the discussion moving towards a solution.\n\nDiscussion History:\n{_format_history(conversation_history)}" + + response = "" + for chunk in agent.generate(prompt, context=discussion_context): + response += chunk + yield {"type": "content", "content": chunk} + + conversation_history.append({"agent": agent.name, "content": response}) + yield {"type": "step_end", "output": response} - # Step 2: Expert B Critique - findings_b = "" - yield {"type": "step_start", "step": "Expert B Critique", "agent": self.agents["expert_b"].name, "model": self.agents["expert_b"].model_name} - prompt_b = f"Review Expert A's proposal on '{topic}'. Critique it, find gaps, and suggest improvements.\nExpert A's Proposal:\n{findings_a}" - for chunk in self.agents["expert_b"].generate(prompt_b, context): - findings_b += chunk - yield {"type": "content", "content": chunk} - yield {"type": "step_end", "output": findings_b} + # Final Synthesis by the LAST agent (or a specific designated one) + synthesizer = self.agents[-1] + yield {"type": "step_start", "step": f"Final Synthesis ({synthesizer.name})", "agent": synthesizer.name, "model": synthesizer.model_name} + + prompt_syn = f"""Synthesize the entire discussion into a final comprehensive plan for '{topic}'. + +Discussion History: +{_format_history(conversation_history)} - # Step 3: Expert A Refinement (Innovative Optimization) - findings_a_refined = "" - yield {"type": "step_start", "step": "Expert A Refinement", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name} - prompt_a_refine = f"Expert B has critiqued your initial proposal.\nCritique: {findings_b}\n\nPlease refine your proposal to address these points. Strengthen your arguments and fix the gaps." - for chunk in self.agents["expert_a"].generate(prompt_a_refine, context): - findings_a_refined += chunk - yield {"type": "content", "content": chunk} - yield {"type": "step_end", "output": findings_a_refined} +IMPORTANT: +1. Reconcile the different viewpoints from all experts. +2. Provide a concrete action plan. +3. 
You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the roadmap or process.""" - # Step 4: Expert C Synthesis & Visualization - findings_c = "" - yield {"type": "step_start", "step": "Expert C Synthesis & Visualization", "agent": self.agents["expert_c"].name, "model": self.agents["expert_c"].model_name} - prompt_c = f"Synthesize a final comprehensive plan for '{topic}' based on the refined proposal.\nRefined Proposal:\n{findings_a_refined}\nCritique Reference:\n{findings_b}\n\nIMPORTANT: Include a Mermaid.js diagram (sequenceDiagram, gantt, or flowchart) to visualize the roadmap or process at the end of your report." - for chunk in self.agents["expert_c"].generate(prompt_c, context): - findings_c += chunk + findings_syn = "" + for chunk in synthesizer.generate(prompt_syn, context=discussion_context): + findings_syn += chunk yield {"type": "content", "content": chunk} - yield {"type": "step_end", "output": findings_c} + yield {"type": "step_end", "output": findings_syn} + +def _format_history(history: List[Dict[str, str]]) -> str: + formatted = "" + for turn in history: + formatted += f"[{turn['agent']}]: {turn['content']}\n\n" + return formatted diff --git a/utils/__pycache__/llm_client.cpython-313.pyc b/utils/__pycache__/llm_client.cpython-313.pyc index 73d56022cd20834cacb697406466c9ee344b1ce5..1a8cdb095e7f815348bb66312bdd69c6db599e35 100644 GIT binary patch delta 1041 zcmZ`&PfX-g7=N#wcG~%e45RXIhAJ#<(HX`>h_FK9B7|KK)MktcA(K)H)6i)ruN_4a zqDNvh+4zjfW-n$>d)eJ==AtLF_tor%#F_M@2ctv}gT#vm^?e-)9P}N2{oeQce!uT) z-|Ic~hMGK0CS!=LPum;T#}AT!WA1z0NO3%#m#?t2zKW0*(WGVJy6A`s(qdX%OK3?= zE<~JYiJ!%nLCx0I0742XMc?k~>;dU@E|;0aorEKm#OG0oD@j_4KH{IFzj9sNdUxY5 z{!KipMo5B{1=5yj);!bP4dR|&@IVv#W_M zJlnV>-^N&>bMZbg162jO5m$=;Ate)x4kEs}=X>J6F5m;AVR|>NV6E|8{5Nhf51DZQ zl>s|MxJzO8(1H|T%K^N|pdM!si>)7je)E_6_h*y*{blKHIb>W9Dw^1_~B)_lu# z0^ak9Dv|#kPB1EUXP3-?44WPtAG|B9&YUB_^scPDF>hO@;}^1{@EFE13H#gr7UAqo@D5|>GhFWsAaza z3V6%$2@l)Qau^~Qh)mLEIxB*Toywo-7m7t;3IC9Ox-CG!?ga~@VJM-VXWDWY#?Vte zf5gMkrlp>rUkmjDGf@R{jXeU<|Da|$J7LR!b3FniSpn$uf+FeFs=HOU%{j704>C&k OB8T6?zoYOGmi+?{R{?qe delta 985 zcmZ`%&rcIU6rS03x4+mz%dfU*yBI30SXyI5X^vvm$`@t&or`%}xsb^h?*k_}&UW!S7s ztc4TbSxcAu-spqzx*Y^U8~*4@q&8~Xfx$LluQwd1s-7Y^5MJDxz; zg$0+N^U^92CtN~oA1OLQ(uP%+7IclAbNVgp@QQXH$%qzmhqyP>bBfk>&{|7L z(UFBNP0Ju_D3ue}F3sv=|g;O&n^*$Z&X+g@iWsdIo(n@R1UwWIDZsS~uc{&bmi y2A^#G!kc=Qn20PI#r
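For reference, the transcript format that _format_history feeds into the discussion and synthesis prompts, shown with illustrative entries:

```python
# Illustrative input; the bracketed-speaker format below is what the prompts see.
from orchestrator.research_manager import _format_history

history = [
    {"agent": "Expert 1", "content": "I propose starting with a modular monolith."},
    {"agent": "Expert 2", "content": "Agreed, but define module boundaries up front."},
]
print(_format_history(history))
# [Expert 1]: I propose starting with a modular monolith.
#
# [Expert 2]: Agreed, but define module boundaries up front.
```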