From 93128838da8c955242d29fa0a49cda0325944b97 Mon Sep 17 00:00:00 2001 From: dsa343 Date: Wed, 4 Mar 2026 12:04:47 +0800 Subject: [PATCH] 1.0 init --- gemini-api/SKILL.md | 133 +++++ .../__pycache__/gemini_client.cpython-310.pyc | Bin 0 -> 14177 bytes gemini-api/gemini_client.py | 474 ++++++++++++++++++ 3 files changed, 607 insertions(+) create mode 100644 gemini-api/SKILL.md create mode 100644 gemini-api/__pycache__/gemini_client.cpython-310.pyc create mode 100644 gemini-api/gemini_client.py diff --git a/gemini-api/SKILL.md b/gemini-api/SKILL.md new file mode 100644 index 0000000..e5ef461 --- /dev/null +++ b/gemini-api/SKILL.md @@ -0,0 +1,133 @@ +--- +name: gemini-api +description: Use when needing to call Gemini LLM for text generation or Gemini LMM for image generation via the local network Master API at 192.168.1.5:10900. Supports chat completion and image generation with concurrency control. +--- + +# Gemini API (Local Network) + +## Overview + +Wraps the local Gemini Master API (192.168.1.5:10900) for LLM text and LMM image generation. Max concurrency = 2. Uses `gemini_client.py` helper script. 
+ +## When to Use + +- Need LLM text generation (chat/completion) via Gemini +- Need image generation via Gemini LMM +- Any task requiring Gemini model capabilities on the local network + +## Quick Reference + +| Capability | Endpoint | Model Options | Timeout | +|-----------|----------|---------------|---------| +| LLM Chat (sync) | `/api/v1/llm/chat/sync` | `pro` (default), `flash` | ~200s | +| LMM Image (sync) | `/api/v1/lmm/image/sync` | `nano-bananapro` (default) | ~350s | + +## Usage + +### Helper Script + +All calls go through `~/.claude/skills/gemini-api/gemini_client.py`: + +```bash +# LLM: text generation +python ~/.claude/skills/gemini-api/gemini_client.py chat "你的问题" + +# LLM: with model selection +python ~/.claude/skills/gemini-api/gemini_client.py chat "你的问题" --model flash + +# LMM: image generation (saves to file) +python ~/.claude/skills/gemini-api/gemini_client.py image --prompt "图片描述" --output result.png + +# LMM: image + auto-remove Gemini watermark +python ~/.claude/skills/gemini-api/gemini_client.py image --prompt "图片描述" --output result.png --remove-watermark + +# LMM: image from event data (JSON file) +python ~/.claude/skills/gemini-api/gemini_client.py image --event-data event.json --output result.png + +# Batch: multiple requests with concurrency=2 +python ~/.claude/skills/gemini-api/gemini_client.py batch requests.jsonl --output-dir ./results/ +python ~/.claude/skills/gemini-api/gemini_client.py batch requests.jsonl --output-dir ./results/ --remove-watermark +``` + +### Python API (recommended for agents) + +When writing inline Python scripts (not via CLI), import the module directly: + +```python +import sys +sys.path.insert(0, r'C:\Users\ZhuanZ(无密码)\.claude\skills\gemini-api') +# Or on Linux/Mac: sys.path.insert(0, os.path.expanduser('~/.claude/skills/gemini-api')) +from gemini_client import image_sync, save_image_from_response, remove_watermark + +# Generate image (always use base64 format) +resp = image_sync(prompt="your prompt here") + 
+# Save to file +save_image_from_response(resp, "output.png") + +# Remove watermark (optional) +remove_watermark("output.png") + +# Or combine: generate + save + remove watermark in one flow +resp = image_sync(prompt="your prompt here") +save_image_from_response(resp, "output.png") +remove_watermark("output.png") # overwrites in-place +``` + +## Image Generation: Critical Notes + +### MUST use base64 format (NOT url) + +The `output_format="url"` mode is **broken** — returned URLs consistently 404. The client defaults to `base64` which works reliably. + +```python +# CORRECT — base64 (default, works) +resp = image_sync(prompt="...", output_format="base64") +save_image_from_response(resp, "out.png") + +# WRONG — url download will fail with 404 +resp = image_sync(prompt="...", output_format="url") +download_image_by_url(resp["result"]["image_url"], "out.png") # 404! +``` + +### Watermark Removal + +Gemini `nano-bananapro` adds a small **star/sparkle watermark** in the bottom-right corner of every generated image. Use `--remove-watermark` (CLI) or `remove_watermark()` (Python) to clean it. + +**Requires**: `pip install opencv-python numpy` (one-time setup). + +```python +from gemini_client import remove_watermark + +# Remove watermark in-place +remove_watermark("image.png") + +# Or save to a different file +remove_watermark("input.png", "clean.png") + +# Custom region size (for non-standard watermark placement) +remove_watermark("input.png", region_w=300, region_h=250) +``` + +**How it works**: OpenCV `cv2.inpaint` with TELEA algorithm. Detects the watermark region by corner position, creates a mask, and fills in using surrounding pixels. Works well on both solid and complex backgrounds. 
+ +## Constraints + +- **Concurrency**: Max 2 simultaneous requests (enforced by helper script) +- **API Key**: Uses one key per request, rotated from pool of 3 +- **Rate Limit**: 10 req/min, 400 req/hour (server-side) +- **Proxy**: Must bypass system proxy for 192.168.1.5 +- **Image format**: Always use `output_format="base64"`, not `"url"` +- **Watermark deps**: `remove_watermark()` needs `opencv-python` and `numpy` + +## Common Mistakes + +| Mistake | Consequence | Fix | +|---------|-------------|-----| +| Using `output_format="url"` | URL downloads return 404 | Use `"base64"` (default) | +| Forgetting `--noproxy '*'` with curl | Request hangs (proxy intercepts LAN) | Always add `--noproxy '*'` | +| Using `127.0.0.1` instead of `192.168.1.5` | Wrong host | Use `192.168.1.5` | +| Calling `download_image_by_url()` | 404 error | Use `save_image_from_response()` | +| Exceeding concurrency=2 | Queuing delays, timeouts | Use batch mode | +| Not checking `status` field | Missing errors silently | Check `resp.get("status") == "completed"` | +| Forgetting watermark removal | Star logo in bottom-right | Add `--remove-watermark` or call `remove_watermark()` | diff --git a/gemini-api/__pycache__/gemini_client.cpython-310.pyc b/gemini-api/__pycache__/gemini_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..144d7aa3293137763b9a1d0533b6c3009ce95e5f GIT binary patch literal 14177 zcmbt*TW}m_c3yW+&lQ8g9Rx2TTc9o+i8BBw>V_q?f=E#0iUhd;D6U%M&Nk+600Yd8 ze>VuC-O#R0ttHuEq{Meiv;kkEjW1PxaFr9gD$Y|XZ+Y!Jro3Bxa$fQf*&_U%(>;R$ zxZF+3gPMQ3|33fe|8nlBTGq^}gio(_*K%gA z)?@bYeZ1OR>ofam{boPz6Xw8@B9ecgnu8)`4vDlmEHdT+(PfT^ta;GR+Fcdpo_bF; z4++&eY$vylY%AtdqFdxXRLrCH$f7EG>|>(WHbftO`|YR2fNd;h#Gn}ZFm8_SfoMHV ziD7X7PmhbsJUZ-uW-%cSibLps)*cpz#S!t;qArfw&xvEeY}n6>r-6OcKEZFuisRxL zz~+TUfA#_K>|$Iz2gv6!mlOEXs64(jpR9O6ya?zQ#7R6UaPKj^uPDYo)Xf*gIC^$_ zC&VfAo)jAzTi#hRz_$J<1#GZ}W zL))r3F5VPV*tLoDTj_IRMx4jWrZMsYujskOlz3ZQ#E4VklK2+jUb0V%cUHB}R5@V3 
zbWg!)X>D~RBG0eZ7kV!TUu_VQ8(5r?y^yC)XQs9 z+V%1VUAwYTEoL&;9l%az41Ag!?lORvcs8Xn&!pG{FyS8EXsk)&_L}Vw#!=(_LZQ|W zcGYOghQC;~oaI~8=h`()&T`kJZHyORz4bE@hx05HfTGsKD}?QoWu@7EL1WEruDOP^ zowcf4Y}S|l73d0u2wMefCtOCMAnjV?j$K%{u(vf!uKqtw*1m%SDhSK948AO`IM}o{ z`rXO?Ol%95TVBS>ctah=QKkJ0Cs_~`X%xpoT+Z07%tMkkGkHBo&W1yRw|kG zh3`3howv}q%BMrB;;OFZ##{)sVdbE*skjicuPT;y10q<*-@TZs_{y9zp*V-uRTYe< zD(~S-eR*x>A)&>Vrb}2rsoE&71}U&$>7D8K=7J1&R_emO8^o%1{Yy270qjdf4r0{* zxrFB2{_$^Z{lj1W@=vyY@gM*8lbgkI)mjtwO=q=ItvWY% z_ba;GWw4;TRJZTCCF~`=bsV^z9GZmIt7g>9BR$spwH}kh=n7JHT{Ih&x*H^$)<(5q zi69w`3zBZ7W`lr>Wnc^|Fo;tq&r`%pr*f|IRB&pUIh>l?wQ*ZjHq*d;*iRF#72pzGB70xk zOo?tky^{T05jj7Fx$B}w^nQU8PvB%lW;6Xk8lshd*Hz%^-uTrBSC8n!$Q-Tr=Zxvu z_`_&Sx7+Kd??Ev>`5o&@zTZ1JSE(;mZR6V2xp~8KNd~&1%(V=sX_qUDl`_k2oNn<# zUJbH#-GPKGSx&i9`6|BVE*4%hbs7%d)Je7kiJA>*EWGSFh-#8J^BYau>xJyER>~~Z z$4KyemHtivL|b?Vw5iHNIN+HeDeY#}D%*Hb!Y0;tyf}#g!_y{>AhBpws3L}J>H1d5x=E~IE{Pb)I(j(C6l|fvr)tU})*UQiVlDRo!2~k>x zNP!GWM&c*XSwQI|X@Z2X2|9=q%{nrHnFczKPtqkw)*{^L>APi{H9!z^T$#_v19&V) zs5wXtEis5e_{pPu1rZwH4Nf!A7aC$CP;KVWuo)tdo>H^nxFwo;2`bQj9i7e#XtLU% zn$xmsR@K01IW?u8c$ACvtIw#ZALP`d+#W)!?Z|Hcyc|Q*`j9wki=;omjZimZE{i0x zvd$~FrodLQdkQm>;;Vk#*Y4>NBWc^P^Hw!^%ui?#NOK`efz`IP&7?^9$vcWu5MZS{ zioD<^z-(kGrhlj%1jCAqPj+HgbeDY)catk%jQh-*o3Y!eHr814n#hIZ3z667z0MT+ zt+}f+5qU!JLkqfc!?4P-;W&m>t+puDF`(Co5J0KL8TmXG8Km0^LOw_L%32yfKmOuJ zKlz>i{11Qk8(#ku#uLJd?eJJ=lI;8ZhFb=x-*}h>yq7H&i_zmrN{y*8sH;? zWFf&q^ML2Su( zgLqSt(2O}7AY8`vrAQ10pkz?9b}XM zArUEMaeY$NRZUH4IaQLvct{jmCU$9khzr_OByiIqlr<^(sEK_HWG_G2V? 
zU411E0S&(ZVmG@P_v2fMZH2{byd`E?^}X0BWJMfwknoPaJw0zoYu#AbfO0k#VSja) zn8hH5=MX2yFe7PD^E8^7d{UmE8&X8f_3|WL6{smvGlnLg2zQnCL%u={v+b*NMVq;Q zAN3NB$HD_vd08z1cFd}@kI(GLE9n2pB7c7T&4)^_qRa=Wn(dIy>I88%K!dIhn={DN zth*(*u?hhj^w#ZlSTyxJP|_?h%W>dqeQ59L%LbhY}6h2CY7>V%*W+5V3yyeW|o>cYUa^+ zLu1g}V|OOTs@2*Unb>0v(DP2`HKvy!d+y@Yd};pDmFcV3=drO*!WD`~GE6$eUYc;0 z2xk=!cz3n92708w#6RVd@?G=?i3Q8CUpXBlLL)rLuu)rL9n=L~W|fy~P(M!49h#=4 z#RhC;_pz;{t>vvGZjLrl16eAv3mY33+8J!G>?1)aVz}1i0c5*j&)o0~8{L<#T)R3uKQ%KyX~04=_DuZjXt>}}7%E zqei$o5@9f)9L5I4=NK;gS8!@(uFe~xv0f)NW-UPJpEx~!8Yn`nMukA}%6fGJNU9YE z2~~NKvEgi)8s5h?$?s6}9yKN!Z@5howb~f3`*E7+zI0{k?dcAp*o&hB`3T|NM-&;x z%2eUuu>+1$GHD(1s`Z7exzbCkyhm`6&}v(g&|qMmEvP`KHElH}hlpvo zM{o6SYh-!dQ-4Y2o_Ixps)K05sKmC~%=1;UeFckg6M|08xUfed@Kh*M5_ai(+NU`d zb`XBsu<6NaY>7KxWr6p0r@*5=#tr)sZ-gBy*Sv&_UthfkyOAx<~WDn6C zp%#qTpz}IuG$dqv#esnDP~`kdR_2oR?!im@g9R zEGo41g_$57!jN%x769IOH0yc;n^NDq5wyA%AGJ*ZudB$*Ubmb{!+UzB5$z>=+Z`B+ z2JBsc!u*SE-S7b47J`_iq^AHqUoLn7FKS7 zUadH8pp&Tq1A(|eewUgO8Z!l?EE-KLb*aSKGf)@h0zphtvyb0K=uo0p5katVLBwz$ zzz(U$T;d39g`+VYt{!Q=%nq!GN4l0pkB0jk+7!v`9Zl7D^c^xshZC}bp>X&tXSGxj zdsvg4zP3$q88;BGU^;euH0Uz0ovV`Fz^nx3+5#I!tkkv~B33%KpOs)cymmg4eyso# zh>rIX#)J_~XjevuL`n2m^!gtcZ}OX%AnQCyHo#0i#yUSVC&(bm>vDxa_zXx(%Vlcz zZF6q#ss05(SmE`vb>G@jxk^LntoCde`5)~K`xD%>WR`?9X#2=oUGo)e9hqa{HqzF` z1mw~^$TApY@fGrd;U%*{M#frl8|FVGRI(+Z>M6t`COS72l1LRPJ__=&VdzIk(H;{W z#pTXX)R#|t-)|p9cmOPNNEC!T`@9n#3{g1bf?XAn#JMpwJ9BB~ZD_|1CL}YEVoPX>|o_LRshG!)zCIl~7E%RB@35>ZMDL5Aid8Ni|C z$cE%z^HK7lCqs_nLuVUu*L-{u4M-bk{~BNC2%1bhr-FyTJrqJYvXjMin+ymNfwG3{ z5<^y&mfeRsRG55&`ZJX}VvZ%E*dS4ENQBXXl(d%+^enBnua<)x;4HWlC25Juno}lI zvh^V^q)k!34%A)-4x3h0Hp4)_x|zJ8)VmO$&^8dqqCXdgJ?WWsSV=-#Q{)j)n+l46 z28H%Z`6=!_>B3?PnVEHIY9#?`jFT&PQ`<~$X56HoLCiuEiNnffmz#nm)-?z{wVB1W z9$iDjt)%_5pIOv!l@Un@g&gQQ1($Rg@9gq(z>*zN0Mk99017x4HIWvX?YOGcUj=Lz zVEqsl;-v+b-2-!&cMxSv?*-qtsJ z+}ujfR-UOYxXb?)(EYG7djLBi`j)h9iUN`q5cLzUGXL~j{{^Ht z%Q0Xh3c>yq@5G-5)4eoxQcM=xp$%Cm?X%Fg^Dj%ZHvr6 
zgu`Kk5?h8OS74Ge+OJ}b%=YEpV5otR3arTn`{Yk}`;=UA!r(nw61${3XtlYJ$u#F&+j8o&MUu)hC<=x}h zmM4viHZl6oN4LAAy$n8S%rc{sNoJJqoGM0-jT2Q%BJyCIHJn-lCLAsokhE~Bjdeue zBPxmPwB4hc$FYQaziY9zVQ;ThAi*cGz$VrSLWE;Up|tXyLX@MaBLcS(nsN{?%8X~- zXeBYQLC9(#cSO>h@a!8(rX>$A8kZv6B%6Qk+YleL>SW1LI4|7jNkftouoTL@tvB;3 zGMX;pl(LSlhdbQt?Ch9b(P>xhMb}Kbjb4+b&pycy@|2VIwHl>Z8CzA~PhiAs%QjeQqjUC%V!3yrn9;O9ZyS+<&X5JPw% zh(TK< zM5!p4i9^Pa8e5m<;H4`!W-rZ8m%epv`t8!UuTNdRH2+?{Pc~_7tj%uIm88a@#--*i z8dIw`%@~ZzKwS>hbu;cD{b!q+T*r6W)N8QxLK`%O7w@-!dc#assm*Br9Vj_dq75Sy zg+Fro^&M>|m4KnCrJxx5wOovz48~Fv@l=iIJB%mPH>m1*7KW(&5V*>8I;~F(7o^@e z?nUYu=@{gsDSlFcv-L;nzYW9v?IaAlWrLjtjDX{nS7#8R&XgRxW;K@^($2>MZLuEc z;3ov>R@dow|4Ii+at(?Y1iL#9`ESMHa1NG(sL>~wQlJ(u1l?@VIIdG-qdpS}J0#Zv z9606vO6#>fVO_#POQR(^{~nF2tf(JHnGqF~z8U*S{U`?A1ZhZh zXdfvbX?TJx$%iR;tI)dVN%QJ1d{qt!r{THAwh{={!7?N}@Ah-B4pV!q!^zHV@?!|> zLhF90h_3s9_bCvS^*#x6cD7*|v@jBCt(AsxT~PU+bnjW?R4CV`5za9{yR}K<7e-hI@l?J<%`D0;76G-+i4!sEX)^-7tXnf1hmjt>njw7$loY z3_U<-bu;euv8@!p2TS2&70K+hj~D=UALl5B&miVdAA#+b+4ulag9Jwny8OgF6;BV) zQ)Hsh|3RW24}U3YfcbW}=F7xCG85=@07!yh`#H21=xfL)21$f0DdWRF^p~2cBT;Wl zBF2t5;tX!#EbWa5f`Q_kvXr0Vu?e#^j69&1ocxY(6Z!97>Kz@sF)51kb7+9XDMpnb zh> z1A5K((IBaWd?q`zENVhu{AE1JCm|8V-9RUTrPbUnv2x)>lV@(Gz|lM-cD|uR{taOz z-#U~Vod3y|Y)*08$o|;JNLG-io`twO!eS9ARyFZ;Hcno43V#{=CjL2>7{+gg<6`7T zjBp|-tLgvSJI=}b7S`e!-WcbNVylPV169~Xp=he!K=}Em%1>sEQOpHWQ@Pa*176+K zJ_hN!>c?Ogc;#S>U(;@*T7;LA*UW^owot3MEaFI97RLj%B7YY+u=vmM0~Ljwe#h!F zXDAX)X`46B(Ar75B`CGLli2Z|e!@z2$IR@RIS?R^t3Y^1m_XL*L6YU#jQn+UhVxVd z^|5?O>P*$DEePw4b2JM=l2W{5PkMg@EBgVLPfH>e1pcj68?xJxw1%MP%G7sC=dRA2 zyFNQRJ#+58AZ^1gpxP5#Qc@~NP@PO&u+|K82c`l-=)a05yHrA{QkG_Ba&e7nRva@C zUW5zLc~3+5-=mRCT1-{ld!{~2i#h~|9P}%4ibt|)LQ6c#>b(fu9Qmi-l&;GEfX8H} zv_FS&MI!PXzRvfcdPvy_6@qC1SK3O90}T*7xYIlCAhb9wuWo{FR*)|gNs)pJ2eB8b z3y4S-vPhg~QQ?I<2rmtHIX6Suz$FdBFc$UoyIo>H47Trw+$^fPcJB`eMU04p{G9L~ zLPZzhNB9F6kKpN3(NnrViu+?7_l9^HJ)`J>i_If)ZnrqjGa~qB#Ixc#@w_;}&k27X z6<&lN0TeucQJjqCPj>~>d4*%?E*v}7F?L)`M9|7sj)E_Ke5FUEx%TQ5ls8oGO0WDo 
zxPpD+6kUs#zTntbyQja^;|?H19A6p4cL?8MaT@A_iW?4ym$y~<6L&VTcusxa5NFVb5(!+NW!NshegjT_ zR(#XXKrOxLcj4)jc#DGQ;vCsMD}!1edQb&dBQef!R5A2vFP-8nvXX5Z#B>*X_Zh^iZ+RzSYbEK9%nRUpkFIHsp^33_98gElZ+5Kn zIK$ISN5uP`@N_bBFgd7&N%G8R+Qt-&5?I9EFko-`{rgtmrq0Mr>Hnag#)1-X#fAz| z;w_x|fIonlybCPfb)WSIDAgfK4-lk6{UXs3!F`W+XV4!6r)LD;X z)-hT1UZT~CMHEntxCMViEQw{vt%GiH<*)lq2pXv|NqaG&MaS@G2`o%`b($eQ_wkVyHgDhf;wJb6ak}*Rak^d zCQL^~CfTL68lnfNQ9=PTYWmnDqY#6Tp8=gWK!vE}7q>D4h?(LY`m$YZvi<4}Qo)=N zCGfcj6?U$dj7mvm7qFOdE+$1CR5HrXn#uBVqf)jVuX|5@sbm{iV_R4L8BHJyMAqFx znEsdKo_eW50VN->AHD85{-t_VlxM+Qq9O7>Qu7-GIr!5mXK8#mz#BM^mA12cQ77KG zTMz0TX$9SObLa*a+i)2RP=yYtrxhAr@71ull^$`vF0Y1*=zqK})zi_p4yzkwns&*R zYqodhiE+z4K}HH<<_&9+iidaWfyt0Wq7ui$>o?@sG`K|S;Q0;tnw<-bX1MzCEw=~le1nL)k$1ba@On)k3K3g;fU2&>WgMyR2fBi zf)Lc}I*G()i16%J>Mgm2DR?KMS+(ejvb3E<)C$`PPGid7#Fz^lWdp@hW&>Usic;Yn zw*53OHOB>fsO{^D<_14fmZ11%caVN23RB?2SI!0zL{h&moF8`7`QY~gfv=k*} z;mVe2_+Rm3umke}#|q3C2rJAol1oTwf6)J*5;R3LB_|xIQZ#g>Ip55FA0n}pI@M1* zkMJ~u+P)m}_h@4|giL$+FBhZVemvn=!cI;nsMd8#Llgn^ZTL!8l!+&BMRj(=>Ilz-W|6|%dR6@g+Sf->U4R+wq-c&i1VK>{!bWdC@u)Y} z!leIS`S;j@AXO^So|H;psc)epAD`j)ziiOgO83n>6(I}Tn%tq|o zGx5p4Ay^7cAv6XVgiHbdmjXE2%pr$ett=EH6%&>p#t2@gCXU9;Mlc+g;tFG#r2c;J zWcZ&iZZUH6IfTGrf{=iF2niD{F&i_OyiQj)sVPv?i^kOPZ#kRCxx>^UTST3 z-GVePbx`Imd8W>QE!L{`8=z3d`6DzSo-7@3>u&@{iKEs*dT}B?`pfEcpVCItM~6rH zwJt6_c@!3%pzuV~9%&l>4-AWI#i VQOH8YESa6uDdqc_euUU|{sp~;mX`nk literal 0 HcmV?d00001 diff --git a/gemini-api/gemini_client.py b/gemini-api/gemini_client.py new file mode 100644 index 0000000..1fd350b --- /dev/null +++ b/gemini-api/gemini_client.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python3 +""" +Gemini API Client - Local Network Master API wrapper. +Supports LLM chat and LMM image generation with concurrency control. 
'''
"""
# '''
# The three lines above are a quote-balancing guard: in the surrounding diff
# context they close the module docstring opened just before this block, while
# parsed standalone they collapse into a single harmless string literal.
#
# Gemini API Client - Local Network Master API wrapper.
# Supports LLM chat and LMM image generation with concurrency control.
#
# Usage:
#   python gemini_client.py chat "your question" [--model pro|flash] [--temperature 0.7]
#   python gemini_client.py image --prompt "description" [--output result.png]
#   python gemini_client.py image --prompt "description" --output icon.png --remove-watermark
#   python gemini_client.py image --event-data event.json --output result.png
#   python gemini_client.py batch requests.jsonl [--output-dir ./results/]

import sys
import os
import json
import argparse
import base64
import time
import threading
from urllib.request import Request, urlopen, ProxyHandler, build_opener
from urllib.error import HTTPError, URLError
from concurrent.futures import ThreadPoolExecutor, as_completed

# -- Configuration ----------------------------------------------------------

MASTER_URL = "http://192.168.1.5:10900"
API_KEYS = [
    "sk-gemini-api-key-002",  # primary — routes to asus2023_chat (verified working)
    "sk-gemini-api-key-001",
    "test-api-key-001",
]
MAX_CONCURRENCY = 2            # server-side limit; also enforced client-side below
DEFAULT_LLM_MODEL = "pro"
DEFAULT_LMM_MODEL = "nano-bananapro"
CHAT_TIMEOUT = 200   # seconds
IMAGE_TIMEOUT = 350  # seconds

# -- Proxy bypass for LAN ---------------------------------------------------

# The Master API lives on the LAN; a system proxy would intercept and break
# the request, so build an opener with an empty proxy map.
_opener = build_opener(ProxyHandler({}))  # bypass all proxies

# -- API Key rotation (thread-safe) -----------------------------------------

_key_index = 0
_key_lock = threading.Lock()

def _next_api_key() -> str:
    """Return the next API key from the pool, round-robin, thread-safely."""
    global _key_index
    with _key_lock:
        key = API_KEYS[_key_index % len(API_KEYS)]
        _key_index += 1
        return key

# -- Low-level HTTP ----------------------------------------------------------

def _post_json_once(endpoint: str, payload: dict, api_key: str, timeout: int = CHAT_TIMEOUT) -> dict:
    """Single POST attempt with a specific API key.

    Never raises: HTTP, connection, and decode failures are converted into
    a ``{"error": ..., "status": "failed"}`` dict so callers can retry.
    """
    url = f"{MASTER_URL}{endpoint}"
    data = json.dumps(payload, ensure_ascii=False).encode("utf-8")

    req = Request(url, data=data, method="POST")
    req.add_header("Content-Type", "application/json")
    req.add_header("X-API-Key", api_key)

    try:
        with _opener.open(req, timeout=timeout) as resp:
            body = resp.read().decode("utf-8")
            return json.loads(body)
    except HTTPError as e:
        body = e.read().decode("utf-8", errors="replace")
        return {"error": f"HTTP {e.code}: {body}", "status": "failed"}
    except URLError as e:
        return {"error": f"Connection failed: {e.reason}", "status": "failed"}
    except Exception as e:
        return {"error": str(e), "status": "failed"}


def _post_json(endpoint: str, payload: dict, timeout: int = CHAT_TIMEOUT) -> dict:
    """POST JSON with auto-retry across all API keys on failure.

    Tries at most ``len(API_KEYS)`` keys.  Only recognizably key-bound
    server errors trigger a retry with the next key; anything else is
    returned to the caller unchanged.
    """
    last_resp = None
    for _ in range(len(API_KEYS)):
        api_key = _next_api_key()
        resp = _post_json_once(endpoint, payload, api_key, timeout)
        if resp.get("status") == "completed":
            return resp
        # Retryable server-side errors (not logged in, tool not activated, etc.)
        err = resp.get("error", "")
        if "未登录" in err or "All retries failed" in err:
            print(f"[retry] key {api_key[:12]}... failed: {err[:60]}, trying next key", file=sys.stderr)
            last_resp = resp
            continue
        # Non-retryable or unknown — return as-is
        return resp
    return last_resp or {"error": "All API keys exhausted", "status": "failed"}

def _get(endpoint: str, timeout: int = 30) -> bytes:
    """GET raw bytes from the Master API (raises on HTTP/connection errors)."""
    url = f"{MASTER_URL}{endpoint}"
    api_key = _next_api_key()
    req = Request(url, method="GET")
    req.add_header("X-API-Key", api_key)
    with _opener.open(req, timeout=timeout) as resp:
        return resp.read()

# -- High-level API ----------------------------------------------------------

def chat_sync(messages: list, model: str = DEFAULT_LLM_MODEL,
              temperature: float = 0.7, max_tokens: int = 4096,
              new_conversation: bool = True) -> dict:
    """Synchronous LLM chat.  Returns the full response dict.

    Args:
        messages: List of ``{"role": ..., "content": ...}`` message dicts.
        model: ``"pro"`` (default) or ``"flash"``.
        temperature: Sampling temperature forwarded to the server.
        max_tokens: Server-side completion-length cap.
        new_conversation: Start a fresh conversation on the server side.
    """
    payload = {
        "messages": messages,
        "model": model,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "new_conversation": new_conversation,
    }
    return _post_json("/api/v1/llm/chat/sync", payload, timeout=CHAT_TIMEOUT)


def image_sync(prompt: str = None, event_data: dict = None,
               attachments: dict = None,
               model: str = DEFAULT_LMM_MODEL,
               output_format: str = "base64") -> dict:
    """Synchronous image generation.  Returns the full response dict.

    IMPORTANT: Use output_format="base64" (default). The "url" format returns
    URLs that are NOT downloadable (404). base64 is the only reliable method.
    """
    payload = {"model": model, "output_format": output_format}
    if prompt:
        payload["prompt"] = prompt
    if event_data:
        payload["event_data"] = event_data
    if attachments:
        payload["attachments"] = attachments
    return _post_json("/api/v1/lmm/image/sync", payload, timeout=IMAGE_TIMEOUT)


def save_image_from_response(resp: dict, output_path: str) -> str:
    """Extract image from API response and save to file.

    Handles both base64 and URL formats (base64 preferred).
    Returns the saved file path, or raises ``RuntimeError`` on failure.
    """
    if resp.get("status") != "completed":
        raise RuntimeError(f"Image generation failed: {resp.get('error', resp.get('status'))}")

    result = resp.get("result", {})

    # Prefer base64 (reliable); probe the known field-name variants.
    b64_data = result.get("image_base64") or result.get("base64") or result.get("image_data")
    if b64_data:
        img_bytes = base64.b64decode(b64_data)
        with open(output_path, "wb") as f:
            f.write(img_bytes)
        return output_path

    # Fallback: try URL download (usually fails with 404)
    image_url = result.get("image_url", "")
    if image_url:
        try:
            return download_image_by_url(image_url, output_path)
        except Exception as e:
            # Show the URL as actually resolved: only relative paths get the
            # MASTER_URL prefix (the old message prefixed absolute URLs too).
            resolved = f"{MASTER_URL}{image_url}" if image_url.startswith("/") else image_url
            raise RuntimeError(
                f"base64 not in response and URL download failed: {e}. "
                f"URL was: {resolved}"
            )

    raise RuntimeError(f"No image data in response. Keys: {list(result.keys())}")


def download_image(task_id: str, output_path: str) -> str:
    """Download generated image by task_id. Returns saved path."""
    data = _get(f"/api/v1/lmm/image/{task_id}", timeout=60)
    with open(output_path, "wb") as f:
        f.write(data)
    return output_path


def download_image_by_url(image_url: str, output_path: str) -> str:
    """Download image from the result's image_url field.

    WARNING: URL downloads frequently return 404. Prefer base64 format.
    """
    # Relative paths are served by the Master API itself.
    if image_url.startswith("/"):
        url = f"{MASTER_URL}{image_url}"
    else:
        url = image_url
    req = Request(url, method="GET")
    req.add_header("X-API-Key", _next_api_key())
    with _opener.open(req, timeout=60) as resp:
        data = resp.read()
    with open(output_path, "wb") as f:
        f.write(data)
    return output_path


# -- Watermark removal -------------------------------------------------------

def remove_watermark(input_path: str, output_path: str = None,
                     corner: str = "bottom_right",
                     region_w: int = 260, region_h: int = 200,
                     inpaint_radius: int = 12) -> str:
    """Remove Gemini watermark (star logo) from generated images.

    Uses OpenCV inpainting to cleanly erase the bottom-right watermark
    that nano-bananapro adds to all generated images.

    Args:
        input_path: Path to the watermarked image.
        output_path: Where to save. Defaults to overwriting input_path.
        corner: Which corner has the watermark (default "bottom_right").
        region_w: Width of the watermark region in pixels (for 2048px image).
        region_h: Height of the watermark region in pixels.
        inpaint_radius: Radius for cv2.inpaint (larger = smoother but slower).

    Returns:
        The output file path.

    Raises:
        ValueError: If ``corner`` is not one of the four supported corners.
        RuntimeError: If opencv-python/numpy are not installed.
        FileNotFoundError: If ``input_path`` cannot be read as an image.

    Requires: pip install opencv-python numpy
    """
    # Validate before the heavy imports: previously an unknown corner
    # produced an all-zero mask and the call silently did nothing.
    valid_corners = ("bottom_right", "bottom_left", "top_right", "top_left")
    if corner not in valid_corners:
        raise ValueError(f"corner must be one of {valid_corners}, got {corner!r}")

    try:
        import cv2
        import numpy as np
    except ImportError:
        raise RuntimeError(
            "Watermark removal requires opencv-python and numpy. "
            "Install with: pip install opencv-python numpy"
        )

    if output_path is None:
        output_path = input_path

    img = cv2.imread(input_path)
    if img is None:
        raise FileNotFoundError(f"Cannot read image: {input_path}")

    h, w = img.shape[:2]

    # Scale region size proportionally to image dimensions (calibrated for 2048px)
    scale = max(w, h) / 2048.0
    rw = int(region_w * scale)
    rh = int(region_h * scale)

    # Build inpainting mask for the specified corner
    mask = np.zeros((h, w), dtype=np.uint8)
    if corner == "bottom_right":
        cv2.rectangle(mask, (w - rw, h - rh), (w, h), 255, -1)
    elif corner == "bottom_left":
        cv2.rectangle(mask, (0, h - rh), (rw, h), 255, -1)
    elif corner == "top_right":
        cv2.rectangle(mask, (w - rw, 0), (w, rh), 255, -1)
    elif corner == "top_left":
        cv2.rectangle(mask, (0, 0), (rw, rh), 255, -1)

    result = cv2.inpaint(img, mask, inpaint_radius, cv2.INPAINT_TELEA)

    # Determine output format from extension
    ext = os.path.splitext(output_path)[1].lower()
    if ext in (".jpg", ".jpeg"):
        cv2.imwrite(output_path, result, [cv2.IMWRITE_JPEG_QUALITY, 95])
    else:
        cv2.imwrite(output_path, result)

    return output_path


# -- Batch execution with concurrency control --------------------------------

# Belt-and-braces cap: the pool below is already sized to MAX_CONCURRENCY,
# but the semaphore also guards any future direct callers of the wrapper.
_semaphore = threading.Semaphore(MAX_CONCURRENCY)

def _run_with_semaphore(fn, *args, **kwargs):
    """Run ``fn`` while holding the global concurrency semaphore."""
    with _semaphore:
        return fn(*args, **kwargs)

def batch_execute(requests_list: list, output_dir: str = ".",
                  remove_wm: bool = False) -> list:
    """Execute a list of requests with max concurrency = 2.
    Each item: {"type": "chat"|"image", ...params}
    Returns list of results, ordered by the original request index.
    """
    os.makedirs(output_dir, exist_ok=True)
    results = []

    def _execute_one(idx, item):
        req_type = item.get("type", "chat")
        if req_type == "chat":
            messages = item.get("messages", [{"role": "user", "content": item.get("content", "")}])
            resp = chat_sync(
                messages=messages,
                model=item.get("model", DEFAULT_LLM_MODEL),
                temperature=item.get("temperature", 0.7),
            )
            return {"index": idx, "type": "chat", "response": resp}
        elif req_type == "image":
            resp = image_sync(
                prompt=item.get("prompt"),
                event_data=item.get("event_data"),
                model=item.get("model", DEFAULT_LMM_MODEL),
                output_format="base64",
            )
            if resp.get("status") == "completed":
                out_path = os.path.join(output_dir, f"image_{idx}.png")
                try:
                    save_image_from_response(resp, out_path)
                    if remove_wm:
                        remove_watermark(out_path)
                    resp["local_path"] = out_path
                except Exception as e:
                    resp["save_error"] = str(e)
            return {"index": idx, "type": "image", "response": resp}
        else:
            # BUGFIX: an unknown type used to fall through and return None,
            # which later crashed results.sort(key=lambda r: r["index"]).
            return {"index": idx, "type": req_type,
                    "response": {"status": "failed",
                                 "error": f"Unknown request type: {req_type!r}"}}

    with ThreadPoolExecutor(max_workers=MAX_CONCURRENCY) as pool:
        futures = {
            pool.submit(_run_with_semaphore, _execute_one, i, item): i
            for i, item in enumerate(requests_list)
        }
        for future in as_completed(futures):
            results.append(future.result())

    results.sort(key=lambda r: r["index"])
    return results

# -- CLI ---------------------------------------------------------------------

def main():
    """Command-line entry point: chat / image / batch / health subcommands."""
    # Ensure UTF-8 output on Windows
    if sys.platform == "win32":
        sys.stdout.reconfigure(encoding="utf-8")
        sys.stderr.reconfigure(encoding="utf-8")

    parser = argparse.ArgumentParser(description="Gemini API Client")
    sub = parser.add_subparsers(dest="command", required=True)

    # chat
    p_chat = sub.add_parser("chat", help="LLM text generation")
    p_chat.add_argument("content", help="User message content")
    p_chat.add_argument("--model", default=DEFAULT_LLM_MODEL, choices=["pro", "flash"])
    p_chat.add_argument("--temperature", type=float, default=0.7)
    p_chat.add_argument("--max-tokens", type=int, default=4096)
    p_chat.add_argument("--system", default=None, help="System prompt")

    # image
    p_img = sub.add_parser("image", help="LMM image generation")
    p_img.add_argument("--prompt", default=None, help="Direct image prompt")
    p_img.add_argument("--event-data", default=None, help="Path to event_data JSON file")
    p_img.add_argument("--output", "-o", default=None, help="Output file path")
    p_img.add_argument("--model", default=DEFAULT_LMM_MODEL)
    p_img.add_argument("--remove-watermark", "--rw", action="store_true",
                       help="Remove Gemini watermark (bottom-right star) after generation")

    # batch
    p_batch = sub.add_parser("batch", help="Batch requests (max concurrency=2)")
    p_batch.add_argument("input_file", help="JSONL file with requests")
    p_batch.add_argument("--output-dir", default="./batch_results")
    p_batch.add_argument("--remove-watermark", "--rw", action="store_true",
                         help="Remove watermark from all generated images")

    # health
    sub.add_parser("health", help="Check API health")

    args = parser.parse_args()

    if args.command == "chat":
        messages = []
        if args.system:
            # NOTE(review): the system prompt is sent with role "user" —
            # confirm whether the Master API accepts role "system" before
            # changing this; preserved as-is to avoid a behavioral change.
            messages.append({"role": "user", "content": args.system})
        messages.append({"role": "user", "content": args.content})
        resp = chat_sync(messages, model=args.model,
                         temperature=args.temperature,
                         max_tokens=args.max_tokens)
        if resp.get("error"):
            print(f"ERROR: {resp['error']}", file=sys.stderr)
            sys.exit(1)
        # Extract content
        result = resp.get("result", {})
        content = result.get("content", "")
        print(content)
        # Print metadata to stderr
        model_used = result.get("model", "unknown")
        usage = result.get("usage", {})
        fallback = result.get("fallback_reason")
        meta = f"[model={model_used}, tokens={usage.get('prompt_tokens',0)}+{usage.get('completion_tokens',0)}"
        if fallback:
            meta += f", fallback={fallback}"
        meta += "]"
        print(meta, file=sys.stderr)

    elif args.command == "image":
        event_data = None
        if args.event_data:
            with open(args.event_data, "r", encoding="utf-8") as f:
                event_data = json.load(f)

        if not args.prompt and not event_data:
            print("ERROR: --prompt or --event-data required", file=sys.stderr)
            sys.exit(1)

        # Always use base64 — URL download is broken (returns 404)
        resp = image_sync(prompt=args.prompt, event_data=event_data,
                          model=args.model, output_format="base64")

        if resp.get("error"):
            print(f"ERROR: {resp['error']}", file=sys.stderr)
            print(json.dumps(resp, ensure_ascii=False, indent=2), file=sys.stderr)
            sys.exit(1)

        if resp.get("status") != "completed":
            print(f"Status: {resp.get('status')}", file=sys.stderr)
            print(json.dumps(resp, ensure_ascii=False, indent=2))
            sys.exit(1)

        result = resp.get("result", {})
        prompt_used = result.get("prompt_used", "")
        gen_time = result.get("generation_time_seconds", 0)

        print(f"Prompt: {prompt_used[:150]}...", file=sys.stderr)
        print(f"Generation time: {gen_time:.1f}s", file=sys.stderr)

        if args.output:
            try:
                save_image_from_response(resp, args.output)
                print(f"Saved to: {args.output}")

                if args.remove_watermark:
                    print("Removing watermark...", file=sys.stderr)
                    remove_watermark(args.output)
                    print(f"Watermark removed: {args.output}")

            except Exception as e:
                print(f"Save failed: {e}", file=sys.stderr)
                sys.exit(1)
        else:
            # No output path — print base64 data info or URL
            b64 = result.get("image_base64")
            if b64:
                print(f"[base64 image data: {len(b64)} chars, decode with base64.b64decode()]")
            else:
                image_url = result.get("image_url", "")
                if image_url:
                    print(f"Image URL (may 404): {MASTER_URL}{image_url}")
                else:
                    print(json.dumps(result, ensure_ascii=False, indent=2))

    elif args.command == "batch":
        requests_list = []
        with open(args.input_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    requests_list.append(json.loads(line))

        print(f"Processing {len(requests_list)} requests (max concurrency={MAX_CONCURRENCY})...",
              file=sys.stderr)
        results = batch_execute(requests_list, args.output_dir,
                                remove_wm=args.remove_watermark)
        print(json.dumps(results, ensure_ascii=False, indent=2))

    elif args.command == "health":
        try:
            url = f"{MASTER_URL}/health"
            req = Request(url, method="GET")
            with _opener.open(req, timeout=10) as resp:
                body = resp.read().decode("utf-8")
                print(body)
        except Exception as e:
            print(f"Health check failed: {e}", file=sys.stderr)
            sys.exit(1)


if __name__ == "__main__":
    main()