update pay function

2025-11-30 16:33:34 +08:00
parent 5a24cb9eec
commit 608ac4a962


@@ -1999,7 +1999,7 @@ class MCPAgentIntegrated:
                 model=self.kimi_model,
                 messages=messages,
                 temperature=1.0,  # recommended for Kimi
-                max_tokens=16000,  # enough room for reasoning_content
+                max_tokens=8192,  # enough room for reasoning_content
             )
             choice = response.choices[0]
@@ -2074,7 +2074,7 @@ class MCPAgentIntegrated:
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.7,
-                max_tokens=1000,
+                max_tokens=8192,
             )
             summary = response.choices[0].message.content
@@ -2241,7 +2241,7 @@ class MCPAgentIntegrated:
                 model="kimi-k2-turbo-preview",  # use the non-thinking model, faster
                 messages=messages,
                 temperature=0.7,
-                max_tokens=2000,  # raise the token limit to support chart configuration
+                max_tokens=8192,  # raise the token limit to support chart configuration
             )
             summary = response.choices[0].message.content
@@ -2328,7 +2328,7 @@ class MCPAgentIntegrated:
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.3,
-                max_tokens=100,
+                max_tokens=4096,
             )
             title = response.choices[0].message.content.strip()
@@ -2423,7 +2423,7 @@ class MCPAgentIntegrated:
                 model=planning_model,
                 messages=messages,
                 temperature=1.0,
-                max_tokens=16000,
+                max_tokens=8192,
                 stream=True,  # enable streaming output
             )
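Note on the planning call above: it runs with stream=True, so the text arrives as incremental deltas rather than a single message. The following is a minimal sketch, not part of this commit, of how such a streamed completion is typically consumed with the OpenAI-compatible Python SDK; the client construction, model string, and messages are placeholder assumptions.

from openai import OpenAI

client = OpenAI()  # placeholder client; the real code configures api_key/base_url elsewhere

response = client.chat.completions.create(
    model="kimi-k2-turbo-preview",                             # placeholder model name
    messages=[{"role": "user", "content": "draft a plan"}],    # placeholder messages
    temperature=1.0,
    max_tokens=8192,
    stream=True,  # deltas arrive incrementally instead of one final message
)

plan_content = ""
for chunk in response:
    # each chunk carries an incremental delta; the text field may be None on some chunks
    if chunk.choices and chunk.choices[0].delta.content:
        plan_content += chunk.choices[0].delta.content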
@@ -2467,7 +2467,7 @@ class MCPAgentIntegrated:
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.7,
-                max_tokens=16000,
+                max_tokens=8192,
             )
             plan_content = fallback_response.choices[0].message.content
@@ -2663,7 +2663,7 @@ class MCPAgentIntegrated:
                 model="kimi-k2-turbo-preview",
                 messages=messages,
                 temperature=0.7,
-                max_tokens=2000,
+                max_tokens=8192,
                 stream=True,  # enable streaming output
             )
@@ -2697,7 +2697,7 @@ class MCPAgentIntegrated:
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.7,
-                max_tokens=2000,
+                max_tokens=8192,
             )
             final_summary = fallback_response.choices[0].message.content
@@ -3709,7 +3709,7 @@ async def stream_role_response(
             messages=messages,
             stream=True,
             temperature=0.7,
-            max_tokens=16384,  # greatly increased token limit to avoid truncated output
+            max_tokens=8192,  # greatly increased token limit to avoid truncated output
         )
         full_content = ""
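After this change, every completion call in the diff shares the same 8192 cap (4096 for the short title call). A minimal sketch, assuming a hypothetical module-level constant, of how that shared limit could live in one place; the helper function, client, and model string below are illustrative and not code from this repository.

from openai import OpenAI

MAX_COMPLETION_TOKENS = 8192  # hypothetical shared cap mirroring the values set above

client = OpenAI()  # placeholder client

def summarize(messages: list[dict]) -> str:
    # illustrative helper, not part of MCPAgentIntegrated
    response = client.chat.completions.create(
        model="kimi-k2-turbo-preview",  # placeholder model name
        messages=messages,
        temperature=0.7,
        max_tokens=MAX_COMPLETION_TOKENS,
    )
    return response.choices[0].message.content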