update pay function
@@ -1999,7 +1999,7 @@ class MCPAgentIntegrated:
             model=self.kimi_model,
             messages=messages,
             temperature=1.0,  # recommended by Kimi
-            max_tokens=16000,  # enough room for reasoning_content
+            max_tokens=8192,  # enough room for reasoning_content
         )

         choice = response.choices[0]
@@ -2074,7 +2074,7 @@ class MCPAgentIntegrated:
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.7,
-            max_tokens=1000,
+            max_tokens=8192,
         )

         summary = response.choices[0].message.content
@@ -2241,7 +2241,7 @@ class MCPAgentIntegrated:
             model="kimi-k2-turbo-preview",  # use the non-thinking model; faster
             messages=messages,
             temperature=0.7,
-            max_tokens=2000,  # raised token limit to support chart configs
+            max_tokens=8192,  # raised token limit to support chart configs
         )

         summary = response.choices[0].message.content
@@ -2328,7 +2328,7 @@ class MCPAgentIntegrated:
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.3,
-            max_tokens=100,
+            max_tokens=4096,
         )

         title = response.choices[0].message.content.strip()
@@ -2423,7 +2423,7 @@ class MCPAgentIntegrated:
             model=planning_model,
             messages=messages,
             temperature=1.0,
-            max_tokens=16000,
+            max_tokens=8192,
             stream=True,  # enable streaming output
         )

@@ -2467,7 +2467,7 @@ class MCPAgentIntegrated:
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.7,
-            max_tokens=16000,
+            max_tokens=8192,
         )

         plan_content = fallback_response.choices[0].message.content
@@ -2663,7 +2663,7 @@ class MCPAgentIntegrated:
             model="kimi-k2-turbo-preview",
             messages=messages,
             temperature=0.7,
-            max_tokens=2000,
+            max_tokens=8192,
             stream=True,  # enable streaming output
         )

@@ -2697,7 +2697,7 @@ class MCPAgentIntegrated:
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.7,
-            max_tokens=2000,
+            max_tokens=8192,
         )

         final_summary = fallback_response.choices[0].message.content
@@ -3709,7 +3709,7 @@ async def stream_role_response(
         messages=messages,
         stream=True,
         temperature=0.7,
-        max_tokens=16384,  # greatly increased token limit to avoid truncated output
+        max_tokens=8192,  # greatly increased token limit to avoid truncated output
     )

     full_content = ""
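
All nine call sites in this commit converge on the same 8192-token cap (4096 for the short title prompt). A minimal sketch of factoring those caps into one place, assuming the OpenAI-compatible Python SDK the call sites appear to use; the constant names, the wrapper, and the client wiring below are illustrative assumptions, not part of the commit:

    from openai import OpenAI

    # Assumed constants; the commit hard-codes these values at each call site.
    MAX_TOKENS_DEFAULT = 8192  # shared cap for chat, planning, and summary calls
    MAX_TOKENS_TITLE = 4096    # smaller cap for the short title prompt

    client = OpenAI()  # hypothetical wiring; the diff does not show client setup

    def chat_completion(model, messages, *, temperature=0.7,
                        max_tokens=MAX_TOKENS_DEFAULT, stream=False):
        """Thin wrapper so a future cap change touches one line, not nine."""
        return client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=stream,
        )

With a wrapper like this, the title call would pass max_tokens=MAX_TOKENS_TITLE and every other call could omit the argument entirely.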