update pay ui
@@ -89,7 +89,7 @@ MODEL_CONFIGS = {
         "api_key": "",  # empty value
         "base_url": "http://111.62.35.50:8000/v1",
         "model": "deepmoney",
-        "max_tokens": 8192,  # DeepMoney also has the 8192 limit
+        "max_tokens": 65536,  # DeepMoney is hosted locally and supports 65536
     },
     "gemini-3": {
         "api_key": "",  # requires a Google API key to be configured
@@ -99,8 +99,8 @@ MODEL_CONFIGS = {
     },
 }

-# Backward-compatible config (defaults to deepseek)
-KIMI_CONFIG = MODEL_CONFIGS["deepseek"]
+# Backward-compatible config (defaults to deepmoney: hosted locally, long context)
+KIMI_CONFIG = MODEL_CONFIGS["deepmoney"]
 DEEPMONEY_CONFIG = MODEL_CONFIGS["deepmoney"]

 # ==================== MCP protocol data models ====================
@@ -177,7 +177,7 @@ class AgentChatRequest(BaseModel):
     user_avatar: Optional[str] = None  # user avatar URL
     subscription_type: Optional[str] = None  # user subscription type (free/pro/max)
     session_id: Optional[str] = None  # session ID (a new session is created if empty)
-    model: Optional[str] = "deepseek"  # selected model (deepseek, kimi-k2, kimi-k2-thinking, glm-4.6, deepmoney, gemini-3)
+    model: Optional[str] = "deepmoney"  # selected model (deepmoney, deepseek, kimi-k2, kimi-k2-thinking, glm-4.6, gemini-3)
     tools: Optional[List[str]] = None  # selected tool list (array of tool names; all tools are used if None)

 # ==================== MCP tool definitions ====================
@@ -2005,12 +2005,12 @@ class MCPAgentIntegrated:
         # Append the current user question
         messages.append({"role": "user", "content": user_query})

-        # Use the Kimi thinking model (actually deepseek, max_tokens capped at 8192)
+        # Use the configured model (deepmoney by default, supports a 65536-token context)
         response = self.kimi_client.chat.completions.create(
             model=self.kimi_model,
             messages=messages,
             temperature=1.0,
-            max_tokens=8192,
+            max_tokens=65536,
         )

         choice = response.choices[0]
@@ -2085,7 +2085,7 @@ class MCPAgentIntegrated:
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.7,
-            max_tokens=8192,
+            max_tokens=65536,
         )

         summary = response.choices[0].message.content
@@ -2276,10 +2276,10 @@ class MCPAgentIntegrated:

         try:
             response = self.kimi_client.chat.completions.create(
-                model=self.kimi_model,  # use the configured model (deepseek-chat)
+                model=self.kimi_model,  # use the configured model (deepmoney)
                 messages=messages,
                 temperature=0.7,
-                max_tokens=8192,
+                max_tokens=65536,
             )

             summary = response.choices[0].message.content
@@ -2361,12 +2361,12 @@ class MCPAgentIntegrated:
             }
         ]

-        # Use the DeepMoney model (lighter-weight, suited to simple tasks)
+        # Use the DeepMoney model (hosted locally, supports long context)
         response = self.deepmoney_client.chat.completions.create(
             model=self.deepmoney_model,
             messages=messages,
             temperature=0.3,
-            max_tokens=8192,
+            max_tokens=65536,
         )

         title = response.choices[0].message.content.strip()
@@ -2457,8 +2457,8 @@ class MCPAgentIntegrated:

         try:
             # Try the selected model's streaming API first
-            # Read max_tokens from the model config, default 8192
-            model_max_tokens = model_config.get("max_tokens", 8192) if model_config else 8192
+            # Read max_tokens from the model config, default 65536 (deepmoney)
+            model_max_tokens = model_config.get("max_tokens", 65536) if model_config else 65536
             stream = planning_client.chat.completions.create(
                 model=planning_model,
                 messages=messages,
@@ -2502,12 +2502,12 @@ class MCPAgentIntegrated:
         })

         try:
-            # Fall back to DeepMoney (non-streaming, since DeepMoney may not support streaming)
+            # Fall back to DeepMoney (non-streaming)
             fallback_response = self.deepmoney_client.chat.completions.create(
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.7,
-                max_tokens=8192,
+                max_tokens=65536,
             )

             plan_content = fallback_response.choices[0].message.content
@@ -2700,10 +2700,10 @@ class MCPAgentIntegrated:

         try:
             summary_stream = self.kimi_client.chat.completions.create(
-                model=self.kimi_model,  # use the configured model (deepseek-chat)
+                model=self.kimi_model,  # use the configured model (deepmoney)
                 messages=messages,
                 temperature=0.7,
-                max_tokens=8192,
+                max_tokens=65536,
                 stream=True,  # enable streaming output
             )

@@ -2737,7 +2737,7 @@ class MCPAgentIntegrated:
                 model=self.deepmoney_model,
                 messages=messages,
                 temperature=0.7,
-                max_tokens=8192,
+                max_tokens=65536,
             )

             final_summary = fallback_response.choices[0].message.content
@@ -3681,8 +3681,8 @@ async def stream_role_response(

     # First call: may trigger tool calls
     tool_calls_made = []
-    # Read max_tokens from the model config, default 8192
-    max_tokens = model_config.get("max_tokens", 8192)
+    # Read max_tokens from the model config, default 65536 (deepmoney)
+    max_tokens = model_config.get("max_tokens", 65536)
     if openai_tools:
         response = client.chat.completions.create(
             model=model_config["model"],

@@ -49,7 +49,7 @@ export const AVAILABLE_MODELS: ModelConfig[] = [
   {
     id: 'deepmoney',
     name: 'DeepMoney',
-    description: 'Finance-focused model',
+    description: 'Finance-focused model, 65K context',
     icon: React.createElement(TrendingUp, { className: 'w-5 h-5' }),
     color: 'green',
   },
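The pattern this diff moves toward is reading max_tokens from the per-model entry in MODEL_CONFIGS instead of hardcoding 8192 at each call site. Below is a minimal sketch of that lookup, not code from this repository: it assumes the OpenAI-compatible Python client (the diff's chat.completions.create calls follow that API), and the resolve_model helper and the "EMPTY" key placeholder are illustrative names only.

from openai import OpenAI

# Mirrors the deepmoney entry shown in the diff above.
MODEL_CONFIGS = {
    "deepmoney": {
        "api_key": "",  # empty value; the local deployment does not check it
        "base_url": "http://111.62.35.50:8000/v1",
        "model": "deepmoney",
        "max_tokens": 65536,  # hosted locally, supports 65536 tokens
    },
}

def resolve_model(name: str = "deepmoney"):
    # Hypothetical helper: pick a config entry and build a client for it.
    cfg = MODEL_CONFIGS.get(name, MODEL_CONFIGS["deepmoney"])
    client = OpenAI(api_key=cfg["api_key"] or "EMPTY", base_url=cfg["base_url"])
    return client, cfg

client, cfg = resolve_model("deepmoney")
response = client.chat.completions.create(
    model=cfg["model"],
    messages=[{"role": "user", "content": "Summarise today's market moves."}],
    temperature=0.7,
    max_tokens=cfg.get("max_tokens", 65536),  # config-driven, not hardcoded
)
print(response.choices[0].message.content)

Keeping the limit in the config means a model's context window only has to be updated in one place rather than at every call site.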