update pay function

2025-11-28 15:32:03 +08:00
parent 9b7a221315
commit 8cf2850660
6 changed files with 1278 additions and 666 deletions


@@ -2337,7 +2337,37 @@ async def search_chat_history(user_id: str, query: str, top_k: int = 10):
raise HTTPException(status_code=500, detail=str(e))
# ==================== Investment research meeting room (V2: streaming + tool calls) ====================
import random

# Model configs dedicated to the meeting room
MEETING_MODEL_CONFIGS = {
"kimi-k2-thinking": {
"api_key": "sk-TzB4VYJfCoXGcGrGMiewukVRzjuDsbVCkaZXi2LvkS8s60E5",
"base_url": "https://api.moonshot.cn/v1",
"model": "kimi-k2-thinking",
},
"deepseek": {
"api_key": "sk-7363bdb28d7d4bf0aa68eb9449f8f063",
"base_url": "https://api.deepseek.com",
"model": "deepseek-chat",
},
"deepmoney": {
"api_key": "",
"base_url": "http://111.62.35.50:8000/v1",
"model": "deepmoney",
},
}
# Tools available to each role
ROLE_TOOLS = {
"buffett": ["search_china_news", "search_research_reports", "get_stock_basic_info", "get_stock_financial_index"],
"big_short": ["search_china_news", "get_stock_financial_index", "get_stock_balance_sheet", "get_stock_cashflow"],
"simons": ["get_stock_trade_data", "search_limit_up_stocks", "get_concept_statistics"],
"leek": [], # 韭菜不用工具
"fund_manager": ["search_china_news", "search_research_reports", "get_stock_basic_info"],
}
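
# Sketch (not part of the original commit): a startup sanity check that every tool
# name listed in ROLE_TOOLS is actually registered. It assumes, as the code further
# down does, that TOOLS items expose a `.name` attribute and that TOOL_HANDLERS is
# keyed by tool name, both defined earlier in this module. Call it manually
# (e.g. at app startup) if useful; it is not wired in anywhere.
def _validate_role_tools() -> None:
    registered = {t.name for t in TOOLS} & set(TOOL_HANDLERS)
    for role_id, tool_names in ROLE_TOOLS.items():
        missing = [name for name in tool_names if name not in registered]
        if missing:
            raise RuntimeError(f"Role '{role_id}' references unregistered tools: {missing}")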
# Role configuration for the meeting room
MEETING_ROLES = {
@@ -2345,508 +2375,435 @@ MEETING_ROLES = {
"id": "buffett",
"name": "巴菲特",
"nickname": "唱多者",
"role_type": "bull", # 多头
"role_type": "bull",
"avatar": "/avatars/buffett.png",
"model": "kimi-k2-thinking",
"color": "#10B981", # 绿色(上涨)
"color": "#10B981",
"description": "主观多头,善于分析事件的潜在利好和长期价值",
"tools": ROLE_TOOLS["buffett"],
"system_prompt": """你是"巴菲特",一位资深的价值投资者和主观多头分析师。
你的特点:
1. 善于发现事件和公司的潜在利好因素
2. 关注长期价值,不被短期波动干扰
3. 分析公司的护城河、竞争优势和管理层质量
4. 对市场保持乐观但理性的态度
2. 关注长期价值,分析护城河、竞争优势
3. 对市场保持乐观但理性的态度
分析风格
- 重点挖掘利好因素和投资机会
- 从产业链、市场格局、政策支持等角度分析
- 给出清晰的看多逻辑和目标预期
- 语言风格:稳重、专业、富有洞察力
你可以使用以下工具获取数据
- search_china_news: 搜索新闻
- search_research_reports: 搜索研报
- get_stock_basic_info: 获取股票基本信息
- get_stock_financial_index: 获取财务指标
注意你的发言要简洁有力每次发言控制在200字以内。直接表达观点不要客套。"""
分析时请先调用工具获取数据,再基于数据发表看多观点。
注意参考前面其他人的发言进行有针对性的回应。发言控制在200字以内。"""
},
"big_short": {
"id": "big_short",
"name": "大空头",
"nickname": "大空头",
"role_type": "bear", # 空头
"role_type": "bear",
"avatar": "/avatars/big_short.png",
"model": "kimi-k2-thinking",
"color": "#EF4444", # 红色(下跌)
"description": "善于分析事件和财报中的风险因素,帮助投资者避雷",
"system_prompt": """你是"大空头",一位专业的风险分析师和空头研究员。
"color": "#EF4444",
"description": "善于分析事件和财报中的风险因素",
"tools": ROLE_TOOLS["big_short"],
"system_prompt": """你是"大空头",一位专业的风险分析师。
你的特点:
1. 善于发现被市场忽视的风险因素
2. 擅长财报分析,发现财务造假和粉饰的迹象
3. 关注行业天花板、竞争加剧、估值泡沫等问题
4. 对市场保持警惕,帮助投资者避雷
2. 擅长财报分析,发现财务造假迹象
3. 关注行业天花板、竞争加剧、估值泡沫
分析风格
- 重点挖掘风险因素和潜在隐患
- 从财务数据、行业周期、估值水平等角度分析
- 给出清晰的风险提示和规避建议
- 语言风格:犀利、直接、善于质疑
你可以使用以下工具获取数据
- search_china_news: 搜索负面新闻
- get_stock_financial_index: 获取财务指标找问题
- get_stock_balance_sheet: 分析资产负债表
- get_stock_cashflow: 分析现金流
注意你的发言要简洁有力每次发言控制在200字以内。直接指出风险不要绕弯子。"""
分析时请先调用工具获取数据,再基于数据指出风险。
注意参考前面其他人的发言进行有针对性的反驳。发言控制在200字以内。"""
},
"simons": {
"id": "simons",
"name": "量化分析员",
"nickname": "西蒙斯",
"role_type": "quant", # 量化
"role_type": "quant",
"avatar": "/avatars/simons.png",
"model": "deepseek-v3",
"color": "#3B82F6", # 蓝色(中性)
"description": "中性立场,使用量化分析工具分析技术指标",
"model": "deepseek",
"color": "#3B82F6",
"description": "中性立场,使用量化工具分析技术指标",
"tools": ROLE_TOOLS["simons"],
"system_prompt": """你是"量化分析员"(昵称:西蒙斯),一位专业的量化交易研究员。
你的特点:
1. 使用数据和技术指标说话,保持中性立场
2. 擅长均线分析、量价关系、动能指标等技术分析
3. 关注市场情绪、资金流向、筹码分布等量化因素
4. 用概率思维看待市场,不做主观臆断
2. 擅长均线、量价、动能指标分析
3. 用概率思维看待市场
分析风格
- 基于技术指标给出客观分析
- 使用具体数据支撑观点5日均线、MACD、RSI等
- 给出量化的买卖信号和风险评估
- 语言风格:理性、客观、数据驱动
你可以使用以下工具获取数据
- get_stock_trade_data: 获取交易数据(价格、成交量)
- search_limit_up_stocks: 搜索涨停股票
- get_concept_statistics: 获取概念板块统计
注意你的发言要简洁有力每次发言控制在200字以内。多用数据说话少发表主观意见。"""
分析时请先调用工具获取数据,再基于数据给出技术分析。
注意参考前面其他人的发言用数据说话。发言控制在200字以内。"""
},
"leek": {
"id": "leek",
"name": "韭菜",
"nickname": "牢大",
"role_type": "retail", # 散户
"role_type": "retail",
"avatar": "/avatars/leek.png",
"model": "deepmoney",
"color": "#F59E0B", # 黄色
"description": "贪婪又讨厌亏损,热爱追涨杀跌的典型散户",
"color": "#F59E0B",
"description": "贪婪又讨厌亏损,热爱追涨杀跌",
"tools": [],
"system_prompt": """你是"韭菜"(昵称:牢大),一个典型的散户投资者。
你的特点:
1. 贪婪但又害怕亏损,典型的追涨杀跌
2. 容易被市场情绪影响,看到涨就想追,看到跌就想跑
3. 喜欢听小道消息,容易被"内幕"吸引
4. 短线思维,缺乏耐心,期望一夜暴富
1. 贪婪但又害怕亏损,追涨杀跌
2. 容易被市场情绪影响
3. 喜欢听小道消息,期望一夜暴富
分析风格:
- 用最朴素的散户思维来分析问题
- 经常关注"这个能赚多少""会不会跌"
- 容易情绪化,看涨时过度乐观,看跌时过度悲观
- 语言风格:口语化、情绪化、接地气
注意你的发言要简洁直接每次发言控制在150字以内。展现真实散户的心态可以有些搞笑但不要太出格。"""
你不需要调用工具,直接用散户视角发表看法。
注意参考前面其他人的发言用最朴素的方式回应。语言口语化、情绪化。发言控制在150字以内。"""
},
"fund_manager": {
"id": "fund_manager",
"name": "基金经理",
"nickname": "决策者",
"role_type": "manager", # 管理者
"role_type": "manager",
"avatar": "/avatars/fund_manager.png",
"model": "kimi-k2-thinking",
"color": "#8B5CF6", # 紫色
"description": "总结其他人的发言做出最终决策",
"system_prompt": """你是"基金经理",投研会议的主持人和最终决策者。
"color": "#8B5CF6",
"description": "综合分析做出最终决策",
"tools": ROLE_TOOLS["fund_manager"],
"system_prompt": """你是"基金经理",投研会议的最终决策者。
你的角色:
1. 综合各方观点,做出理性判断
2. 平衡多空观点,识别有价值的分析
3. 特别注意:韭菜的观点通常是反向指标
4. 给出专业、负责任的投资建议
3. 注意:韭菜的观点通常是反向指标
决策风格
- 综合考虑基本面、技术面、情绪面
- 权衡风险与收益,给出明确的投资建议
- 指出讨论中的关键洞察和需要注意的风险
- 语言风格:权威、专业、全面
如果需要补充信息,可以调用工具
- search_china_news: 搜索新闻
- search_research_reports: 搜索研报
- get_stock_basic_info: 获取股票基本信息
决策输出格式:
1. 综合评估:对讨论议题的整体判断
2. 关键观点:各方有价值的观点总结
3. 风险提示:需要注意的主要风险
4. 操作建议:具体的投资建议(买入/持有/观望/卖出)
5. 信心指数:对这个结论的信心程度1-10分
1. 综合评估
2. 关键观点
3. 风险提示
4. 操作建议(买入/持有/观望/卖出)
5. 信心指数1-10分
注意:如果讨论还不够充分,你可以要求继续讨论。每次发言控制在300字以内。"""
参考前面所有人的发言,给出综合判断。发言控制在300字以内。"""
}
}
# 投研会议室专用模型配置(扩展现有配置)
MEETING_MODEL_CONFIGS = {
**MODEL_CONFIGS,
"deepseek-v3": {
"api_key": "sk-1cf3dfadf7244a8680cd0a60da6f1efd",
"base_url": "https://api.deepseek.com/v1",
"model": "deepseek-chat",
}
}
class MeetingRoleMessage(BaseModel):
    """A single role's message in the meeting."""
    role_id: str
    role_name: str
    nickname: str
    avatar: str
    color: str
    content: str
    timestamp: str
    round_number: int  # which discussion round this message belongs to

class MeetingRequest(BaseModel):
    """Request body for the investment research meeting."""
    topic: str
    user_id: str = "anonymous"
    user_nickname: str = "匿名用户"
    session_id: Optional[str] = None
    user_message: Optional[str] = None
    conversation_history: List[Dict[str, Any]] = []

class MeetingResponse(BaseModel):
    """Response body for the investment research meeting."""
    success: bool
    session_id: str
    messages: List[Dict[str, Any]]  # all role messages produced in this round
    round_number: int  # current round number
    is_concluded: bool  # whether a conclusion has been reached
    conclusion: Optional[Dict[str, Any]] = None  # the fund manager's conclusion, if any
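
# Example (sketch, not from the original commit): a minimal request body for the
# streaming endpoint defined below. Field values are illustrative only;
# conversation_history reuses the message dicts emitted in earlier rounds, whose
# role_name/content keys are what the context builder reads.
#
#   {
#       "topic": "宁德时代三季报怎么看?",
#       "user_id": "u_123",
#       "user_nickname": "测试用户",
#       "session_id": null,
#       "user_message": null,
#       "conversation_history": []
#   }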
def get_random_speaking_order() -> List[str]:
    """Randomly generate the speaking order (the fund manager is excluded)."""
    roles = ["buffett", "big_short", "simons", "leek"]
    random.shuffle(roles)
    return roles
async def call_role_tool(role_id: str, tool_name: str, arguments: dict) -> dict:
    """Invoke one of the role's tools and wrap the result."""
    handler = TOOL_HANDLERS.get(tool_name)
    if not handler:
        return {"success": False, "error": f"Unknown tool: {tool_name}"}
    try:
        result = await handler(arguments)
        return {"success": True, "tool": tool_name, "result": result}
    except Exception as e:
        logger.error(f"Tool {tool_name} failed: {e}")
        return {"success": False, "tool": tool_name, "error": str(e)}
async def stream_role_response(
    role_id: str,
    topic: str,
    context: str,
    tools: List[dict]  # currently unused; tool schemas are rebuilt from the role config below
) -> AsyncGenerator[dict, None]:
    """Stream one role's reply, with optional tool calls."""
    role = MEETING_ROLES.get(role_id)
    if not role:
        yield {"type": "error", "error": f"Unknown role: {role_id}"}
        return

    model_name = role["model"]
    model_config = MEETING_MODEL_CONFIGS.get(model_name)
    if not model_config:
        yield {"type": "error", "error": f"Unknown model: {model_name}"}
        return

    try:
        client = OpenAI(
            api_key=model_config["api_key"],
            base_url=model_config["base_url"],
            timeout=180
        )
        messages = [
            {"role": "system", "content": role["system_prompt"]},
            {"role": "user", "content": f"议题:{topic}\n\n{context}"}
        ]

        # Prepare tool definitions (if this role has any tools)
        role_tool_names = role.get("tools", [])
        openai_tools = None
        if role_tool_names:
            openai_tools = []
            for tool in TOOLS:
                if tool.name in role_tool_names:
                    openai_tools.append({
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "description": tool.description,
                            "parameters": tool.parameters
                        }
                    })

        # First call: may trigger tool calls
        tool_calls_made = []
        if openai_tools:
            response = client.chat.completions.create(
                model=model_config["model"],
                messages=messages,
                tools=openai_tools,
                tool_choice="auto",
                stream=False,  # the tool-call round is not streamed
                temperature=0.7,
                max_tokens=1000,
            )
            assistant_message = response.choices[0].message

            # Handle tool calls
            if assistant_message.tool_calls:
                messages.append(assistant_message)
                for tool_call in assistant_message.tool_calls:
                    tool_name = tool_call.function.name
                    try:
                        arguments = json.loads(tool_call.function.arguments)
                    except (json.JSONDecodeError, TypeError):
                        arguments = {}

                    # Emit tool-call start event
                    yield {
                        "type": "tool_call_start",
                        "tool": tool_name,
                        "arguments": arguments
                    }

                    # Execute the tool call
                    result = await call_role_tool(role_id, tool_name, arguments)
                    tool_calls_made.append(result)

                    # Emit tool-call result event
                    yield {
                        "type": "tool_call_result",
                        "tool": tool_name,
                        "result": result
                    }

                    # Feed the tool result back into the conversation
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": json.dumps(result, ensure_ascii=False)
                    })

        # Stream the final reply
        stream = client.chat.completions.create(
            model=model_config["model"],
            messages=messages,
            stream=True,
            temperature=0.7,
            max_tokens=500,
        )
        full_content = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_content += content
                yield {
                    "type": "content_delta",
                    "content": content
                }

        # Emit the completion event
        yield {
            "type": "content_done",
            "full_content": full_content,
            "tool_calls": tool_calls_made
        }
    except Exception as e:
        logger.error(f"Role {role_id} stream failed: {e}")
        yield {"type": "error", "error": str(e)}
@app.post("/agent/meeting/start")
async def start_investment_meeting(request: MeetingRequest):
@app.post("/agent/meeting/stream")
async def stream_investment_meeting(request: MeetingRequest):
"""
启动投研会议
流式投研会议 V2
第一轮:所有角色(除基金经理外)依次发言
- 随机发言顺序
- 每个角色流式输出
- 支持工具调用
- 支持用户中途发言
"""
logger.info(f"启动投研会议: {request.topic} (user: {request.user_id})")
logger.info(f"[Meeting V2] 启动: {request.topic}")
session_id = request.session_id or str(uuid.uuid4())
messages = []
round_number = 1
async def generate_meeting_stream() -> AsyncGenerator[str, None]:
session_id = request.session_id or str(uuid.uuid4())
round_number = len(request.conversation_history) // 5 + 1
# 决定发言顺序
speaking_order = await determine_speaking_order(request.topic)
logger.info(f"发言顺序: {speaking_order}")
# 发送会话开始
yield f"data: {json.dumps({'type': 'session_start', 'session_id': session_id, 'round': round_number}, ensure_ascii=False)}\n\n"
# 构建讨论上下文
context = f"议题:{request.topic}\n\n这是第一轮讨论,请针对议题发表你的观点。"
# 构建上下文
context_parts = []
if request.conversation_history:
context_parts.append("之前的讨论:")
for msg in request.conversation_history:
context_parts.append(f"{msg.get('role_name', '未知')}】:{msg.get('content', '')}")
# 依次让每个角色发言
for role_id in speaking_order:
role = MEETING_ROLES[role_id]
if role["role_type"] == "manager":
continue # 基金经理不在第一轮发言
# 加入之前角色的发言作为上下文
prev_context = context
if messages:
prev_context += "\n\n其他人的观点:\n"
for msg in messages:
prev_context += f"- {msg['role_name']}{msg['content']}\n"
# 调用LLM生成发言
content = await call_role_llm(role_id, request.topic, prev_context)
message = {
"role_id": role_id,
"role_name": role["name"],
"nickname": role["nickname"],
"avatar": role["avatar"],
"color": role["color"],
"content": content,
"timestamp": datetime.now().isoformat(),
"round_number": round_number
}
messages.append(message)
# 第一轮结束后,基金经理判断是否可以得出结论
discussion_summary = "\n".join([
f"{msg['role_name']}】:{msg['content']}"
for msg in messages
])
can_conclude, conclusion_content = await check_conclusion_ready(discussion_summary, request.topic)
# 添加基金经理的发言
fund_manager = MEETING_ROLES["fund_manager"]
fund_manager_message = {
"role_id": "fund_manager",
"role_name": fund_manager["name"],
"nickname": fund_manager["nickname"],
"avatar": fund_manager["avatar"],
"color": fund_manager["color"],
"content": conclusion_content,
"timestamp": datetime.now().isoformat(),
"round_number": round_number,
"is_conclusion": can_conclude
}
messages.append(fund_manager_message)
return {
"success": True,
"session_id": session_id,
"messages": messages,
"round_number": round_number,
"is_concluded": can_conclude,
"conclusion": fund_manager_message if can_conclude else None
}
@app.post("/agent/meeting/continue")
async def continue_investment_meeting(request: MeetingRequest):
"""
继续投研会议讨论
根据之前的讨论历史,继续新一轮讨论
支持用户在讨论中插话
"""
logger.info(f"继续投研会议: {request.topic} (round: {len(request.conversation_history) // 5 + 1})")
session_id = request.session_id or str(uuid.uuid4())
messages = []
round_number = len(request.conversation_history) // 5 + 2 # 估算轮次
# 构建历史讨论上下文
history_context = "历史讨论:\n"
for msg in request.conversation_history:
history_context += f"{msg.get('role_name', '未知')}】:{msg.get('content', '')}\n"
# 如果用户有插话,先处理用户消息
if request.user_message:
history_context += f"\n【用户】:{request.user_message}\n"
messages.append({
"role_id": "user",
"role_name": "用户",
"nickname": request.user_nickname,
"avatar": "",
"color": "#6366F1",
"content": request.user_message,
"timestamp": datetime.now().isoformat(),
"round_number": round_number
})
# 新一轮讨论的发言顺序
speaking_order = await determine_speaking_order(request.topic)
# 依次让每个角色发言
for role_id in speaking_order:
role = MEETING_ROLES[role_id]
if role["role_type"] == "manager":
continue
# 构建本次发言的上下文
current_context = f"议题:{request.topic}\n\n{history_context}"
if messages:
current_context += "\n本轮讨论:\n"
for msg in messages:
if msg["role_id"] != "user":
current_context += f"- {msg['role_name']}{msg['content']}\n"
# 调用LLM
prompt = f"这是第{round_number}轮讨论,请根据之前的讨论内容,进一步阐述或补充你的观点。"
if request.user_message:
prompt += f"\n\n用户刚才说:{request.user_message}\n请也回应用户的观点。"
context_parts.append(f"\n用户刚才说:{request.user_message}")
content = await call_role_llm(role_id, prompt, current_context)
context = "\n".join(context_parts) if context_parts else "这是第一轮讨论,请针对议题发表你的观点。"
message = {
"role_id": role_id,
"role_name": role["name"],
"nickname": role["nickname"],
"avatar": role["avatar"],
"color": role["color"],
"content": content,
# 随机发言顺序
speaking_order = get_random_speaking_order()
yield f"data: {json.dumps({'type': 'order_decided', 'order': speaking_order}, ensure_ascii=False)}\n\n"
all_messages = []
accumulated_context = context
# 依次让每个角色发言
for role_id in speaking_order:
role = MEETING_ROLES[role_id]
# 发送开始发言事件
yield f"data: {json.dumps({'type': 'speaking_start', 'role_id': role_id, 'role_name': role['name'], 'color': role['color']}, ensure_ascii=False)}\n\n"
# 准备工具列表
role_tools = [t for t in TOOLS if t.name in role.get("tools", [])]
# 流式生成回复
full_content = ""
tool_calls = []
async for event in stream_role_response(role_id, request.topic, accumulated_context, role_tools):
if event["type"] == "tool_call_start":
yield f"data: {json.dumps({'type': 'tool_call_start', 'role_id': role_id, 'tool': event['tool'], 'arguments': event['arguments']}, ensure_ascii=False)}\n\n"
elif event["type"] == "tool_call_result":
yield f"data: {json.dumps({'type': 'tool_call_result', 'role_id': role_id, 'tool': event['tool'], 'result': event['result']}, ensure_ascii=False)}\n\n"
tool_calls.append(event["result"])
elif event["type"] == "content_delta":
yield f"data: {json.dumps({'type': 'content_delta', 'role_id': role_id, 'content': event['content']}, ensure_ascii=False)}\n\n"
full_content += event["content"]
elif event["type"] == "content_done":
full_content = event["full_content"]
tool_calls = event.get("tool_calls", [])
elif event["type"] == "error":
yield f"data: {json.dumps({'type': 'error', 'role_id': role_id, 'error': event['error']}, ensure_ascii=False)}\n\n"
full_content = f"[{role['name']}暂时无法发言]"
# 构建完整消息
message = {
"role_id": role_id,
"role_name": role["name"],
"nickname": role["nickname"],
"avatar": role["avatar"],
"color": role["color"],
"content": full_content,
"tool_calls": tool_calls,
"timestamp": datetime.now().isoformat(),
"round_number": round_number
}
all_messages.append(message)
# 发送消息完成事件
yield f"data: {json.dumps({'type': 'message_complete', 'message': message}, ensure_ascii=False)}\n\n"
# 更新上下文
accumulated_context += f"\n\n{role['name']}】:{full_content}"
await asyncio.sleep(0.3)
# 基金经理总结
fund_manager = MEETING_ROLES["fund_manager"]
yield f"data: {json.dumps({'type': 'speaking_start', 'role_id': 'fund_manager', 'role_name': fund_manager['name'], 'color': fund_manager['color']}, ensure_ascii=False)}\n\n"
fm_full_content = ""
fm_tool_calls = []
fm_tools = [t for t in TOOLS if t.name in fund_manager.get("tools", [])]
async for event in stream_role_response("fund_manager", request.topic, accumulated_context, fm_tools):
if event["type"] == "tool_call_start":
yield f"data: {json.dumps({'type': 'tool_call_start', 'role_id': 'fund_manager', 'tool': event['tool'], 'arguments': event['arguments']}, ensure_ascii=False)}\n\n"
elif event["type"] == "tool_call_result":
yield f"data: {json.dumps({'type': 'tool_call_result', 'role_id': 'fund_manager', 'tool': event['tool'], 'result': event['result']}, ensure_ascii=False)}\n\n"
fm_tool_calls.append(event["result"])
elif event["type"] == "content_delta":
yield f"data: {json.dumps({'type': 'content_delta', 'role_id': 'fund_manager', 'content': event['content']}, ensure_ascii=False)}\n\n"
fm_full_content += event["content"]
elif event["type"] == "content_done":
fm_full_content = event["full_content"]
elif event["type"] == "error":
fm_full_content = "[基金经理暂时无法发言]"
fm_message = {
"role_id": "fund_manager",
"role_name": fund_manager["name"],
"nickname": fund_manager["nickname"],
"avatar": fund_manager["avatar"],
"color": fund_manager["color"],
"content": fm_full_content,
"tool_calls": fm_tool_calls,
"timestamp": datetime.now().isoformat(),
"round_number": round_number
"round_number": round_number,
"is_conclusion": True
}
messages.append(message)
# 本轮结束后,基金经理再次判断
all_discussion = history_context + "\n本轮讨论:\n" + "\n".join([
f"{msg['role_name']}】:{msg['content']}"
for msg in messages if msg["role_id"] != "user"
])
yield f"data: {json.dumps({'type': 'message_complete', 'message': fm_message}, ensure_ascii=False)}\n\n"
can_conclude, conclusion_content = await check_conclusion_ready(all_discussion, request.topic)
# 发送会议状态(不强制结束,用户可以继续)
yield f"data: {json.dumps({'type': 'round_end', 'round_number': round_number, 'can_continue': True}, ensure_ascii=False)}\n\n"
# 添加基金经理的发言
fund_manager = MEETING_ROLES["fund_manager"]
fund_manager_message = {
"role_id": "fund_manager",
"role_name": fund_manager["name"],
"nickname": fund_manager["nickname"],
"avatar": fund_manager["avatar"],
"color": fund_manager["color"],
"content": conclusion_content,
"timestamp": datetime.now().isoformat(),
"round_number": round_number,
"is_conclusion": can_conclude
}
messages.append(fund_manager_message)
return {
"success": True,
"session_id": session_id,
"messages": messages,
"round_number": round_number,
"is_concluded": can_conclude,
"conclusion": fund_manager_message if can_conclude else None
}
return StreamingResponse(
generate_meeting_stream(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
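
# Example client (sketch, not from the original commit): consuming the SSE stream
# from Python with the `requests` package. The base URL is an assumption for local
# testing; the event types match those emitted by generate_meeting_stream above.
#
#   import json, requests
#
#   resp = requests.post(
#       "http://localhost:8000/agent/meeting/stream",
#       json={"topic": "白酒板块还能买吗?", "user_id": "demo"},
#       stream=True,
#   )
#   for raw in resp.iter_lines(decode_unicode=True):
#       if not raw or not raw.startswith("data: "):
#           continue
#       event = json.loads(raw[len("data: "):])
#       if event["type"] == "speaking_start":
#           print(f"\n--- {event['role_name']} ---")
#       elif event["type"] == "content_delta":
#           print(event["content"], end="", flush=True)
#       elif event["type"] == "round_end":
#           break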
@app.get("/agent/meeting/roles")
@@ -2863,111 +2820,13 @@ async def get_meeting_roles():
"avatar": role["avatar"],
"color": role["color"],
"description": role["description"],
"tools": role.get("tools", []),
}
for role in MEETING_ROLES.values()
]
}
@app.post("/agent/meeting/stream")
async def stream_investment_meeting(request: MeetingRequest):
"""
流式投研会议
以 SSE 方式逐个角色流式返回发言
"""
logger.info(f"流式投研会议: {request.topic} (user: {request.user_id})")
async def generate_meeting_stream() -> AsyncGenerator[str, None]:
session_id = request.session_id or str(uuid.uuid4())
round_number = 1
all_messages = []
# 发送会话开始事件
yield f"data: {json.dumps({'type': 'session_start', 'session_id': session_id}, ensure_ascii=False)}\n\n"
# 决定发言顺序
speaking_order = await determine_speaking_order(request.topic)
yield f"data: {json.dumps({'type': 'order_decided', 'order': speaking_order}, ensure_ascii=False)}\n\n"
context = f"议题:{request.topic}\n\n这是第一轮讨论,请针对议题发表你的观点。"
# 依次让每个角色发言
for role_id in speaking_order:
role = MEETING_ROLES[role_id]
if role["role_type"] == "manager":
continue
# 发送"正在发言"状态
yield f"data: {json.dumps({'type': 'speaking_start', 'role_id': role_id, 'role_name': role['name']}, ensure_ascii=False)}\n\n"
# 构建上下文
prev_context = context
if all_messages:
prev_context += "\n\n其他人的观点:\n"
for msg in all_messages:
prev_context += f"- {msg['role_name']}{msg['content']}\n"
# 调用LLM生成发言
content = await call_role_llm(role_id, request.topic, prev_context)
message = {
"role_id": role_id,
"role_name": role["name"],
"nickname": role["nickname"],
"avatar": role["avatar"],
"color": role["color"],
"content": content,
"timestamp": datetime.now().isoformat(),
"round_number": round_number
}
all_messages.append(message)
# 发送完整发言
yield f"data: {json.dumps({'type': 'message', 'message': message}, ensure_ascii=False)}\n\n"
# 短暂延迟,让前端有时间处理
await asyncio.sleep(0.5)
# 基金经理总结
fund_manager = MEETING_ROLES["fund_manager"]
yield f"data: {json.dumps({'type': 'speaking_start', 'role_id': 'fund_manager', 'role_name': fund_manager['name']}, ensure_ascii=False)}\n\n"
discussion_summary = "\n".join([
f"{msg['role_name']}】:{msg['content']}"
for msg in all_messages
])
can_conclude, conclusion_content = await check_conclusion_ready(discussion_summary, request.topic)
fund_manager_message = {
"role_id": "fund_manager",
"role_name": fund_manager["name"],
"nickname": fund_manager["nickname"],
"avatar": fund_manager["avatar"],
"color": fund_manager["color"],
"content": conclusion_content,
"timestamp": datetime.now().isoformat(),
"round_number": round_number,
"is_conclusion": can_conclude
}
yield f"data: {json.dumps({'type': 'message', 'message': fund_manager_message}, ensure_ascii=False)}\n\n"
# 发送会议结束事件
yield f"data: {json.dumps({'type': 'meeting_end', 'is_concluded': can_conclude, 'round_number': round_number}, ensure_ascii=False)}\n\n"
return StreamingResponse(
generate_meeting_stream(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
# ==================== Health check ====================
@app.get("/health")