import { Context } from "../../types"
import llm from "../../utils/llm"
import { cleanLLMRes } from "../../utils/llm/base"
import getChatHistory from "./chatHistory"

/**
 * Group-chat agent: parses a time range from the user's message, pulls the
 * matching chat history, and asks the LLM to answer based on that history,
 * updating a Lark card with progress along the way.
 */
const agent = async (ctx: Context) => {
  const {
    logger,
    requestId,
    larkCard,
    larkService: { message },
    appInfo,
    larkBody: { messageId, msgText, chatId, mentions, rawMsgText, openId },
  } = ctx
  const cardGen = larkCard.child("groupAgent")
  // Show a pending card while the time range is being analyzed
  const loadingMessageId = await message.updateOrReply(
    cardGen.genPendingCard("正在分析时间区间,请稍等...")
  )

  // Use the LLM to parse the time range from the user's input
  const { startTime, endTime } = await llm.timeParser(msgText, requestId)
  logger.info("解析的时间范围", { startTime, endTime })

  // Update the card while the chat history is being fetched
  await message.updateOrReply(cardGen.genPendingCard("正在爬楼中,请稍等..."))

  // Fetch the chat history for the parsed time range, excluding the loading
  // card, the triggering message, and mentions of the bot itself
  const { messages: chatHistory, mentions: historyMentions } =
    await getChatHistory(ctx, {
      chatId,
      startTime,
      endTime,
      mentions,
      senderOpenId: openId,
      excludedMessageIds: [loadingMessageId, messageId],
      excludeMentions: [appInfo.appName],
    })
  logger.debug("获取的聊天记录", { count: chatHistory.length })

  // Rebuild the user input from the raw text: replace mention keys with
  // "@name" and strip the mention of the bot itself
  let userInput = rawMsgText.trim()
  for (const mention of mentions ?? []) {
    if (mention.name !== appInfo.appName) {
      userInput = userInput.replace(mention.key, `@${mention.name}`)
    } else {
      userInput = userInput.replace(mention.key, "")
    }
  }

  // Invoke the LLM and render the result (or the error) into the card
  try {
    await message.updateOrReply(cardGen.genPendingCard("LLM输出中,请稍等..."))
    const llmRes = (await llm.invoke(
      "groupAgent",
      {
        userName: historyMentions.get(openId || "") ?? "用户",
        userInput,
        chatHistory: JSON.stringify(chatHistory),
        time: new Date().toLocaleString("zh-CN", { timeZone: "Asia/Shanghai" }),
      },
      requestId
    )) as string
    logger.info("LLM调用成功", {
      requestId,
      url: `http://langfuse.ai.srv/project/cm1j2tkj9001gukrgdvc1swuw/sessions/${requestId}`,
    })
    const cleanedLlmRes = cleanLLMRes(llmRes)
    await message.updateOrReply(cardGen.genSuccessCard(cleanedLlmRes))
  } catch (error: any) {
    logger.error("LLM调用失败", { error: error.message })
    await message.updateOrReply(
      cardGen.genErrorCard("LLM调用失败: " + error.message)
    )
  }
}

export default agent