import process from "node:process"

import { PromptTemplate } from "@langchain/core/prompts"
import { ChatOpenAI } from "@langchain/openai"
import { CallbackHandler, Langfuse } from "langfuse-langchain"
import { z } from "zod"

import db from "../db"
|
||
|
||
/**
|
||
* 获取Langfuse
|
||
* @returns
|
||
*/
|
||
const getLangfuse = async (name: string, requestId: string) => {
|
||
const langfuseParams = {
|
||
publicKey: await db.appConfig.getLangfusePk(),
|
||
secretKey: await db.appConfig.getLangfuseSk(),
|
||
baseUrl: "http://langfuse.c5-cloudml.xiaomi.srv",
|
||
sessionId: requestId,
|
||
name,
|
||
}
|
||
return {
|
||
langfuseHandler: new CallbackHandler(langfuseParams),
|
||
langfuse: new Langfuse(langfuseParams),
|
||
}
|
||
}
|
||
|
||
const modelMap = {
|
||
"deepseek-chat": {
|
||
model: "deepseek-chat",
|
||
apiKey: "xx",
|
||
baseURL: "http://10.38.214.162:8003/v1",
|
||
},
|
||
"qwen2-72b-instruct-int4": {
|
||
model: "qwen2-72b-instruct-int4",
|
||
apiKey: "xx",
|
||
baseURL: "http://10.38.214.206:8000/v1",
|
||
},
|
||
"gpt-4o": {
|
||
model: "gpt-4o",
|
||
apiKey: "sk-EhbBTR0QjhH22iLr9aCb04D2B0F44f88A07c2924Eb54CfA4",
|
||
baseURL: "https://api.gpt.ge/v1",
|
||
},
|
||
"qwen-72b-instruct-int4/v1": {
|
||
model: "qwen-72b-instruct-int4/v1",
|
||
apiKey: "xx",
|
||
baseURL:
|
||
"http://ms-13871-qwen-model-128k-9-1012195754.kscn-tj5-prod2-cloudml.xiaomi.srv/v1",
|
||
},
|
||
}
|
||
|
||
/**
|
||
* 获取模型
|
||
* @param modelName 模型名称
|
||
* @param temperature 温度
|
||
*/
|
||
const getModel = async (modelName: keyof typeof modelMap, temperature = 0) => {
|
||
const { model, apiKey, baseURL } = modelMap[modelName]
|
||
return new ChatOpenAI(
|
||
{ temperature, model, apiKey },
|
||
{
|
||
baseURL,
|
||
}
|
||
)
|
||
}
|
||
|
||
// Zod schema for the structured output of parseGroupAgentQuery.
// All fields are required strings; the prompt instructs the model to
// return empty values for fields missing from the user input.
// The describe() texts are runtime data sent to the LLM, so they stay
// in Chinese to match the prompt language.
const groupAgentConfig = z.object({
  chatId: z.string().describe("群聊ID"),
  chatName: z.string().describe("群聊名称"),
  functionId: z.string().describe("功能ID"),
  functionName: z.string().describe("功能名称"),
  startTime: z.string().describe("开始时间,格式为 YYYY-MM-DD HH:mm:ss"),
  endTime: z.string().describe("结束时间,格式为 YYYY-MM-DD HH:mm:ss"),
})
||
/**
|
||
* 解析GroupAgent用户输入
|
||
* @param userInput 用户输入
|
||
* @param groupInfo 群聊信息
|
||
* @param functionInfo 功能信息
|
||
* @param requestId 请求ID
|
||
* @returns
|
||
*/
|
||
const parseGroupAgentQuery = async (
|
||
userInput: string,
|
||
groupInfo: string,
|
||
functionInfo: string,
|
||
requestId: string
|
||
) => {
|
||
const { langfuseHandler } = await getLangfuse(
|
||
"parseGroupAgentQuery",
|
||
requestId
|
||
)
|
||
const model = await getModel("qwen-72b-instruct-int4/v1")
|
||
const structuredLlm = model.withStructuredOutput(groupAgentConfig, {
|
||
name: "groupAgent",
|
||
})
|
||
return await structuredLlm.invoke(
|
||
`
|
||
当前时间为:${new Date().toLocaleString("zh-CN", { timeZone: "Asia/Shanghai" })}
|
||
所有可用群组信息:${groupInfo}
|
||
所有支持功能信息:${functionInfo}
|
||
你是一个专业的语义解析工程师,给定以下用户输入,帮我解析出群聊ID、群聊名称、功能ID、功能名称、开始时间和结束时间。
|
||
默认功能为总结消息,时间如果用户没有输入则留空
|
||
返回格式定义为:
|
||
\`\`\`
|
||
{
|
||
"chatId": "oc_ef98c2a9229657f99d4ef573a30fe91c",
|
||
"chatName": "MIAI-FE 人工智能部-前端组",
|
||
"functionId": "summary-qwen-72b-instruct-int4",
|
||
"functionName": "总结消息",
|
||
"startTime": "2022-01-01 00:00:00",
|
||
"endTime": "2022-01-01 23:59:59"
|
||
}
|
||
\`\`\`
|
||
如果不包含对应内容,请返回空值。
|
||
|
||
用户输入:
|
||
\`\`\`
|
||
${userInput.replaceAll("`", " ")}
|
||
\`\`\`
|
||
`,
|
||
{
|
||
callbacks: [langfuseHandler],
|
||
}
|
||
)
|
||
}
|
||
|
||
/**
|
||
* 调用LLM模型
|
||
* @param promptName 提示Key
|
||
* @param variables 变量
|
||
* @param requestId 请求ID
|
||
* @param temperature 温度
|
||
* @returns
|
||
*/
|
||
const invoke = async (
|
||
promptName: string,
|
||
variables: Record<string, any>,
|
||
requestId: string,
|
||
temperature = 0
|
||
) => {
|
||
const { langfuse, langfuseHandler } = await getLangfuse("invoke", requestId)
|
||
const prompt = await langfuse.getPrompt(promptName)
|
||
const config = prompt.config as { modelName: keyof typeof modelMap }
|
||
|
||
const langchainTextPrompt = PromptTemplate.fromTemplate(
|
||
prompt.getLangchainPrompt()
|
||
).withConfig({
|
||
metadata: { langfusePrompt: prompt },
|
||
})
|
||
|
||
const chain = langchainTextPrompt.pipe(
|
||
await getModel(config.modelName, temperature)
|
||
)
|
||
|
||
const { content } = await chain.invoke(variables, {
|
||
callbacks: [langfuseHandler],
|
||
})
|
||
|
||
return content
|
||
}
|
||
|
||
const llm = {
|
||
parseGroupAgentQuery,
|
||
invoke,
|
||
}
|
||
|
||
export default llm
|