refactor: 使用 openai SDK 替换 fetch 调用 LLM
All checks were successful
continuous-integration/drone/push Build is passing

- 安装 openai 包
- 重命名 zhipu.ts -> llm.ts
- 使用 OpenAI SDK 替代原生 fetch 实现
- 更新所有导入路径
This commit is contained in:
2026-03-10 11:58:27 +08:00
parent 0cb240791b
commit 6b9fba254d
8 changed files with 61 additions and 62 deletions

View File

@@ -1,4 +1,4 @@
import { getAnswer } from "../zhipu";
import { getAnswer } from "../llm";
import { parseAIGeneratedJSON } from "@/utils/json";
import { PreprocessResult } from "./types";
import { createLogger } from "@/lib/logger";

View File

@@ -1,4 +1,4 @@
import { getAnswer } from "../zhipu";
import { getAnswer } from "../llm";
import { parseAIGeneratedJSON } from "@/utils/json";
import { EntriesGenerationResult } from "./types";
import { createLogger } from "@/lib/logger";

37
src/lib/bigmodel/llm.ts Normal file
View File

@@ -0,0 +1,37 @@
"use server";
import OpenAI from "openai";

// Cap each LLM request at 30 s. The pre-SDK fetch implementation enforced
// this with an AbortController; the refactor dropped it, so a hung upstream
// would stall callers indefinitely. The SDK's `timeout` option restores it.
const LLM_TIMEOUT_MS = 30_000;

// Zhipu exposes an OpenAI-compatible endpoint, so the official SDK is reused
// with only the base URL and API key swapped.
const openai = new OpenAI({
  apiKey: process.env.ZHIPU_API_KEY,
  baseURL: "https://open.bigmodel.cn/api/paas/v4",
  timeout: LLM_TIMEOUT_MS,
});

// Restrict roles to the three chat roles this app actually sends. Each member
// is structurally assignable to the SDK's ChatCompletionMessageParam, so no
// cast is needed below.
type Messages = Array<
  | { role: "system"; content: string }
  | { role: "user"; content: string }
  | { role: "assistant"; content: string }
>;

/**
 * Ask the configured LLM for a completion.
 *
 * Accepts either a bare prompt string (wrapped as a single user message)
 * or a full message array.
 *
 * @returns the assistant reply, trimmed.
 * @throws Error with message "AI API 返回空响应" when the response carries no
 *         content; the OpenAI SDK's own errors propagate on HTTP/timeout
 *         failures.
 */
async function getAnswer(prompt: string): Promise<string>;
async function getAnswer(prompt: Messages): Promise<string>;
async function getAnswer(prompt: string | Messages): Promise<string> {
  const messages: Messages =
    typeof prompt === "string" ? [{ role: "user", content: prompt }] : prompt;

  // NOTE(review): the old fetch implementation also sent Zhipu's non-standard
  // `thinking: { type: "disabled" }` body field; it is not representable in
  // the SDK's typed params — confirm whether the deployed model family
  // still needs it.
  const response = await openai.chat.completions.create({
    // Model is configurable per environment; glm-4 is the fallback.
    model: process.env.ZHIPU_MODEL_NAME || "glm-4",
    messages,
    temperature: 0.2,
  });

  const content = response.choices[0]?.message?.content;
  if (!content) {
    throw new Error("AI API 返回空响应");
  }
  return content.trim();
}

export { getAnswer };

View File

@@ -1,4 +1,4 @@
import { getAnswer } from "../zhipu";
import { getAnswer } from "../llm";
import { parseAIGeneratedJSON } from "@/utils/json";
import { LanguageDetectionResult, TranslationLLMResponse } from "./types";
import { createLogger } from "@/lib/logger";

View File

@@ -1,58 +0,0 @@
"use server";

// Minimal chat-message shape sent to the Zhipu chat-completions endpoint.
// role is free-form here ("system" | "user" | "assistant" in practice).
type Messages = { role: string; content: string; }[];

// Abort any LLM request still pending after 30 seconds.
const LLM_TIMEOUT_MS = 30000;

/**
 * Low-level call to Zhipu's chat-completions HTTP endpoint.
 *
 * @param messages chat history to send.
 * @param model model name; defaults to the ZHIPU_MODEL_NAME env var.
 * @returns the parsed JSON response body.
 * @throws Error ("API 调用失败: …") on a non-2xx status; an abort error when
 *         the timeout fires.
 */
async function callZhipuAPI(
  messages: Messages,
  model = process.env.ZHIPU_MODEL_NAME,
) {
  // Enforce the timeout by aborting the in-flight fetch.
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), LLM_TIMEOUT_MS);
  const url = "https://open.bigmodel.cn/api/paas/v4/chat/completions";
  const response = await fetch(url, {
    method: "POST",
    headers: {
      Authorization: "Bearer " + process.env.ZHIPU_API_KEY,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      model: model,
      messages: messages,
      temperature: 0.2,
      // Zhipu-specific field: disable the model's "thinking" mode.
      thinking: {
        type: "disabled",
      },
    }),
    signal: controller.signal,
  });
  // NOTE(review): if fetch rejects (e.g. on abort), clearTimeout is skipped —
  // harmless for a one-shot timer, but try/finally would be cleaner.
  clearTimeout(timeoutId);
  if (!response.ok) {
    throw new Error(`API 调用失败: ${response.status} ${response.statusText}`);
  }
  return await response.json();
}

/**
 * Ask the LLM for an answer. Accepts either a bare prompt string
 * (wrapped as a single user message) or a full message array.
 *
 * @returns the assistant reply, trimmed.
 * @throws Error ("AI API 返回空响应") when the response carries no content.
 */
async function getAnswer(prompt: string): Promise<string>;
async function getAnswer(prompt: Messages): Promise<string>;
async function getAnswer(prompt: string | Messages): Promise<string> {
  const messages = typeof prompt === "string"
    ? [{ role: "user", content: prompt }]
    : prompt;
  const response = await callZhipuAPI(messages);
  if (!response.choices?.[0]?.message?.content) {
    throw new Error("AI API 返回空响应");
  }
  return response.choices[0].message.content.trim();
}

export { getAnswer };

View File

@@ -8,7 +8,7 @@ import {
import { ValidateError } from "@/lib/errors";
import { createLogger } from "@/lib/logger";
import { serviceTranslateText } from "./translator-service";
import { getAnswer } from "@/lib/bigmodel/zhipu";
import { getAnswer } from "@/lib/bigmodel/llm";
const log = createLogger("translator-action");