diff --git a/backend/.env.example b/backend/.env.example index de2fec5..3b20391 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -61,3 +61,8 @@ JWT_EXPIRE_HOURS=168 # 服务启动时自动创建的管理员账号 ADMIN_PHONE=15549380526 ADMIN_PASSWORD=lam1988324 + +# =============== GLM AI 配置 =============== +# 智谱 GLM API 配置 (用于生成标题和标签) +GLM_API_KEY=your-glm-api-key-here +GLM_MODEL=glm-4.7-flash diff --git a/backend/app/core/config.py b/backend/app/core/config.py index 9be622c..b7ca06e 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -39,6 +39,10 @@ class Settings(BaseSettings): ADMIN_PHONE: str = "" ADMIN_PASSWORD: str = "" + # GLM AI 配置 + GLM_API_KEY: str = "" + GLM_MODEL: str = "glm-4.7-flash" + @property def LATENTSYNC_DIR(self) -> Path: """LatentSync 目录路径 (动态计算)""" diff --git a/backend/app/services/glm_service.py b/backend/app/services/glm_service.py index 56e2427..0ce428d 100644 --- a/backend/app/services/glm_service.py +++ b/backend/app/services/glm_service.py @@ -1,19 +1,29 @@ """ GLM AI 服务 -使用智谱 GLM-4.7-Flash 生成标题和标签 +使用智谱 GLM 生成标题和标签 """ import json import re -import httpx from loguru import logger +from zai import ZhipuAiClient + +from app.core.config import settings class GLMService: """GLM AI 服务""" - API_URL = "https://open.bigmodel.cn/api/paas/v4/chat/completions" - API_KEY = "5915240ea48d4e93b454bc2412d1cc54.e054ej4pPqi9G6rc" + def __init__(self): + self.client = None + + def _get_client(self): + """获取或创建 ZhipuAI 客户端""" + if self.client is None: + if not settings.GLM_API_KEY: + raise Exception("GLM_API_KEY 未配置") + self.client = ZhipuAiClient(api_key=settings.GLM_API_KEY) + return self.client async def generate_title_tags(self, text: str) -> dict: """ 
"Authorization": f"Bearer {self.API_KEY}" - }, - json={ - "model": "glm-4-flash", - "messages": [{"role": "user", "content": prompt}], - "max_tokens": 500, - "temperature": 0.7 - } - ) - response.raise_for_status() - data = response.json() + client = self._get_client() + logger.info(f"Calling GLM API with model: {settings.GLM_MODEL}") + + response = client.chat.completions.create( + model=settings.GLM_MODEL, + messages=[{"role": "user", "content": prompt}], + thinking={"type": "disabled"}, # 禁用思考模式,加快响应 + max_tokens=500, + temperature=0.7 + ) - # 提取生成的内容 - content = data["choices"][0]["message"]["content"] - logger.info(f"GLM response: {content}") + # 提取生成的内容 + content = response.choices[0].message.content + logger.info(f"GLM response (model: {settings.GLM_MODEL}): {content}") - # 解析 JSON - result = self._parse_json_response(content) - return result + # 解析 JSON + result = self._parse_json_response(content) + return result - except httpx.HTTPError as e: - logger.error(f"GLM API request failed: {e}") - raise Exception(f"AI 服务请求失败: {str(e)}") except Exception as e: logger.error(f"GLM service error: {e}") raise Exception(f"AI 生成失败: {str(e)}")