feat(llm): 增强 LLM 服务的健壮性与容错能力

- 新增模型降级机制,当主模型失败时自动尝试备选模型列表【FALLBACK_MODELS】
- 增强 `_chat` 方法,支持空返回检测、json_mode 回退和多重错误处理
- 重构 `_parse_json` 方法,实现五重容错解析策略以应对不同模型的输出格式
- 为 `generate_copy`、`generate_copy_with_reference` 和 `analyze_hotspots` 方法添加重试逻辑,在 JSON 解析失败时自动关闭 json_mode 重试

🔧 chore(config): 更新默认模型配置与安全令牌

- 将默认 LLM 模型从 `gemini-3-flash-preview` 更改为 `deepseek-v3`
- 更新 `xsec_token` 安全令牌(注意:令牌以明文提交到版本库,存在泄露风险,建议改用环境变量或密钥管理服务)

 feat(sd): 集成 ReActor 换脸功能并扩展人设主题池

- 在 `SDService` 中新增头像管理静态方法 (`load_face_image`, `save_face_image`) 和 ReActor 参数构建方法
- 为 `txt2img` 方法添加 `face_image` 参数,支持在生成图片时自动换脸
- 在 `main.py` 的 Web UI 中新增头像上传、预览与管理界面
- 扩展 `generate_images` 函数,支持根据复选框状态启用换脸功能
- 重构人设系统,为 24 种预设人设分别定义专属的【主题池】和【评论关键词池】,并实现人设切换时的自动联动更新
- 在自动化发布 (`auto_publish_once`) 和定时调度 (`_scheduler_loop`) 中集成换脸选项

🖼 chore(assets): 添加新图片资源

- 新增图片资源文件:`beauty.png`, `my_face.png`, `myself.jpg`, `zjz.png`
This commit is contained in:
zhoujie 2026-02-09 23:08:10 +08:00
parent 500e47ebcb
commit 358b957f5d
8 changed files with 809 additions and 87 deletions

BIN
beauty.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.5 MiB

View File

@ -3,7 +3,7 @@
"base_url": "https://wolfai.top/v1",
"sd_url": "http://127.0.0.1:7861",
"mcp_url": "http://localhost:18060/mcp",
"model": "gemini-3-flash-preview",
"model": "deepseek-v3",
"persona": "温柔知性的时尚博主",
"auto_reply_enabled": false,
"schedule_enabled": false,
@ -21,5 +21,5 @@
"base_url": "https://wolfai.top/v1"
}
],
"xsec_token": "ABdAEbqP9ScgelmyolJxsnpCr_e645SCpnub2dLZJc4Ck="
"xsec_token": "ABfkw0sdbz9Lf-js1d83biryHO6o13nCCPwPbVK6eGYR8="
}

View File

@ -224,6 +224,9 @@ PROMPT_COPY_WITH_REFERENCE = """
class LLMService:
"""LLM API 服务封装"""
# 当主模型返回空内容时,依次尝试的备选模型列表
FALLBACK_MODELS = ["deepseek-v3", "gemini-2.5-flash", "deepseek-v3.1"]
def __init__(self, api_key: str, base_url: str, model: str = "gpt-3.5-turbo"):
self.api_key = api_key
self.base_url = base_url.rstrip("/")
@ -231,7 +234,7 @@ class LLMService:
def _chat(self, system_prompt: str, user_message: str,
          json_mode: bool = True, temperature: float = 0.8) -> str:
    """Low-level chat call with empty-reply detection, json_mode fallback
    and automatic model degradation.

    Tries ``self.model`` first, then each entry of ``FALLBACK_MODELS``
    (deduplicated) until one returns non-empty content.

    Args:
        system_prompt: System-role prompt text.
        user_message: User-role message text.
        json_mode: Request a JSON object via ``response_format``; on an
            empty reply or a provider error the request is retried once
            without it (some providers reject ``response_format``).
        temperature: Sampling temperature.

    Returns:
        Non-empty assistant message content.

    Raises:
        TimeoutError: Every candidate model timed out.
        ConnectionError: HTTP-level failure on the last candidate.
        RuntimeError: Every model returned empty content, or another
            unexpected error occurred on the last candidate.
    """
    headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
    }
    if json_mode:
        user_message = user_message + "\n请以json格式返回。"
    # Candidate models to try: primary model first, then fallbacks (deduplicated).
    models_to_try = [self.model] + [m for m in self.FALLBACK_MODELS if m != self.model]
    last_error = None
    for model_idx, current_model in enumerate(models_to_try):
        payload = {
            "model": current_model,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message},
            ],
            "temperature": temperature,
        }
        if json_mode:
            payload["response_format"] = {"type": "json_object"}
        try:
            resp = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers, json=payload, timeout=90
            )
            resp.raise_for_status()
            content = resp.json()["choices"][0]["message"]["content"]
            # Empty reply while json_mode is on: retry once without response_format.
            if not content or not content.strip():
                if json_mode:
                    logger.warning("[%s] LLM 返回空内容 (json_mode=True),关闭 json_mode 回退重试...", current_model)
                    payload.pop("response_format", None)
                    resp2 = requests.post(
                        f"{self.base_url}/chat/completions",
                        headers=headers, json=payload, timeout=90
                    )
                    resp2.raise_for_status()
                    content = resp2.json()["choices"][0]["message"]["content"]
                if not content or not content.strip():
                    # This model cannot produce content at all; degrade to the next one.
                    if model_idx < len(models_to_try) - 1:
                        next_model = models_to_try[model_idx + 1]
                        logger.warning("[%s] 返回空内容,自动降级到模型: %s", current_model, next_model)
                        continue
                    raise RuntimeError(f"所有模型均返回空内容(已尝试: {', '.join(models_to_try[:model_idx+1])})")
            if model_idx > 0:
                logger.info("模型降级成功: %s%s", self.model, current_model)
            return content
        except requests.exceptions.HTTPError:
            status = getattr(resp, 'status_code', 0)
            body = getattr(resp, 'text', '')[:300]
            # Some models/providers reject response_format: drop it and retry once.
            if json_mode and status in (400, 422, 500):
                logger.warning("[%s] json_mode 请求失败 (HTTP %s),关闭 response_format 回退重试...", current_model, status)
                payload.pop("response_format", None)
                try:
                    resp2 = requests.post(
                        f"{self.base_url}/chat/completions",
                        headers=headers, json=payload, timeout=90
                    )
                    resp2.raise_for_status()
                    content = resp2.json()["choices"][0]["message"]["content"]
                    if content and content.strip():
                        if model_idx > 0:
                            logger.info("模型降级成功: %s%s", self.model, current_model)
                        return content
                except Exception:
                    pass
            # Current model failed over HTTP; move on to the next candidate.
            last_error = ConnectionError(f"LLM API 错误 ({status}): {body}")
            if model_idx < len(models_to_try) - 1:
                logger.warning("[%s] HTTP %s 失败,降级到: %s", current_model, status, models_to_try[model_idx + 1])
                continue
            raise last_error
        except requests.exceptions.Timeout:
            last_error = TimeoutError(f"[{current_model}] LLM 请求超时")
            if model_idx < len(models_to_try) - 1:
                logger.warning("[%s] 请求超时,降级到: %s", current_model, models_to_try[model_idx + 1])
                continue
            raise TimeoutError("LLM 请求超时,所有模型均超时,请检查网络")
        except (ConnectionError, RuntimeError):
            # Errors we raised ourselves above propagate unchanged.
            raise
        except Exception as e:
            last_error = RuntimeError(f"LLM 调用异常: {e}")
            if model_idx < len(models_to_try) - 1:
                logger.warning("[%s] 调用异常 (%s),降级到: %s", current_model, e, models_to_try[model_idx + 1])
                continue
            raise last_error
    # Defensive: unreachable while models_to_try is non-empty.
    raise last_error or RuntimeError("LLM 调用失败: 未知错误")
def _parse_json(self, text: str) -> dict:
"""从 LLM 返回文本中解析 JSON"""
cleaned = re.sub(r"```json\s*|```", "", text).strip()
return json.loads(cleaned)
"""从 LLM 返回文本中解析 JSON多重容错"""
if not text or not text.strip():
raise ValueError("LLM 返回内容为空,无法解析 JSON")
raw = text.strip()
# 策略1: 去除 markdown 代码块
cleaned = re.sub(r"```(?:json)?\s*", "", raw)
cleaned = re.sub(r"```", "", cleaned).strip()
# 策略2: 直接解析
try:
return json.loads(cleaned)
except json.JSONDecodeError:
pass
# 策略3: 提取最外层的 { ... } 块
match = re.search(r'(\{[\s\S]*\})', cleaned)
if match:
try:
return json.loads(match.group(1))
except json.JSONDecodeError:
pass
# 策略4: 逐行查找 JSON 开始位置
for i, ch in enumerate(cleaned):
if ch == '{':
try:
return json.loads(cleaned[i:])
except json.JSONDecodeError:
pass
break
# 策略5: 尝试修复常见问题(尾部多余逗号、缺少闭合括号)
try:
# 去除尾部多余逗号
fixed = re.sub(r',\s*([}\]])', r'\1', cleaned)
return json.loads(fixed)
except json.JSONDecodeError:
pass
# 全部失败,打日志并抛出有用的错误信息
preview = raw[:500] if len(raw) > 500 else raw
logger.error("JSON 解析全部失败LLM 原始返回: %s", preview)
raise ValueError(
f"LLM 返回内容无法解析为 JSON。\n"
f"返回内容前200字: {raw[:200]}\n\n"
f"💡 可能原因: 模型不支持 JSON 输出格式,建议更换模型重试"
)
# ---------- 业务方法 ----------
@ -290,51 +406,96 @@ class LLMService:
return []
def generate_copy(self, topic: str, style: str) -> dict:
    """Generate a XiaoHongShu post (with retry logic).

    Attempt 1 uses json_mode; if the reply cannot be parsed as JSON,
    attempt 2 retries with json_mode off for providers that do not
    support ``response_format``.

    Args:
        topic: Post topic.
        style: Writing style.

    Returns:
        Parsed post dict; ``title`` is capped at 20 characters and
        ``content`` (when present) is de-AI post-processed.

    Raises:
        RuntimeError: Both attempts produced unparseable JSON.
    """
    last_error = None
    for attempt in range(2):
        try:
            # Second attempt drops json_mode for incompatible models.
            use_json_mode = (attempt == 0)
            content = self._chat(
                PROMPT_COPYWRITING,
                f"主题:{topic}\n风格:{style}",
                json_mode=use_json_mode,
                temperature=0.92,
            )
            data = self._parse_json(content)
            # Hard cap on title length.
            title = data.get("title", "")
            if len(title) > 20:
                title = title[:20]
            data["title"] = title
            # De-AI post-processing of the body text.
            if "content" in data:
                data["content"] = self._humanize_content(data["content"])
            return data
        except (json.JSONDecodeError, ValueError) as e:
            last_error = e
            if attempt == 0:
                logger.warning("文案生成 JSON 解析失败 (尝试 %d/2): %s,将关闭 json_mode 重试", attempt + 1, e)
                continue
            else:
                logger.error("文案生成 JSON 解析失败 (尝试 %d/2): %s", attempt + 1, e)
    raise RuntimeError(f"文案生成失败: LLM 返回无法解析为 JSON已重试 2 次。\n最后错误: {last_error}")
def generate_copy_with_reference(self, topic: str, style: str,
                                 reference_notes: str) -> dict:
    """Generate a post using trending notes as reference (with retry).

    Same retry scheme as :meth:`generate_copy`: attempt 1 with
    json_mode, attempt 2 without it.

    Args:
        topic: Post topic.
        style: Writing style.
        reference_notes: Trending-note text injected into the prompt.

    Returns:
        Parsed post dict (title capped at 20 chars, content humanized).

    Raises:
        RuntimeError: Both attempts produced unparseable JSON.
    """
    prompt = PROMPT_COPY_WITH_REFERENCE.format(
        reference_notes=reference_notes, topic=topic, style=style
    )
    last_error = None
    for attempt in range(2):
        try:
            use_json_mode = (attempt == 0)
            content = self._chat(
                prompt, f"请创作关于「{topic}」的小红书笔记",
                json_mode=use_json_mode, temperature=0.92,
            )
            data = self._parse_json(content)
            title = data.get("title", "")
            if len(title) > 20:
                data["title"] = title[:20]
            # De-AI post-processing.
            if "content" in data:
                data["content"] = self._humanize_content(data["content"])
            return data
        except (json.JSONDecodeError, ValueError) as e:
            last_error = e
            if attempt == 0:
                logger.warning("参考文案生成 JSON 解析失败 (尝试 %d/2): %s,将关闭 json_mode 重试", attempt + 1, e)
                continue
            else:
                logger.error("参考文案生成 JSON 解析失败 (尝试 %d/2): %s", attempt + 1, e)
    raise RuntimeError(f"参考文案生成失败: LLM 返回无法解析为 JSON已重试 2 次。\n最后错误: {last_error}")
def analyze_hotspots(self, feed_data: str) -> dict:
    """Analyze trending-content data (with retry logic).

    Attempt 1 with json_mode, attempt 2 without it when the reply
    cannot be parsed as JSON.

    Args:
        feed_data: Serialized trending-note data for the prompt.

    Returns:
        Parsed analysis dict.

    Raises:
        RuntimeError: Both attempts produced unparseable JSON.
    """
    prompt = PROMPT_HOTSPOT_ANALYSIS.format(feed_data=feed_data)
    last_error = None
    for attempt in range(2):
        try:
            use_json_mode = (attempt == 0)
            content = self._chat(prompt, "请分析以上热门笔记数据",
                                 json_mode=use_json_mode)
            return self._parse_json(content)
        except (json.JSONDecodeError, ValueError) as e:
            last_error = e
            if attempt == 0:
                logger.warning("热点分析 JSON 解析失败 (尝试 %d/2): %s,将关闭 json_mode 重试", attempt + 1, e)
                continue
            else:
                logger.error("热点分析 JSON 解析失败 (尝试 %d/2): %s", attempt + 1, e)
    raise RuntimeError(f"热点分析失败: LLM 返回无法解析为 JSON已重试 2 次。\n最后错误: {last_error}")
@staticmethod
def _humanize_content(text: str) -> str:

501
main.py
View File

@ -19,7 +19,7 @@ import matplotlib.pyplot as plt
from config_manager import ConfigManager, OUTPUT_DIR
from llm_service import LLMService
from sd_service import SDService, DEFAULT_NEGATIVE
from sd_service import SDService, DEFAULT_NEGATIVE, FACE_IMAGE_PATH
from mcp_client import MCPClient, get_mcp_client
# ================= matplotlib 中文字体配置 =================
@ -262,6 +262,31 @@ def save_my_user_id(user_id_input):
return f"✅ 用户 ID 已保存: `{uid}`"
# ================= 头像/换脸管理 =================
def upload_face_image(img):
    """Persist an uploaded avatar image and report the outcome.

    Returns a (preview_image, status_message) tuple; the preview is
    None whenever saving was not possible.
    """
    if img is None:
        return None, "❌ 请上传头像图片"
    try:
        # A filesystem path may be handed in instead of a PIL image.
        if isinstance(img, str) and os.path.isfile(img):
            img = Image.open(img).convert("RGB")
        elif not isinstance(img, Image.Image):
            return None, "❌ 无法识别图片格式"
        saved_path = SDService.save_face_image(img)
        return img, f"✅ 头像已保存至 {os.path.basename(saved_path)}"
    except Exception as e:
        return None, f"❌ 保存失败: {e}"
def load_saved_face_image():
    """Return the previously saved avatar (if any) plus a status line."""
    saved = SDService.load_face_image()
    if saved is None:
        return None, " 尚未设置头像"
    return saved, "✅ 已加载保存的头像"
def generate_copy(model, topic, style):
"""生成文案"""
api_key, base_url, _ = _get_llm_config()
@ -284,20 +309,33 @@ def generate_copy(model, topic, style):
return "", "", "", "", f"❌ 生成失败: {e}"
def generate_images(sd_url, prompt, neg_prompt, model, steps, cfg_scale, face_swap_on, face_img):
    """Generate images via SD WebUI, optionally face-swapping with ReActor.

    Args:
        sd_url: SD WebUI base URL.
        prompt: Positive prompt.
        neg_prompt: Negative prompt.
        model: SD checkpoint name; empty/None aborts with an error message.
        steps: Sampling steps (coerced to int).
        cfg_scale: CFG scale (coerced to float).
        face_swap_on: Whether to enable ReActor face swapping.
        face_img: Avatar as a PIL image or a file path; when missing,
            falls back to the saved default avatar.

    Returns:
        (gallery_images, state_images, status_message); the image slots
        are None/[] on failure.
    """
    if not model:
        return None, [], "❌ 未选择 SD 模型"
    try:
        svc = SDService(sd_url)
        # Resolve the avatar only when face swapping is requested.
        face_image = None
        if face_swap_on and face_img is not None:
            if isinstance(face_img, Image.Image):
                face_image = face_img
            elif isinstance(face_img, str) and os.path.isfile(face_img):
                face_image = Image.open(face_img).convert("RGB")
        if face_swap_on and face_image is None:
            # Fall back to the avatar saved at the default path.
            face_image = SDService.load_face_image()
        images = svc.txt2img(
            prompt=prompt,
            negative_prompt=neg_prompt,
            model=model,
            steps=int(steps),
            cfg_scale=float(cfg_scale),
            face_image=face_image,
        )
        swap_hint = " (已换脸)" if face_image else ""
        return images, images, f"✅ 生成 {len(images)} 张图片{swap_hint}"
    except Exception as e:
        logger.error("图片生成失败: %s", e)
        return None, [], f"❌ 绘图失败: {e}"
@ -1025,7 +1063,289 @@ DEFAULT_PERSONAS = [
RANDOM_PERSONA_LABEL = "🎲 随机人设(每次自动切换)"
# ================= 主题池 =================
# ================= 人设 → 分类关键词/主题池映射 =================
# 每个人设对应一组相符的评论关键词和主题,切换人设时自动同步
PERSONA_POOL_MAP = {
# ---- 时尚穿搭类 ----
"温柔知性的时尚博主": {
"topics": [
"春季穿搭", "通勤穿搭", "约会穿搭", "显瘦穿搭", "法式穿搭",
"极简穿搭", "氛围感穿搭", "一衣多穿", "秋冬叠穿", "夏日清凉穿搭",
"生活美学", "衣橱整理", "配色技巧", "基础款穿搭", "轻熟风穿搭",
],
"keywords": [
"穿搭", "ootd", "早春穿搭", "通勤穿搭", "显瘦", "法式穿搭",
"极简风", "氛围感", "轻熟风", "高级感穿搭", "配色",
],
},
"元气满满的大学生": {
"topics": [
"学生党穿搭", "宿舍美食", "平价好物", "校园生活", "学生党护肤",
"期末复习", "社团活动", "寝室改造", "奶茶测评", "拍照打卡地",
"一人食食谱", "考研经验", "实习经验", "省钱攻略",
],
"keywords": [
"学生党", "平价好物", "宿舍", "校园", "奶茶", "探店",
"拍照", "省钱", "大学生活", "期末", "开学", "室友",
],
},
"30岁都市白领丽人": {
"topics": [
"通勤穿搭", "职场干货", "面试技巧", "简历优化", "时间管理",
"理财入门", "轻熟风穿搭", "职场妆容", "咖啡探店", "高效工作法",
"副业分享", "自律生活", "下班后充电", "职场人际关系",
],
"keywords": [
"通勤穿搭", "职场", "面试", "理财", "自律", "高效",
"咖啡", "轻熟", "白领", "上班族", "时间管理", "副业",
],
},
"精致妈妈": {
"topics": [
"育儿经验", "家居收纳", "辅食制作", "亲子游", "母婴好物",
"宝宝穿搭", "早教启蒙", "产后恢复", "家常菜做法", "小户型收纳",
"家庭教育", "孕期护理", "宝宝辅食", "妈妈穿搭",
],
"keywords": [
"育儿", "收纳", "辅食", "母婴", "亲子", "早教",
"宝宝", "家居", "待产", "产后", "妈妈", "家常菜",
],
},
"文艺青年摄影师": {
"topics": [
"旅行攻略", "小众旅行地", "拍照打卡地", "城市citywalk", "古镇旅行",
"手机摄影技巧", "胶片摄影", "人像摄影", "风光摄影", "街拍",
"咖啡探店", "文艺书店", "展览打卡", "独立书店",
],
"keywords": [
"旅行", "摄影", "打卡", "citywalk", "胶片", "拍照",
"小众", "展览", "文艺", "街拍", "风光", "人像",
],
},
"健身达人营养师": {
"topics": [
"减脂餐分享", "居家健身", "帕梅拉跟练", "跑步入门", "体态矫正",
"增肌餐", "蛋白质补充", "运动穿搭", "健身房攻略", "马甲线养成",
"热量计算", "健康早餐", "运动恢复", "减脂食谱",
],
"keywords": [
"减脂", "健身", "减脂餐", "蛋白质", "体态", "马甲线",
"帕梅拉", "跑步", "热量", "增肌", "运动", "健康餐",
],
},
"资深美妆博主": {
"topics": [
"妆容教程", "眼妆教程", "唇妆合集", "底妆测评", "护肤心得",
"防晒测评", "学生党平价护肤", "敏感肌护肤", "美白攻略",
"成分党护肤", "换季护肤", "早C晚A护肤", "抗老护肤",
],
"keywords": [
"护肤", "化妆教程", "眼影", "口红", "底妆", "防晒",
"美白", "敏感肌", "成分", "平价", "测评", "粉底",
],
},
"独居女孩": {
"topics": [
"独居生活", "租房改造", "氛围感房间", "一人食食谱", "好物分享",
"香薰推荐", "居家好物", "断舍离", "仪式感生活", "独居安全",
"解压方式", "emo急救指南", "桌面布置", "小户型装修",
],
"keywords": [
"独居", "租房改造", "好物", "氛围感", "一人食", "仪式感",
"解压", "居家", "香薰", "ins风", "房间", "断舍离",
],
},
"甜品烘焙爱好者": {
"topics": [
"烘焙教程", "0失败甜品", "下午茶推荐", "蛋糕教程", "面包制作",
"饼干烘焙", "奶油裱花", "巧克力甜品", "网红甜品", "便当制作",
"早餐食谱", "咖啡配甜品", "节日甜品", "低卡甜品",
],
"keywords": [
"烘焙", "甜品", "蛋糕", "面包", "下午茶", "曲奇",
"裱花", "抹茶", "巧克力", "奶油", "食谱", "烤箱",
],
},
"数码科技女生": {
"topics": [
"iPad生产力", "手机摄影技巧", "好用App推荐", "电子产品测评",
"桌面布置", "数码好物", "耳机测评", "平板学习", "生产力工具",
"手机壳推荐", "充电设备", "智能家居",
],
"keywords": [
"iPad", "App推荐", "数码", "测评", "手机", "耳机",
"桌面", "科技", "电子产品", "平板", "生产力", "充电",
],
},
"小镇姑娘在大城市打拼": {
"topics": [
"省钱攻略", "成长日记", "平价好物", "租房改造", "副业分享",
"理财入门", "独居生活", "面试技巧", "通勤穿搭", "自律生活",
"城市生存指南", "女性成长", "攒钱计划",
],
"keywords": [
"省钱", "平价", "租房", "副业", "理财", "成长",
"自律", "打工", "攒钱", "面试", "独居", "北漂",
],
},
"中医养生爱好者": {
"topics": [
"节气养生", "食疗方子", "泡脚养生", "体质调理", "艾灸",
"中药茶饮", "作息调整", "经络按摩", "养胃食谱", "祛湿方法",
"睡眠改善", "女性调理", "养生汤", "二十四节气",
],
"keywords": [
"养生", "食疗", "泡脚", "中医", "艾灸", "祛湿",
"节气", "体质", "养胃", "经络", "调理", "药膳",
],
},
"二次元coser": {
"topics": [
"cos日常", "动漫周边", "漫展攻略", "cos化妆教程", "假发造型",
"lolita穿搭", "二次元好物", "手办收藏", "动漫推荐", "cos道具制作",
"jk穿搭", "谷子收藏", "二次元摄影",
],
"keywords": [
"cos", "动漫", "二次元", "漫展", "lolita", "手办",
"jk", "假发", "谷子", "周边", "番剧", "coser",
],
},
"北漂程序媛": {
"topics": [
"高效工作法", "程序员日常", "好用App推荐", "副业分享", "自律生活",
"时间管理", "iPad生产力", "解压方式", "通勤穿搭", "理财入门",
"独居生活", "技术学习", "面试经验", "桌面布置",
],
"keywords": [
"程序员", "高效", "App推荐", "自律", "副业", "iPad",
"技术", "工作", "北漂", "面试", "代码", "桌面",
],
},
"复古穿搭博主": {
"topics": [
"vintage风穿搭", "中古饰品", "复古妆容", "二手vintage", "古着穿搭",
"法式穿搭", "复古包包", "跳蚤市场", "旧物改造", "港风穿搭",
"文艺穿搭", "配饰搭配", "vintage探店",
],
"keywords": [
"vintage", "复古", "中古", "古着", "港风", "法式",
"饰品", "二手", "旧物", "跳蚤市场", "复古穿搭", "文艺",
],
},
"考研上岸学姐": {
"topics": [
"考研经验", "英语学习方法", "书单推荐", "时间管理", "自律生活",
"考研择校", "政治复习", "数学刷题", "考研英语", "复试经验",
"专业课复习", "考研心态", "背诵技巧", "刷题方法",
],
"keywords": [
"考研", "英语学习", "书单", "自律", "学习方法", "上岸",
"刷题", "备考", "复习", "笔记", "时间管理", "择校",
],
},
"新手养猫人": {
"topics": [
"养猫日常", "猫粮测评", "猫咪用品", "新手养宠指南", "猫咪健康",
"猫咪行为", "驱虫攻略", "猫砂测评", "猫玩具推荐", "猫咪拍照",
"多猫家庭", "领养代替购买", "猫咪绝育",
],
"keywords": [
"养猫", "猫粮", "猫咪", "宠物", "猫砂", "驱虫",
"铲屎官", "喵喵", "猫玩具", "猫零食", "新手养猫", "猫咪日常",
],
},
"咖啡重度爱好者": {
"topics": [
"咖啡探店", "手冲咖啡", "咖啡豆推荐", "咖啡器具", "拿铁艺术",
"家庭咖啡", "咖啡配甜品", "独立咖啡馆", "冷萃咖啡", "咖啡知识",
"意式咖啡", "探店打卡", "咖啡拉花",
],
"keywords": [
"咖啡", "手冲", "拿铁", "探店", "咖啡豆", "美式",
"咖啡馆", "意式", "冷萃", "拉花", "咖啡器具", "独立咖啡馆",
],
},
"极简主义生活家": {
"topics": [
"断舍离", "极简生活", "收纳技巧", "高质量生活", "减法生活",
"胶囊衣橱", "极简护肤", "环保生活", "数字断舍离", "极简穿搭",
"极简房间", "消费降级", "物欲管理",
],
"keywords": [
"断舍离", "极简", "收纳", "高质量", "减法", "胶囊衣橱",
"简约", "环保", "整理", "少即是多", "极简风", "质感",
],
},
"汉服爱好者": {
"topics": [
"汉服穿搭", "国风穿搭", "传统文化", "汉服发型", "汉服配饰",
"汉服拍照", "古风妆容", "汉服日常", "汉服科普", "形制科普",
"古风摄影", "新中式穿搭", "汉服探店",
],
"keywords": [
"汉服", "国风", "传统文化", "古风", "新中式", "形制",
"发簪", "明制", "宋制", "唐制", "汉服日常", "古风摄影",
],
},
"插画师小姐姐": {
"topics": [
"手绘教程", "创作灵感", "iPad绘画", "插画分享", "水彩教程",
"Procreate技巧", "配色方案", "角色设计", "头像绘制", "手账素材",
"接稿经验", "画师日常", "绘画工具推荐",
],
"keywords": [
"插画", "手绘", "Procreate", "画画", "iPad绘画", "水彩",
"配色", "创作", "画师", "手账", "教程", "素材",
],
},
"海归女孩": {
"topics": [
"中西文化差异", "海外生活", "留学经验", "英语学习方法", "海归求职",
"旅行攻略", "异国美食", "海外好物", "文化冲击", "语言学习",
"签证攻略", "海归适应", "国外探店",
],
"keywords": [
"留学", "海归", "英语", "海外", "文化差异", "旅行",
"异国", "签证", "语言", "出国", "求职", "国外",
],
},
"瑜伽老师": {
"topics": [
"瑜伽入门", "冥想练习", "体态矫正", "呼吸法", "居家瑜伽",
"拉伸教程", "肩颈放松", "瑜伽体式", "自律生活", "身心灵",
"瑜伽穿搭", "晨练瑜伽", "睡前瑜伽",
],
"keywords": [
"瑜伽", "冥想", "体态", "拉伸", "放松", "呼吸",
"柔韧", "健康", "自律", "晨练", "入门", "体式",
],
},
"美甲设计师": {
"topics": [
"美甲教程", "流行甲型", "美甲合集", "简约美甲", "法式美甲",
"手绘美甲", "季节美甲", "显白美甲", "美甲配色", "短甲美甲",
"新娘美甲", "美甲工具推荐", "日式美甲",
],
"keywords": [
"美甲", "甲型", "法式美甲", "手绘", "显白", "短甲",
"指甲", "美甲教程", "配色", "日式美甲", "腮红甲", "猫眼甲",
],
},
"家居软装设计师": {
"topics": [
"小户型改造", "氛围感布置", "软装搭配", "家居好物", "收纳技巧",
"客厅布置", "卧室改造", "灯光设计", "绿植布置", "装修避坑",
"北欧风格", "ins风家居", "墙面装饰",
],
"keywords": [
"家居", "软装", "改造", "收纳", "氛围感", "小户型",
"装修", "灯光", "绿植", "北欧", "ins风", "布置",
],
},
}
# 为"随机人设"使用的全量池(兼容旧逻辑)
DEFAULT_TOPICS = [
# 穿搭类
"春季穿搭", "通勤穿搭", "约会穿搭", "显瘦穿搭", "小个子穿搭",
@ -1064,7 +1384,7 @@ DEFAULT_STYLES = [
"知识科普", "经验分享", "清单合集", "对比测评", "沉浸式体验",
]
# ================= 评论关键词池 =================
# 全量评论关键词池(兼容旧逻辑 / 随机人设)
DEFAULT_COMMENT_KEYWORDS = [
# 穿搭时尚
"穿搭", "ootd", "早春穿搭", "通勤穿搭", "显瘦", "小个子穿搭",
@ -1087,6 +1407,77 @@ DEFAULT_COMMENT_KEYWORDS = [
]
def _match_persona_pools(persona_text: str) -> dict | None:
    """Fuzzy-match a persona description to its keyword/topic pools.

    Returns {"topics": [...], "keywords": [...]} from PERSONA_POOL_MAP,
    or None when no persona-specific pool applies (empty text or the
    random-persona label).
    """
    if not persona_text or persona_text == RANDOM_PERSONA_LABEL:
        return None
    # Pass 1: direct substring match against the known persona names.
    for name, pools in PERSONA_POOL_MAP.items():
        if name in persona_text or persona_text in name:
            return pools
    # Pass 2: category hint words ("|"-separated) mapped to a canonical persona.
    category_hints = {
        "时尚|穿搭|搭配|衣服": "温柔知性的时尚博主",
        "大学|学生|校园": "元气满满的大学生",
        "白领|职场|通勤|上班": "30岁都市白领丽人",
        "妈妈|育儿|宝宝|母婴": "精致妈妈",
        "摄影|旅行|旅游|文艺": "文艺青年摄影师",
        "健身|运动|减脂|增肌|营养": "健身达人营养师",
        "美妆|化妆|护肤|美白": "资深美妆博主",
        "独居|租房|一人": "独居女孩",
        "烘焙|甜品|蛋糕|面包": "甜品烘焙爱好者",
        "数码|科技|App|电子": "数码科技女生",
        "小镇|打拼|省钱|攒钱": "小镇姑娘在大城市打拼",
        "中医|养生|食疗|节气": "中医养生爱好者",
        "二次元|cos|动漫|漫展": "二次元coser",
        "程序|代码|开发|码农": "北漂程序媛",
        "复古|vintage|中古|古着": "复古穿搭博主",
        "考研|备考|上岸|学习方法": "考研上岸学姐",
        "猫|铲屎|喵": "新手养猫人",
        "咖啡|手冲|拿铁": "咖啡重度爱好者",
        "极简|断舍离|简约": "极简主义生活家",
        "汉服|国风|传统文化": "汉服爱好者",
        "插画|手绘|画画|绘画": "插画师小姐姐",
        "海归|留学|海外": "海归女孩",
        "瑜伽|冥想|身心灵": "瑜伽老师",
        "美甲|甲型|指甲": "美甲设计师",
        "家居|软装|装修|改造": "家居软装设计师",
    }
    for hint_group, persona_name in category_hints.items():
        if any(hint in persona_text for hint in hint_group.split("|")):
            return PERSONA_POOL_MAP.get(persona_name)
    return None
def get_persona_topics(persona_text: str) -> list[str]:
    """Topic pool for the given persona; falls back to the full pool."""
    matched = _match_persona_pools(persona_text)
    if matched:
        return matched["topics"]
    return DEFAULT_TOPICS
def get_persona_keywords(persona_text: str) -> list[str]:
    """Comment-keyword pool for the persona; falls back to the full pool."""
    matched = _match_persona_pools(persona_text)
    if matched:
        return matched["keywords"]
    return DEFAULT_COMMENT_KEYWORDS
def on_persona_changed(persona_text: str):
    """Sync the comment-keyword and topic pools when the persona changes.

    Args:
        persona_text: The selected persona description.

    Returns:
        (keywords_str, topics_str, hint) — comma-joined pool strings for
        the two textboxes plus a status hint for the UI.
    """
    # Resolve the pools once instead of three separate fuzzy matches
    # (the original called _match_persona_pools via both getters and
    # then a third time directly).
    matched = _match_persona_pools(persona_text)
    if matched:
        keywords = matched["keywords"]
        topics = matched["topics"]
        # Slicing already handles short strings; no length guard needed.
        label = persona_text[:15]
        hint = f"✅ 已切换至「{label}」专属关键词/主题池"
    else:
        keywords = DEFAULT_COMMENT_KEYWORDS
        topics = DEFAULT_TOPICS
        hint = " 使用通用全量关键词/主题池"
    return ", ".join(keywords), ", ".join(topics), hint
def _auto_log_append(msg: str):
"""记录自动化日志"""
ts = datetime.now().strftime("%H:%M:%S")
@ -1122,7 +1513,9 @@ def auto_comment_once(keywords_str, mcp_url, model, persona_text):
return f"🚫 今日评论已达上限 ({DAILY_LIMITS['comments']})"
persona_text = _resolve_persona(persona_text)
keywords = [k.strip() for k in keywords_str.split(",") if k.strip()] if keywords_str else DEFAULT_COMMENT_KEYWORDS
# 如果用户未手动修改关键词池,则使用人设匹配的专属关键词池
persona_keywords = get_persona_keywords(persona_text)
keywords = [k.strip() for k in keywords_str.split(",") if k.strip()] if keywords_str else persona_keywords
keyword = random.choice(keywords)
_auto_log_append(f"🔍 搜索关键词: {keyword}")
@ -1367,9 +1760,9 @@ def auto_favorite_once(keywords_str, fav_count, mcp_url):
return f"❌ 收藏失败: {e}"
def _auto_publish_with_log(topics_str, mcp_url, sd_url_val, sd_model_name, model, face_swap_on=False):
    """One-shot publish plus a synchronized refresh of the automation log.

    Thin wrapper around auto_publish_once that also returns the current
    log text so both UI outputs update together.
    """
    msg = auto_publish_once(topics_str, mcp_url, sd_url_val, sd_model_name, model, face_swap_on=face_swap_on)
    return msg, get_auto_log()
@ -1540,7 +1933,7 @@ def auto_reply_once(max_replies, mcp_url, model, persona_text):
return f"❌ 自动回复失败: {e}"
def auto_publish_once(topics_str, mcp_url, sd_url_val, sd_model_name, model):
def auto_publish_once(topics_str, mcp_url, sd_url_val, sd_model_name, model, face_swap_on=False):
"""一键发布:自动生成文案 → 生成图片 → 本地备份 → 发布到小红书(含限额)"""
try:
if _is_in_cooldown():
@ -1551,7 +1944,7 @@ def auto_publish_once(topics_str, mcp_url, sd_url_val, sd_model_name, model):
topics = [t.strip() for t in topics_str.split(",") if t.strip()] if topics_str else DEFAULT_TOPICS
topic = random.choice(topics)
style = random.choice(DEFAULT_STYLES)
_auto_log_append(f"📝 主题: {topic} | 风格: {style}")
_auto_log_append(f"📝 主题: {topic} | 风格: {style} (主题池: {len(topics)} 个)")
# 生成文案
api_key, base_url, _ = _get_llm_config()
@ -1575,7 +1968,15 @@ def auto_publish_once(topics_str, mcp_url, sd_url_val, sd_model_name, model):
return "❌ SD WebUI 未连接或未选择模型,请先在全局设置中连接"
sd_svc = SDService(sd_url_val)
images = sd_svc.txt2img(prompt=sd_prompt, model=sd_model_name)
# 自动发布也支持换脸
face_image = None
if face_swap_on:
face_image = SDService.load_face_image()
if face_image:
_auto_log_append("🎭 换脸已启用")
else:
_auto_log_append("⚠️ 换脸已启用但未找到头像,跳过换脸")
images = sd_svc.txt2img(prompt=sd_prompt, model=sd_model_name, face_image=face_image)
if not images:
_record_error()
return "❌ 图片生成失败:没有返回图片"
@ -1648,7 +2049,7 @@ def _scheduler_loop(comment_enabled, publish_enabled, reply_enabled, like_enable
fav_min, fav_max, fav_count_per_run,
op_start_hour, op_end_hour,
keywords, topics, mcp_url, sd_url_val, sd_model_name,
model, persona_text):
model, persona_text, face_swap_on=False):
"""后台定时调度循环(含运营时段、冷却、收藏、统计)"""
_auto_log_append("🤖 自动化调度器已启动")
_auto_log_append(f"⏰ 运营时段: {int(op_start_hour)}:00 - {int(op_end_hour)}:00")
@ -1742,7 +2143,7 @@ def _scheduler_loop(comment_enabled, publish_enabled, reply_enabled, like_enable
if publish_enabled and now >= next_publish:
try:
_auto_log_append("--- 🔄 执行自动发布 ---")
msg = auto_publish_once(topics, mcp_url, sd_url_val, sd_model_name, model)
msg = auto_publish_once(topics, mcp_url, sd_url_val, sd_model_name, model, face_swap_on=face_swap_on)
_auto_log_append(msg)
except Exception as e:
_auto_log_append(f"❌ 自动发布异常: {e}")
@ -1781,7 +2182,7 @@ def start_scheduler(comment_on, publish_on, reply_on, like_on, favorite_on,
fav_min, fav_max, fav_count_per_run,
op_start_hour, op_end_hour,
keywords, topics, mcp_url, sd_url_val, sd_model_name,
model, persona_text):
model, persona_text, face_swap_on=False):
"""启动定时自动化"""
global _auto_thread
if _auto_running.is_set():
@ -1807,6 +2208,7 @@ def start_scheduler(comment_on, publish_on, reply_on, like_on, favorite_on,
op_start_hour, op_end_hour,
keywords, topics, mcp_url, sd_url_val, sd_model_name,
model, persona_text),
kwargs={"face_swap_on": face_swap_on},
daemon=True,
)
_auto_thread.start()
@ -2066,6 +2468,38 @@ with gr.Blocks(
)
status_bar = gr.Markdown("🔄 等待连接...")
gr.Markdown("---")
gr.Markdown("#### 🎭 AI 换脸 (ReActor)")
gr.Markdown(
"> 上传你的头像,生成含人物的图片时自动替换为你的脸\n"
"> 需要 SD WebUI 已安装 [ReActor](https://github.com/Gourieff/sd-webui-reactor) 扩展"
)
with gr.Row():
face_image_input = gr.Image(
label="上传头像 (正面清晰照片效果最佳)",
type="pil",
height=180,
scale=1,
)
face_image_preview = gr.Image(
label="当前头像",
type="pil",
height=180,
interactive=False,
value=SDService.load_face_image(),
scale=1,
)
with gr.Row():
btn_save_face = gr.Button("💾 保存头像", variant="primary", size="sm")
face_swap_toggle = gr.Checkbox(
label="🎭 生成图片时启用 AI 换脸",
value=os.path.isfile(FACE_IMAGE_PATH),
interactive=True,
)
face_status = gr.Markdown(
"✅ 头像已就绪" if os.path.isfile(FACE_IMAGE_PATH) else " 尚未设置头像"
)
gr.Markdown("---")
gr.Markdown("#### 🖥️ 系统设置")
with gr.Row():
@ -2415,6 +2849,9 @@ with gr.Blocks(
"> 一键评论引流 + 一键点赞 + 一键收藏 + 一键回复 + 一键发布 + 随机定时全自动\n\n"
"⚠️ **注意**: 请确保已连接 LLM、SD WebUI 和 MCP 服务"
)
persona_pool_hint = gr.Markdown(
value=f"🎭 当前人设池: **{config.get('persona', '随机')[:20]}** → 关键词/主题池已匹配",
)
with gr.Row():
# 左栏: 一键操作
@ -2425,9 +2862,9 @@ with gr.Blocks(
"每次随机选关键词搜索,从结果中随机选笔记"
)
auto_comment_keywords = gr.Textbox(
label="评论关键词池 (逗号分隔)",
value=", ".join(DEFAULT_COMMENT_KEYWORDS),
placeholder="关键词1, 关键词2, ...",
label="评论关键词池 (逗号分隔,随人设自动切换)",
value=", ".join(get_persona_keywords(config.get("persona", ""))),
placeholder="关键词1, 关键词2, ... (切换人设自动更新)",
)
btn_auto_comment = gr.Button(
"💬 一键评论 (单次)", variant="primary", size="lg",
@ -2482,9 +2919,9 @@ with gr.Blocks(
"> 随机选主题+风格 → AI 生成文案 → SD 生成图片 → 自动发布"
)
auto_publish_topics = gr.Textbox(
label="主题池 (逗号分隔)",
value=", ".join(random.sample(DEFAULT_TOPICS, min(15, len(DEFAULT_TOPICS)))),
placeholder="主题会从池中随机选取,可自行修改",
label="主题池 (逗号分隔,随人设自动切换)",
value=", ".join(get_persona_topics(config.get("persona", ""))),
placeholder="主题会从池中随机选取,切换人设自动更新",
)
btn_auto_publish = gr.Button(
"🚀 一键发布 (单次)", variant="primary", size="lg",
@ -2642,6 +3079,13 @@ with gr.Blocks(
outputs=[status_bar],
)
# ---- 头像/换脸管理 ----
btn_save_face.click(
fn=upload_face_image,
inputs=[face_image_input],
outputs=[face_image_preview, face_status],
)
# ---- Tab 1: 内容创作 ----
btn_gen_copy.click(
fn=generate_copy,
@ -2651,7 +3095,8 @@ with gr.Blocks(
btn_gen_img.click(
fn=generate_images,
inputs=[sd_url, res_prompt, neg_prompt, sd_model, steps, cfg_scale],
inputs=[sd_url, res_prompt, neg_prompt, sd_model, steps, cfg_scale,
face_swap_toggle, face_image_preview],
outputs=[gallery, state_images, status_bar],
)
@ -2798,6 +3243,13 @@ with gr.Blocks(
)
# ---- Tab 6: 自动运营 ----
# 人设切换 → 联动更新评论关键词池和主题池
persona.change(
fn=on_persona_changed,
inputs=[persona],
outputs=[auto_comment_keywords, auto_publish_topics, persona_pool_hint],
)
btn_auto_comment.click(
fn=_auto_comment_with_log,
inputs=[auto_comment_keywords, mcp_url, llm_model, persona],
@ -2820,7 +3272,7 @@ with gr.Blocks(
)
btn_auto_publish.click(
fn=_auto_publish_with_log,
inputs=[auto_publish_topics, mcp_url, sd_url, sd_model, llm_model],
inputs=[auto_publish_topics, mcp_url, sd_url, sd_model, llm_model, face_swap_toggle],
outputs=[auto_publish_result, auto_log_display],
)
btn_start_sched.click(
@ -2833,7 +3285,8 @@ with gr.Blocks(
sched_fav_min, sched_fav_max, sched_fav_count,
sched_start_hour, sched_end_hour,
auto_comment_keywords, auto_publish_topics,
mcp_url, sd_url, sd_model, llm_model, persona],
mcp_url, sd_url, sd_model, llm_model, persona,
face_swap_toggle],
outputs=[sched_result],
)
btn_stop_sched.click(

BIN
my_face.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.4 MiB

BIN
myself.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

View File

@ -1,16 +1,20 @@
"""
Stable Diffusion 服务模块
封装对 SD WebUI API 的调用支持 txt2img img2img
封装对 SD WebUI API 的调用支持 txt2img img2img支持 ReActor 换脸
"""
import requests
import base64
import io
import logging
import os
from PIL import Image
logger = logging.getLogger(__name__)
SD_TIMEOUT = 900 # 图片生成可能需要较长时间
SD_TIMEOUT = 1800 # 图片生成可能需要较长时间
# 头像文件默认保存路径
FACE_IMAGE_PATH = os.path.join(os.path.dirname(__file__), "my_face.png")
# 默认反向提示词(针对 JuggernautXL / SDXL 优化,偏向东方审美)
DEFAULT_NEGATIVE = (
@ -31,6 +35,100 @@ class SDService:
def __init__(self, sd_url: str = "http://127.0.0.1:7860"):
    # Strip any trailing slash so endpoint paths can be appended directly.
    self.sd_url = sd_url.rstrip("/")
# ---------- 工具方法 ----------
@staticmethod
def _image_to_base64(img: Image.Image) -> str:
    """Encode a PIL image as a base64 string of its PNG bytes."""
    png_buffer = io.BytesIO()
    img.save(png_buffer, format="PNG")
    raw_bytes = png_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode("utf-8")
@staticmethod
def load_face_image(path: str = None) -> Image.Image | None:
    """Load the saved avatar; returns None when absent or unreadable."""
    target = path or FACE_IMAGE_PATH
    if not target or not os.path.isfile(target):
        return None
    try:
        return Image.open(target).convert("RGB")
    except Exception as e:
        logger.warning("头像加载失败: %s", e)
        return None
@staticmethod
def save_face_image(img: Image.Image, path: str = None) -> str:
    """Persist the avatar as a PNG file and return the path written."""
    destination = path or FACE_IMAGE_PATH
    img.convert("RGB").save(destination, format="PNG")
    logger.info("头像已保存: %s", destination)
    return destination
def _build_reactor_args(self, face_image: Image.Image) -> dict:
    """Build the ReActor face-swap payload in ``alwayson_scripts`` format.

    The ReActor extension takes a strictly positional args list; the
    meaning of each index (per reactor script-info) is:
    0: source_image (base64) 1: enable 2: source_faces
    3: target_faces 4: model 5: restore_face
    6: restore_visibility 7: restore_first 8: upscaler
    9: scale 10: upscaler_vis 11: swap_in_source
    12: swap_in_generated 13: log_level 14: gender_source
    15: gender_target 16: save_original 17: codeformer_weight
    18: source_hash_check 19: target_hash_check 20: exec_provider
    21: face_mask_correction 22: select_source 23: face_model
    24: source_folder 25: multiple_sources 26: random_image
    27: force_upscale 28: threshold 29: max_faces
    30: tab_single

    NOTE(review): this layout matches a specific ReActor version —
    confirm against the installed extension before reordering anything.
    """
    face_b64 = self._image_to_base64(face_image)
    return {
        "reactor": {
            "args": [
                face_b64,              # 0: source image (base64)
                True,                  # 1: enable ReActor
                "0",                   # 2: source face index
                "0",                   # 3: target face index
                "inswapper_128.onnx",  # 4: swap model
                "CodeFormer",          # 5: restore face method
                1,                     # 6: restore face visibility
                True,                  # 7: restore face first, then upscale
                "None",                # 8: upscaler
                1,                     # 9: scale
                1,                     # 10: upscaler visibility
                False,                 # 11: swap in source
                True,                  # 12: swap in generated
                "Minimum",             # 13: log level
                "No",                  # 14: gender detection (source)
                "No",                  # 15: gender detection (target)
                False,                 # 16: save original
                0.6,                   # 17: CodeFormer weight (fidelity)
                True,                  # 18: source hash check
                False,                 # 19: target hash check
                "CUDA",                # 20: execution provider
                True,                  # 21: face mask correction
                "Image(s)",            # 22: select source type
                "None",                # 23: face model name
                "",                    # 24: source folder
                None,                  # 25: multiple source images
                False,                 # 26: random image
                False,                 # 27: force upscale
                0.5,                   # 28: detection threshold
                0,                     # 29: max faces (0 = no limit)
                "tab_single",          # 30: tab
            ],
        }
    }
def has_reactor(self) -> bool:
    """Return True if the SD WebUI reports a ReActor script, else False."""
    try:
        resp = requests.get(f"{self.sd_url}/sdapi/v1/scripts", timeout=5)
        catalog = resp.json()
        script_names = catalog.get("txt2img", []) + catalog.get("img2img", [])
        return any("reactor" in name.lower() for name in script_names)
    except Exception:
        # Network failure or malformed payload: treat as not installed.
        return False
def check_connection(self) -> tuple[bool, str]:
"""检查 SD 服务是否可用"""
try:
@ -74,8 +172,13 @@ class SDService:
seed: int = -1,
sampler_name: str = "DPM++ 2M",
scheduler: str = "Karras",
face_image: Image.Image = None,
) -> list[Image.Image]:
"""文生图(参数针对 JuggernautXL 优化)"""
"""文生图(参数针对 JuggernautXL 优化)
Args:
face_image: 头像 PIL Image传入后自动启用 ReActor 换脸
"""
if model:
self.switch_model(model)
@ -92,6 +195,11 @@ class SDService:
"scheduler": scheduler,
}
# 如果提供了头像,通过 ReActor 换脸
if face_image is not None:
payload["alwayson_scripts"] = self._build_reactor_args(face_image)
logger.info("🎭 ReActor 换脸已启用")
resp = requests.post(
f"{self.sd_url}/sdapi/v1/txt2img",
json=payload,

BIN
zjz.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 MiB