xhs_factory/services/engagement.py
zhoujie b635108b89 refactor: split monolithic main.py into services/ + ui/ modules (improve-maintainability)
- main.py: 4360 → 146 lines (96.6% reduction), entry layer only
- services/: rate_limiter, autostart, persona, connection, profile,
  hotspot, content, engagement, scheduler, queue_ops (10 business modules)
- ui/app.py: all Gradio UI code extracted into build_app(cfg, analytics)
- Fix: with gr.Blocks() indented inside build_app function
- Fix: cfg.all property (not get_all method)
- Fix: STATUS_LABELS, get_persona_keywords, fetch_proactive_notes imports
- Fix: queue_ops module-level set_publish_callback moved into configure()
- Fix: pub_queue.format_*() wrapped as queue_format_table/calendar helpers
- All 14 files syntax-verified, build_app() runtime-verified
- 58/58 tasks complete"
2026-02-24 22:50:56 +08:00

198 lines
7.1 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

"""
services/engagement.py
评论管家:手动评论、回复、笔记加载等互动功能
"""
import json  # fix: json.loads is used in fetch_my_notes but was never imported
import logging

import gradio as gr

from llm_service import LLMService
from mcp_client import get_mcp_client
from services.connection import _get_llm_config
from services.hotspot import _get_cache, _pick_from_cache, _set_cache

# NOTE(review): `cfg` (used in fetch_my_notes) and `_resolve_persona` (used in
# ai_generate_comment / ai_reply_comment) are referenced in this module but never
# imported — presumably from the shared config module and services.persona;
# confirm their home modules and add the imports here.

logger = logging.getLogger("autobot")
def load_note_for_comment(feed_id, xsec_token, mcp_url):
    """Load a target note's detail (title + body + existing comments) for AI analysis.

    Returns a 4-tuple: (status message, body excerpt, comments excerpt, full text).
    On any MCP error or exception the last three fields are empty strings.
    """
    # Guard clause: both identifiers are required to fetch the note.
    if not (feed_id and xsec_token):
        return "❌ 请先选择笔记", "", "", ""
    try:
        detail = get_mcp_client(mcp_url).get_feed_detail(
            feed_id, xsec_token, load_all_comments=True
        )
        if "error" in detail:
            return f"{detail['error']}", "", "", ""
        full_text = detail.get("text", "")
        # Heuristic split: everything before the first "评论" marker is the body,
        # the marker plus the remainder is the comment section.
        marker = "评论"
        if marker in full_text:
            body, _, tail = full_text.partition(marker)
            body = body.strip()
            comments = marker + tail
        else:
            body = full_text[:500]
            comments = ""
        return "✅ 笔记内容已加载", body[:800], comments[:1500], full_text
    except Exception as e:
        return f"{e}", "", "", ""
def ai_generate_comment(model, persona,
                        post_title, post_content, existing_comments):
    """Generate a proactive comment for someone else's note via the LLM.

    Returns (comment text or warning, status message).
    """
    persona = _resolve_persona(persona)
    api_key, base_url, _ = _get_llm_config()
    # Validate all prerequisites up front before spending an LLM call.
    if not api_key:
        return "⚠️ 请先配置 LLM 提供商", "❌ LLM 未配置"
    if not model:
        return "⚠️ 请先连接 LLM", "❌ 未选模型"
    if not (post_title or post_content):
        return "⚠️ 请先加载笔记内容", "❌ 无笔记内容"
    try:
        # Inputs are truncated so the prompt stays within a sane size.
        comment = LLMService(api_key, base_url, model).generate_proactive_comment(
            persona, post_title, post_content[:600], existing_comments[:800]
        )
    except Exception as e:
        logger.error(f"AI 评论生成失败: {e}")
        return f"生成失败: {e}", f"{e}"
    return comment, "✅ 评论已生成"
def send_comment(feed_id, xsec_token, comment_content, mcp_url):
    """Post a comment onto someone else's note.

    Returns a single status string for the UI. Fix: error paths previously
    returned the bare error text / exception repr with no context; they now
    carry a "❌ ...失败:" prefix, consistent with send_reply below.
    """
    if not all([feed_id, xsec_token, comment_content]):
        return "❌ 缺少必要参数 (笔记ID / token / 评论内容)"
    try:
        client = get_mcp_client(mcp_url)
        result = client.post_comment(feed_id, xsec_token, comment_content)
        if "error" in result:
            # Consistency fix: mirror send_reply's prefixed error format.
            return f"❌ 评论失败: {result['error']}"
        return "✅ 评论已发送!"
    except Exception as e:
        return f"❌ 发送失败: {e}"
# ---- 模块 B: 回复我的笔记评论 ----
def fetch_my_notes(mcp_url):
    """Fetch my own note list via the saved userId.

    Reads my_user_id / xsec_token from cfg, fetches the user profile through
    MCP, parses the feed list out of the JSON payload, caches the entries
    under the "my_notes" key, and returns (gr.update for the note dropdown,
    status message string).
    """
    # fix: `json` is used below but was never imported anywhere in this module,
    # so every successful fetch crashed with NameError; import it locally so
    # this block is self-contained.
    import json

    # NOTE(review): `cfg` is referenced but not imported in this module —
    # presumably the shared config object; confirm and import it at file top.
    my_uid = cfg.get("my_user_id", "")
    xsec = cfg.get("xsec_token", "")
    if not my_uid:
        return (
            gr.update(choices=[], value=None),
            # fix: missing comma made the message run together
            "❌ 未配置用户 ID,请先到「账号登录」页填写并保存",
        )
    if not xsec:
        return (
            gr.update(choices=[], value=None),
            "❌ 未获取 xsec_token,请先登录",
        )
    try:
        client = get_mcp_client(mcp_url)
        result = client.get_user_profile(my_uid, xsec)
        if "error" in result:
            return gr.update(choices=[], value=None), f"{result['error']}"
        # Parse the feed payload: prefer the structured "raw" MCP content
        # items, fall back to parsing the flat "text" field.
        raw = result.get("raw", {})
        text = result.get("text", "")
        data = None
        if raw and isinstance(raw, dict):
            for item in raw.get("content", []):
                if item.get("type") == "text":
                    try:
                        data = json.loads(item["text"])
                    except (json.JSONDecodeError, KeyError):
                        pass  # best-effort: skip unparseable items
        if not data:
            try:
                data = json.loads(text)
            except (json.JSONDecodeError, TypeError):
                pass
        feeds = (data or {}).get("feeds") or []
        if not feeds:
            return (
                gr.update(choices=[], value=None),
                "⚠️ 未找到你的笔记,可能账号还没有发布内容",
            )
        entries = []
        for f in feeds:
            nc = f.get("noteCard") or {}
            user = nc.get("user") or {}
            interact = nc.get("interactInfo") or {}
            entries.append({
                "feed_id": f.get("id", ""),
                "xsec_token": f.get("xsecToken", ""),
                "title": nc.get("displayTitle", "未知标题"),
                # API is inconsistent about nickname key casing; try both.
                "author": user.get("nickname", user.get("nickName", "")),
                "user_id": user.get("userId", ""),
                "likes": interact.get("likedCount", "0"),
                "type": nc.get("type", ""),
            })
        _set_cache("my_notes", entries)
        choices = [
            f"[{i+1}] {e['title'][:20]} | {e['type']} | ❤{e['likes']}"
            for i, e in enumerate(entries)
        ]
        return (
            gr.update(choices=choices, value=choices[0] if choices else None),
            f"✅ 找到 {len(entries)} 篇笔记",
        )
    except Exception as e:
        return gr.update(choices=[], value=None), f"{e}"
def on_my_note_selected(selected):
    """Resolve a dropdown selection label back to its cached "my_notes" entry.

    Delegates to the shared hotspot-cache picker; returns whatever
    _pick_from_cache returns for the selected label.
    """
    return _pick_from_cache(selected, "my_notes")
def fetch_my_note_comments(feed_id, xsec_token, mcp_url):
    """Load the full comment list of one of my own notes.

    Returns (status message, comments text); the text is empty on failure.
    """
    # Both identifiers are required to address the note.
    if not (feed_id and xsec_token):
        return "❌ 请先选择笔记", ""
    try:
        detail = get_mcp_client(mcp_url).get_feed_detail(
            feed_id, xsec_token, load_all_comments=True
        )
        if "error" in detail:
            return f"{detail['error']}", ""
        return "✅ 评论加载完成", detail.get("text", "暂无评论")
    except Exception as e:
        return f"{e}", ""
def ai_reply_comment(model, persona, post_title, comment_text):
    """Generate an AI reply to a comment on one of my notes.

    Returns (reply text or warning, status message).
    """
    persona = _resolve_persona(persona)
    api_key, base_url, _ = _get_llm_config()
    # Precondition checks mirror ai_generate_comment above.
    if not api_key:
        return "⚠️ 请先配置 LLM 提供商", "❌ LLM 未配置"
    if not model:
        return "⚠️ 请先连接 LLM 并选择模型", "❌ 未选择模型"
    if not comment_text:
        return "请输入需要回复的评论内容", "⚠️ 请输入评论"
    try:
        reply = LLMService(api_key, base_url, model).generate_reply(
            persona, post_title, comment_text
        )
    except Exception as e:
        logger.error(f"AI 回复生成失败: {e}")
        return f"生成失败: {e}", f"{e}"
    return reply, "✅ 回复已生成"
def send_reply(feed_id, xsec_token, reply_content, mcp_url):
    """Post a reply comment onto one of my notes; returns a status string."""
    # All three pieces are mandatory to address and fill the reply.
    if not (feed_id and xsec_token and reply_content):
        return "❌ 缺少必要参数"
    try:
        outcome = get_mcp_client(mcp_url).post_comment(
            feed_id, xsec_token, reply_content
        )
        if "error" in outcome:
            return f"❌ 回复失败: {outcome['error']}"
        return "✅ 回复已发送"
    except Exception as e:
        return f"❌ 发送失败: {e}"