- 新增 GitHub Issue 模板(Bug 报告、功能请求)和 Pull Request 模板 - 新增 Code of Conduct(贡献者行为准则)和 Security Policy(安全政策) - 新增 CI 工作流(GitHub Actions),包含 ruff 代码检查和导入验证 - 新增开发依赖文件 requirements-dev.txt 📦 build(ci): 配置 GitHub Actions 持续集成 - 在 push 到 main 分支和 pull request 时自动触发 CI - 添加 lint 任务执行 ruff 代码风格检查 - 添加 import-check 任务验证核心服务模块导入 ♻️ refactor(structure): 重构项目目录结构 - 将根目录的 6 个服务模块迁移至 services/ 包 - 更新所有相关文件的导入语句(main.py、ui/、services/) - 根目录仅保留 main.py 作为唯一 Python 入口文件 🔧 chore(config): 调整配置和资源文件路径 - 将 config.json 移至 config/ 目录,更新相关引用 - 将个人头像图片移至 assets/faces/ 目录,更新 .gitignore - 更新 Dockerfile 和 docker-compose.yml 中的配置路径 📝 docs(readme): 完善 README 文档 - 添加项目状态徽章(Python 版本、License、CI) - 更新项目结构图反映实际目录布局 - 修正使用指南中的 Tab 名称和操作路径 - 替换 your-username 占位符为格式提示 🗑️ chore(cleanup): 清理冗余文件 - 删除旧版备份文件、测试脚本、临时记录和运行日志 - 删除散落的个人图片文件(已归档至 assets/faces/)
198 lines
7.1 KiB
Python
198 lines
7.1 KiB
Python
"""
|
||
services/engagement.py
|
||
评论管家:手动评论、回复、笔记加载等互动功能
|
||
"""
|
||
import json
import logging

import gradio as gr

from .mcp_client import get_mcp_client
from .llm_service import LLMService
from .connection import _get_llm_config
from .hotspot import _pick_from_cache, _set_cache, _get_cache
|
||
|
||
logger = logging.getLogger("autobot")
|
||
|
||
def load_note_for_comment(feed_id, xsec_token, mcp_url):
    """Load a target note's detail (title + body + existing comments) for AI analysis.

    Returns a 4-tuple: (status message, body excerpt, comments excerpt, full text).
    """
    # Both the note id and its access token are required to fetch the detail.
    if not (feed_id and xsec_token):
        return "❌ 请先选择笔记", "", "", ""
    try:
        result = get_mcp_client(mcp_url).get_feed_detail(
            feed_id, xsec_token, load_all_comments=True
        )
        if "error" in result:
            return f"❌ {result['error']}", "", "", ""

        full_text = result.get("text", "")
        # Heuristic split: everything before the first "评论" marker is the note
        # body; the marker and everything after it is the comment section.
        marker = "评论"
        if marker in full_text:
            body, _, tail = full_text.partition(marker)
            content_part = body.strip()
            comments_part = marker + tail
        else:
            content_part = full_text[:500]
            comments_part = ""
        # Cap the excerpts so downstream prompts stay small.
        return "✅ 笔记内容已加载", content_part[:800], comments_part[:1500], full_text
    except Exception as e:
        return f"❌ {e}", "", "", ""
|
||
|
||
|
||
def ai_generate_comment(model, persona,
                        post_title, post_content, existing_comments):
    """Generate a proactive comment for someone else's note via the configured LLM.

    Returns (comment text or warning, status message).
    """
    persona = _resolve_persona(persona)
    api_key, base_url, _ = _get_llm_config()
    # Guard clauses: credentials, a selected model and note content must all exist.
    if not api_key:
        return "⚠️ 请先配置 LLM 提供商", "❌ LLM 未配置"
    if not model:
        return "⚠️ 请先连接 LLM", "❌ 未选模型"
    if not (post_title or post_content):
        return "⚠️ 请先加载笔记内容", "❌ 无笔记内容"
    try:
        service = LLMService(api_key, base_url, model)
        comment = service.generate_proactive_comment(
            persona,
            post_title,
            post_content[:600],        # trim to keep the prompt bounded
            existing_comments[:800],
        )
        return comment, "✅ 评论已生成"
    except Exception as e:
        logger.error(f"AI 评论生成失败: {e}")
        return f"生成失败: {e}", f"❌ {e}"
|
||
|
||
|
||
def send_comment(feed_id, xsec_token, comment_content, mcp_url):
    """Post a comment onto someone else's note through the MCP client.

    Returns a single status-message string.
    """
    # All three of note id, token and comment text are mandatory.
    if not (feed_id and xsec_token and comment_content):
        return "❌ 缺少必要参数 (笔记ID / token / 评论内容)"
    try:
        response = get_mcp_client(mcp_url).post_comment(
            feed_id, xsec_token, comment_content
        )
        if "error" in response:
            return f"❌ {response['error']}"
        return "✅ 评论已发送!"
    except Exception as e:
        return f"❌ {e}"
|
||
|
||
|
||
# ---- 模块 B: 回复我的笔记评论 ----
|
||
|
||
def fetch_my_notes(mcp_url):
    """Fetch the current user's note list via the saved userId.

    Returns (gr.update for the note dropdown, status message).
    """
    # NOTE(review): `cfg` is not imported in this module's visible header —
    # presumably a module-level config mapping defined elsewhere; confirm.
    my_uid = cfg.get("my_user_id", "")
    xsec = cfg.get("xsec_token", "")
    if not my_uid:
        return (
            gr.update(choices=[], value=None),
            "❌ 未配置用户 ID,请先到「账号登录」页填写并保存",
        )
    if not xsec:
        return (
            gr.update(choices=[], value=None),
            "❌ 未获取 xsec_token,请先登录",
        )
    try:
        result = get_mcp_client(mcp_url).get_user_profile(my_uid, xsec)
        if "error" in result:
            return gr.update(choices=[], value=None), f"❌ {result['error']}"

        # Parse feeds from the raw MCP payload first; fall back to the plain
        # text field if no text chunk in `raw` yields valid JSON.
        raw = result.get("raw", {})
        text = result.get("text", "")
        parsed = None
        if raw and isinstance(raw, dict):
            for chunk in raw.get("content", []):
                if chunk.get("type") != "text":
                    continue
                try:
                    parsed = json.loads(chunk["text"])
                except (json.JSONDecodeError, KeyError):
                    pass
        if not parsed:
            try:
                parsed = json.loads(text)
            except (json.JSONDecodeError, TypeError):
                pass

        feeds = (parsed or {}).get("feeds") or []
        if not feeds:
            return (
                gr.update(choices=[], value=None),
                "⚠️ 未找到你的笔记,可能账号还没有发布内容",
            )

        # Flatten each feed into the entry shape the cache / dropdown expects.
        entries = []
        for feed in feeds:
            note = feed.get("noteCard") or {}
            author = note.get("user") or {}
            stats = note.get("interactInfo") or {}
            entries.append({
                "feed_id": feed.get("id", ""),
                "xsec_token": feed.get("xsecToken", ""),
                "title": note.get("displayTitle", "未知标题"),
                "author": author.get("nickname", author.get("nickName", "")),
                "user_id": author.get("userId", ""),
                "likes": stats.get("likedCount", "0"),
                "type": note.get("type", ""),
            })

        _set_cache("my_notes", entries)
        choices = [
            f"[{idx + 1}] {entry['title'][:20]} | {entry['type']} | ❤{entry['likes']}"
            for idx, entry in enumerate(entries)
        ]
        return (
            gr.update(choices=choices, value=choices[0] if choices else None),
            f"✅ 找到 {len(entries)} 篇笔记",
        )
    except Exception as e:
        return gr.update(choices=[], value=None), f"❌ {e}"
|
||
|
||
|
||
def on_my_note_selected(selected):
    """Resolve the dropdown selection back to the cached "my_notes" entry.

    `selected` is the display string produced by fetch_my_notes; the lookup
    itself is delegated to the shared hotspot cache helper.
    """
    return _pick_from_cache(selected, "my_notes")
|
||
|
||
|
||
def fetch_my_note_comments(feed_id, xsec_token, mcp_url):
    """Fetch the comment list of one of the user's own notes.

    Returns (status message, comment text).
    """
    if not (feed_id and xsec_token):
        return "❌ 请先选择笔记", ""
    try:
        detail = get_mcp_client(mcp_url).get_feed_detail(
            feed_id, xsec_token, load_all_comments=True
        )
        if "error" in detail:
            return f"❌ {detail['error']}", ""
        return "✅ 评论加载完成", detail.get("text", "暂无评论")
    except Exception as e:
        return f"❌ {e}", ""
|
||
|
||
|
||
def ai_reply_comment(model, persona, post_title, comment_text):
    """Generate an AI reply to a comment left on one of the user's notes.

    Returns (reply text or warning, status message).
    """
    persona = _resolve_persona(persona)
    api_key, base_url, _ = _get_llm_config()
    # Guard clauses mirror ai_generate_comment: config, model, then input.
    if not api_key:
        return "⚠️ 请先配置 LLM 提供商", "❌ LLM 未配置"
    if not model:
        return "⚠️ 请先连接 LLM 并选择模型", "❌ 未选择模型"
    if not comment_text:
        return "请输入需要回复的评论内容", "⚠️ 请输入评论"
    try:
        service = LLMService(api_key, base_url, model)
        reply = service.generate_reply(persona, post_title, comment_text)
        return reply, "✅ 回复已生成"
    except Exception as e:
        logger.error(f"AI 回复生成失败: {e}")
        return f"生成失败: {e}", f"❌ {e}"
|
||
|
||
|
||
def send_reply(feed_id, xsec_token, reply_content, mcp_url):
    """Send a reply comment on the user's own note through MCP.

    Returns a single status-message string.
    """
    if not (feed_id and xsec_token and reply_content):
        return "❌ 缺少必要参数"
    try:
        client = get_mcp_client(mcp_url)
        outcome = client.post_comment(feed_id, xsec_token, reply_content)
        if "error" in outcome:
            return f"❌ 回复失败: {outcome['error']}"
        return "✅ 回复已发送"
    except Exception as e:
        return f"❌ 发送失败: {e}"
|