- 新增 GitHub Issue 模板(Bug 报告、功能请求)和 Pull Request 模板 - 新增 Code of Conduct(贡献者行为准则)和 Security Policy(安全政策) - 新增 CI 工作流(GitHub Actions),包含 ruff 代码检查和导入验证 - 新增开发依赖文件 requirements-dev.txt 📦 build(ci): 配置 GitHub Actions 持续集成 - 在 push 到 main 分支和 pull request 时自动触发 CI - 添加 lint 任务执行 ruff 代码风格检查 - 添加 import-check 任务验证核心服务模块导入 ♻️ refactor(structure): 重构项目目录结构 - 将根目录的 6 个服务模块迁移至 services/ 包 - 更新所有相关文件的导入语句(main.py、ui/、services/) - 根目录仅保留 main.py 作为唯一 Python 入口文件 🔧 chore(config): 调整配置和资源文件路径 - 将 config.json 移至 config/ 目录,更新相关引用 - 将个人头像图片移至 assets/faces/ 目录,更新 .gitignore - 更新 Dockerfile 和 docker-compose.yml 中的配置路径 📝 docs(readme): 完善 README 文档 - 添加项目状态徽章(Python 版本、License、CI) - 更新项目结构图反映实际目录布局 - 修正使用指南中的 Tab 名称和操作路径 - 替换 your-username 占位符为格式提示 🗑️ chore(cleanup): 清理冗余文件 - 删除旧版备份文件、测试脚本、临时记录和运行日志 - 删除散落的个人图片文件(已归档至 assets/faces/)
191 lines
6.3 KiB
Python
191 lines
6.3 KiB
Python
"""
|
||
services/hotspot.py
|
||
热点探测、热点生成、笔记列表缓存(供评论管家主动评论使用)
|
||
"""
|
||
import threading
|
||
import logging
|
||
|
||
import gradio as gr
|
||
|
||
from .llm_service import LLMService
|
||
from .mcp_client import get_mcp_client
|
||
from .connection import _get_llm_config
|
||
from .persona import _resolve_persona
|
||
|
||
logger = logging.getLogger("autobot")
|
||
|
||
# ==================================================
|
||
# Tab 2: 热点探测
|
||
# ==================================================
|
||
|
||
|
||
def search_hotspots(keyword, sort_by, mcp_url):
    """Search Xiaohongshu for trending notes matching *keyword*.

    Args:
        keyword: Search term; an empty value short-circuits with an error status.
        sort_by: Sort order forwarded to the MCP ``search_feeds`` call.
        mcp_url: Endpoint used to obtain the MCP client.

    Returns:
        A ``(status_message, result_text)`` tuple; ``result_text`` is empty
        on any failure.
    """
    if not keyword:
        return "❌ 请输入搜索关键词", ""
    try:
        response = get_mcp_client(mcp_url).search_feeds(keyword, sort_by=sort_by)
        if "error" in response:
            return f"❌ 搜索失败: {response['error']}", ""
        return "✅ 搜索完成", response.get("text", "无结果")
    except Exception as exc:
        logger.error("热点搜索失败: %s", exc)
        return f"❌ 搜索失败: {exc}", ""
|
||
|
||
|
||
def analyze_and_suggest(model, keyword, search_result):
    """Run LLM analysis over raw hotspot search text and build a markdown summary.

    Args:
        model: LLM model name passed to ``LLMService``.
        keyword: Original search keyword, passed through unchanged on success.
        search_result: Raw text from a prior search; empty short-circuits.

    Returns:
        A ``(status, markdown_summary, keyword)`` tuple; the last two are
        empty strings on failure.
    """
    if not search_result:
        return "❌ 请先搜索", "", ""
    api_key, base_url, _ = _get_llm_config()
    if not api_key:
        return "❌ 请先配置 LLM 提供商", "", ""
    try:
        analysis = LLMService(api_key, base_url, model).analyze_hotspots(search_result)

        topic_lines = [f"• {t}" for t in analysis.get("hot_topics", [])]
        pattern_lines = [f"• {p}" for p in analysis.get("title_patterns", [])]
        suggestion_lines = [
            f"**{s['topic']}** - {s['reason']}"
            for s in analysis.get("suggestions", [])
        ]

        # Assemble the four report sections, separated by blank lines.
        summary = "\n\n".join(
            [
                "## 🔥 热门选题\n" + "\n".join(topic_lines),
                "## 📝 标题套路\n" + "\n".join(pattern_lines),
                "## 📐 内容结构\n" + analysis.get("content_structure", ""),
                "## 💡 推荐选题\n" + "\n".join(suggestion_lines),
            ]
        )
        return "✅ 分析完成", summary, keyword
    except Exception as exc:
        logger.error("热点分析失败: %s", exc)
        return f"❌ 分析失败: {exc}", "", ""
|
||
|
||
|
||
def generate_from_hotspot(model, topic_from_hotspot, style, search_result, sd_model_name, persona_text):
    """Generate note copy for a chosen hotspot topic (SD-model aware, persona optional).

    Args:
        model: LLM model name.
        topic_from_hotspot: Selected or typed topic; empty short-circuits.
        style: Writing style forwarded to the LLM service.
        search_result: Reference text; only the first 2000 chars are sent.
        sd_model_name: Stable Diffusion model name used to adapt the prompt.
        persona_text: Optional persona description; resolved when non-empty.

    Returns:
        A ``(title, content, sd_prompt, tags_csv, status)`` tuple; the first
        four entries are empty strings on failure.
    """
    if not topic_from_hotspot:
        return "", "", "", "", "❌ 请先选择或输入选题"
    api_key, base_url, _ = _get_llm_config()
    if not api_key:
        return "", "", "", "", "❌ 请先配置 LLM 提供商"
    try:
        service = LLMService(api_key, base_url, model)
        resolved_persona = _resolve_persona(persona_text) if persona_text else None
        result = service.generate_copy_with_reference(
            topic=topic_from_hotspot,
            style=style,
            reference_notes=search_result[:2000],
            sd_model_name=sd_model_name,
            persona=resolved_persona,
        )
        outputs = [result.get(key, "") for key in ("title", "content", "sd_prompt")]
        outputs.append(", ".join(result.get("tags", [])))
        outputs.append("✅ 基于热点的文案已生成")
        return tuple(outputs)
    except Exception as exc:
        return "", "", "", "", f"❌ 生成失败: {exc}"
|
||
|
||
|
||
# ==================================================
# Tab 3: Comment butler (评论管家)
# ==================================================

# ---- Shared: note-list caches (thread-safe) ----

# Cache of notes fetched for proactive commenting on other users' notes.
_cached_proactive_entries: list[dict] = []
# Cache of notes fetched for the "my notes" comment workflow.
_cached_my_note_entries: list[dict] = []
# Mutex guarding both caches against races between concurrent UI callbacks.
_cache_lock = threading.RLock()
|
||
|
||
|
||
def _set_cache(name: str, entries: list):
    """Thread-safely replace a note-list cache with a copy of *entries*.

    Args:
        name: ``"proactive"`` selects the proactive-comment cache; any other
            value selects the my-notes cache.
        entries: Iterable of note dicts; copied so later caller mutation
            cannot affect the cache.
    """
    global _cached_proactive_entries, _cached_my_note_entries
    snapshot = list(entries)
    with _cache_lock:
        if name == "proactive":
            _cached_proactive_entries = snapshot
        else:
            _cached_my_note_entries = snapshot
|
||
|
||
|
||
def _get_cache(name: str) -> list:
    """Thread-safely return a snapshot (copy) of the named note-list cache.

    Args:
        name: ``"proactive"`` selects the proactive-comment cache; any other
            value selects the my-notes cache.
    """
    with _cache_lock:
        source = _cached_proactive_entries if name == "proactive" else _cached_my_note_entries
        return list(source)
|
||
|
||
|
||
def _fetch_and_cache(keyword, mcp_url, cache_name="proactive"):
    """Fetch a note list (keyword search or home feed) and cache it thread-safely.

    Args:
        keyword: Search term; blank/whitespace falls back to the home feed.
        mcp_url: Endpoint used to obtain the MCP client.
        cache_name: Which cache to populate (``"proactive"`` or my-notes).

    Returns:
        A ``(gr.update(...), status_message)`` tuple for the note dropdown.
        On failure the cache is reset to empty and the choices are cleared.
    """
    try:
        client = get_mcp_client(mcp_url)
        # Hoist the strip so it is computed once for both branch test and use.
        query = keyword.strip() if keyword else ""
        if query:
            entries = client.search_feeds_parsed(query)
            src = f"搜索「{query}」"
        else:
            entries = client.list_feeds_parsed()
            src = "首页推荐"

        _set_cache(cache_name, entries)

        if not entries:
            return gr.update(choices=[], value=None), f"⚠️ 从{src}未找到笔记"

        choices = []
        for i, e in enumerate(entries):
            title_short = (e["title"] or "无标题")[:28]
            choices.append(f"[{i+1}] {title_short} | @{e['author'] or '未知'} | ❤ {e['likes']}")

        return (
            gr.update(choices=choices, value=choices[0]),
            f"✅ 从{src}获取 {len(entries)} 条笔记",
        )
    except Exception as e:
        # Log before resetting (consistent with the other handlers in this
        # module, which all report failures via logger.error).
        logger.error("获取笔记列表失败: %s", e)
        _set_cache(cache_name, [])
        return gr.update(choices=[], value=None), f"❌ {e}"
|
||
|
||
|
||
def _pick_from_cache(selected, cache_name="proactive"):
    """Resolve a dropdown label into ``(feed_id, xsec_token, title)``.

    Operates on a thread-safe snapshot of the named cache. Returns three
    empty strings when nothing is selected, the cache is empty, or no entry
    matches.
    """
    entries = _get_cache(cache_name)
    if not selected or not entries:
        return "", "", ""
    # Primary path: parse the "[N]" index prefix that labels are built with.
    try:
        index = int(selected.split("]")[0].replace("[", "")) - 1
    except ValueError:
        index = -1
    if 0 <= index < len(entries):
        entry = entries[index]
        return entry["feed_id"], entry["xsec_token"], entry.get("title", "")
    # Fallback: fuzzy-match a truncated title against the label text.
    for entry in entries:
        if entry.get("title", "")[:15] in selected:
            return entry["feed_id"], entry["xsec_token"], entry.get("title", "")
    return "", "", ""
|
||
|
||
|
||
# ---- 模块 A: 主动评论他人 ----
|
||
|
||
def fetch_proactive_notes(keyword, mcp_url):
    """Fetch and cache candidate notes for the proactive-comment workflow."""
    return _fetch_and_cache(keyword, mcp_url, cache_name="proactive")
|
||
|
||
|
||
def on_proactive_note_selected(selected):
    """Dropdown callback: resolve the chosen label into note identifiers."""
    return _pick_from_cache(selected, cache_name="proactive")
|
||
|
||
|