# ============================================================
# Module: MCP Server Entry Point (server.py)
#
# Starts the Ombre Brain MCP service and registers memory
# operation tools for Claude to call.
#
# Core responsibilities:
# - Initialize the config, bucket manager, dehydrator, decay
#   engine, embedding engine, and import engine
# - Expose 6 MCP tools:
#     breath — surface unresolved memories, or search by keyword
#     hold   — store a single memory
#     grow   — diary digest, auto-split into multiple buckets
#     trace  — modify metadata / resolved flag / delete
#     pulse  — system status + bucket listing
#     dream  — digest recent memories through introspection
#
# Startup:
#   Local:  python server.py
#   Remote: OMBRE_TRANSPORT=streamable-http python server.py
#   Docker: docker-compose up
# ============================================================
import os
import sys
import random
import logging
import asyncio
import httpx
# --- Ensure same-directory modules can be imported ---
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from mcp.server.fastmcp import FastMCP
from bucket_manager import BucketManager
from dehydrator import Dehydrator
from decay_engine import DecayEngine
from embedding_engine import EmbeddingEngine
from import_memory import ImportEngine
from utils import load_config, setup_logging, strip_wikilinks, count_tokens_approx
# --- Load config & initialize logging ---
config = load_config()
setup_logging(config.get("log_level", "INFO"))
logger = logging.getLogger("ombre_brain")
# --- Initialize core components ---
bucket_mgr = BucketManager(config)
dehydrator = Dehydrator(config)
decay_engine = DecayEngine(config, bucket_mgr)
embedding_engine = EmbeddingEngine(config)
import_engine = ImportEngine(config, bucket_mgr, dehydrator, embedding_engine)
# --- Create MCP server instance ---
# host="0.0.0.0" so the Docker container's SSE endpoint is externally
# reachable; stdio mode ignores host entirely (no network).
mcp = FastMCP(
"Ombre Brain",
host="0.0.0.0",
port=8000,
)
# =============================================================
# /health endpoint: lightweight keepalive
# Pinged periodically by Cloudflare Tunnel or a reverse proxy
# to prevent idle-timeout disconnects.
# =============================================================
@mcp.custom_route("/health", methods=["GET"])
async def health_check(request):
from starlette.responses import JSONResponse
try:
stats = await bucket_mgr.get_stats()
return JSONResponse({
"status": "ok",
"buckets": stats["permanent_count"] + stats["dynamic_count"],
"decay_engine": "running" if decay_engine.is_running else "stopped",
})
except Exception as e:
return JSONResponse({"status": "error", "detail": str(e)}, status_code=500)
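# A quick smoke test for the endpoint above (hypothetical invocation, assuming
# the default port 8000 configured in FastMCP above; the numbers are illustrative):
#   curl http://localhost:8000/health
#   → {"status": "ok", "buckets": 42, "decay_engine": "running"}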
# =============================================================
# /breath-hook endpoint: dedicated hook for SessionStart
# =============================================================
@mcp.custom_route("/breath-hook", methods=["GET"])
async def breath_hook(request):
from starlette.responses import PlainTextResponse
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
        # Pinned/protected buckets always surface as core principles
pinned = [b for b in all_buckets if b["metadata"].get("pinned") or b["metadata"].get("protected")]
        # Unresolved dynamic buckets, ranked by decay score
unresolved = [b for b in all_buckets
if not b["metadata"].get("resolved", False)
and b["metadata"].get("type") not in ("permanent", "feel")
and not b["metadata"].get("pinned")
and not b["metadata"].get("protected")]
scored = sorted(unresolved, key=lambda b: decay_engine.calculate_score(b["metadata"]), reverse=True)
parts = []
token_budget = 10000
for b in pinned:
summary = await dehydrator.dehydrate(strip_wikilinks(b["content"]), {k: v for k, v in b["metadata"].items() if k != "tags"})
parts.append(f"📌 [核心准则] {summary}")
token_budget -= count_tokens_approx(summary)
# Diversity: top-1 fixed + shuffle rest from top-20
candidates = list(scored)
if len(candidates) > 1:
top1 = [candidates[0]]
pool = candidates[1:min(20, len(candidates))]
random.shuffle(pool)
candidates = top1 + pool + candidates[min(20, len(candidates)):]
# Hard cap: max 20 surfacing buckets in hook
candidates = candidates[:20]
for b in candidates:
if token_budget <= 0:
break
summary = await dehydrator.dehydrate(strip_wikilinks(b["content"]), {k: v for k, v in b["metadata"].items() if k != "tags"})
summary_tokens = count_tokens_approx(summary)
if summary_tokens > token_budget:
break
parts.append(summary)
token_budget -= summary_tokens
if not parts:
return PlainTextResponse("")
return PlainTextResponse("[Ombre Brain - 记忆浮现]\n" + "\n---\n".join(parts))
except Exception as e:
logger.warning(f"Breath hook failed: {e}")
return PlainTextResponse("")
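# Sketch of wiring this endpoint into a SessionStart hook (hypothetical shell
# command; adjust host/port to your deployment). The endpoint returns plain
# text that can be injected directly into fresh session context:
#   curl -s http://localhost:8000/breath-hook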
# =============================================================
# /dream-hook endpoint: dedicated hook for Dreaming
# =============================================================
@mcp.custom_route("/dream-hook", methods=["GET"])
async def dream_hook(request):
from starlette.responses import PlainTextResponse
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
candidates = [
b for b in all_buckets
if b["metadata"].get("type") not in ("permanent", "feel")
and not b["metadata"].get("pinned", False)
and not b["metadata"].get("protected", False)
]
candidates.sort(key=lambda b: b["metadata"].get("created", ""), reverse=True)
recent = candidates[:10]
if not recent:
return PlainTextResponse("")
parts = []
for b in recent:
meta = b["metadata"]
resolved_tag = "[已解决]" if meta.get("resolved", False) else "[未解决]"
parts.append(
f"{meta.get('name', b['id'])} {resolved_tag} "
f"V{meta.get('valence', 0.5):.1f}/A{meta.get('arousal', 0.3):.1f}\n"
f"{strip_wikilinks(b['content'][:200])}"
)
return PlainTextResponse("[Ombre Brain - Dreaming]\n" + "\n---\n".join(parts))
except Exception as e:
logger.warning(f"Dream hook failed: {e}")
return PlainTextResponse("")
# =============================================================
# Internal helper: merge-or-create
# Checks whether a similar bucket exists; merges into it if so,
# creates a new bucket otherwise.
# Shared by hold and grow to avoid duplicating the logic.
# =============================================================
async def _merge_or_create(
content: str,
tags: list,
importance: int,
domain: list,
valence: float,
arousal: float,
name: str = "",
) -> tuple[str, bool]:
"""
Check if a similar bucket exists for merging; merge if so, create if not.
Returns (bucket_id_or_name, is_merged).
检查是否有相似桶可合并,有则合并,无则新建。
返回 (桶ID或名称, 是否合并)。
"""
try:
existing = await bucket_mgr.search(content, limit=1, domain_filter=domain or None)
except Exception as e:
logger.warning(f"Search for merge failed, creating new / 合并搜索失败,新建: {e}")
existing = []
if existing and existing[0].get("score", 0) > config.get("merge_threshold", 75):
bucket = existing[0]
        # --- Never merge into pinned/protected buckets ---
if not (bucket["metadata"].get("pinned") or bucket["metadata"].get("protected")):
try:
merged = await dehydrator.merge(bucket["content"], content)
old_v = bucket["metadata"].get("valence", 0.5)
old_a = bucket["metadata"].get("arousal", 0.3)
merged_valence = round((old_v + valence) / 2, 2)
merged_arousal = round((old_a + arousal) / 2, 2)
await bucket_mgr.update(
bucket["id"],
content=merged,
tags=list(set(bucket["metadata"].get("tags", []) + tags)),
importance=max(bucket["metadata"].get("importance", 5), importance),
domain=list(set(bucket["metadata"].get("domain", []) + domain)),
valence=merged_valence,
arousal=merged_arousal,
)
# --- Update embedding after merge ---
try:
await embedding_engine.generate_and_store(bucket["id"], merged)
except Exception:
pass
return bucket["metadata"].get("name", bucket["id"]), True
except Exception as e:
logger.warning(f"Merge failed, creating new / 合并失败,新建: {e}")
bucket_id = await bucket_mgr.create(
content=content,
tags=tags,
importance=importance,
domain=domain,
valence=valence,
arousal=arousal,
name=name or None,
)
# --- Generate embedding for new bucket ---
try:
await embedding_engine.generate_and_store(bucket_id, content)
except Exception:
pass
return bucket_id, False
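# Worked example of the merge math above (illustrative numbers): merging a
# bucket with valence 0.8 / arousal 0.4 and an incoming memory with valence
# 0.4 / arousal 0.2 yields valence (0.8 + 0.4) / 2 = 0.6 and arousal
# (0.4 + 0.2) / 2 = 0.3; importance takes the max of the two values, and the
# tag and domain lists are set-unioned.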
# =============================================================
# Tool 1: breath — breathe
#
# No args: surface the highest-weight unresolved memories (active push).
# With args: search memories by keyword + emotion coordinates.
# =============================================================
@mcp.tool()
async def breath(
query: str = "",
max_tokens: int = 10000,
domain: str = "",
valence: float = -1,
arousal: float = -1,
max_results: int = 20,
) -> str:
"""检索/浮现记忆。不传query或传空=自动浮现,有query=关键词检索。max_tokens控制返回总token上限(默认10000)。domain逗号分隔,valence/arousal 0~1(-1忽略)。max_results控制返回数量上限(默认20,最大50)。"""
await decay_engine.ensure_started()
max_results = min(max_results, 50)
max_tokens = min(max_tokens, 20000)
    # --- No args or empty query: surfacing mode (active push from the weight pool) ---
if not query or not query.strip():
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
except Exception as e:
logger.error(f"Failed to list buckets for surfacing / 浮现列桶失败: {e}")
return "记忆系统暂时无法访问。"
        # --- Pinned/protected buckets: always surface as core principles ---
pinned_buckets = [
b for b in all_buckets
if b["metadata"].get("pinned") or b["metadata"].get("protected")
]
pinned_results = []
for b in pinned_buckets:
try:
clean_meta = {k: v for k, v in b["metadata"].items() if k != "tags"}
summary = await dehydrator.dehydrate(strip_wikilinks(b["content"]), clean_meta)
pinned_results.append(f"📌 [核心准则] {summary}")
except Exception as e:
logger.warning(f"Failed to dehydrate pinned bucket / 钉选桶脱水失败: {e}")
continue
        # --- Unresolved buckets: surface the top N by weight ---
unresolved = [
b for b in all_buckets
if not b["metadata"].get("resolved", False)
and b["metadata"].get("type") not in ("permanent", "feel")
and not b["metadata"].get("pinned", False)
and not b["metadata"].get("protected", False)
]
logger.info(
f"Breath surfacing: {len(all_buckets)} total, "
f"{len(pinned_buckets)} pinned, {len(unresolved)} unresolved"
)
scored = sorted(
unresolved,
key=lambda b: decay_engine.calculate_score(b["metadata"]),
reverse=True,
)
if scored:
top_scores = [(b["metadata"].get("name", b["id"]), decay_engine.calculate_score(b["metadata"])) for b in scored[:5]]
logger.info(f"Top unresolved scores: {top_scores}")
        # --- Token-budgeted surfacing with diversity + a hard cap ---
        # The top-1 bucket always surfaces; the rest are sampled from the
        # top 20 for diversity.
token_budget = max_tokens
for r in pinned_results:
token_budget -= count_tokens_approx(r)
candidates = list(scored)
if len(candidates) > 1:
# Ensure highest-score bucket is first, shuffle rest from top-20
top1 = [candidates[0]]
pool = candidates[1:min(20, len(candidates))]
random.shuffle(pool)
candidates = top1 + pool + candidates[min(20, len(candidates)):]
# Hard cap: never surface more than max_results buckets
candidates = candidates[:max_results]
dynamic_results = []
for b in candidates:
if token_budget <= 0:
break
try:
clean_meta = {k: v for k, v in b["metadata"].items() if k != "tags"}
summary = await dehydrator.dehydrate(strip_wikilinks(b["content"]), clean_meta)
summary_tokens = count_tokens_approx(summary)
if summary_tokens > token_budget:
break
# NOTE: no touch() here — surfacing should NOT reset decay timer
score = decay_engine.calculate_score(b["metadata"])
dynamic_results.append(f"[权重:{score:.2f}] {summary}")
token_budget -= summary_tokens
except Exception as e:
logger.warning(f"Failed to dehydrate surfaced bucket / 浮现脱水失败: {e}")
continue
if not pinned_results and not dynamic_results:
return "权重池平静,没有需要处理的记忆。"
parts = []
if pinned_results:
parts.append("=== 核心准则 ===\n" + "\n---\n".join(pinned_results))
if dynamic_results:
parts.append("=== 浮现记忆 ===\n" + "\n---\n".join(dynamic_results))
return "\n\n".join(parts)
    # --- Feel retrieval: domain="feel" is a dedicated channel ---
if domain.strip().lower() == "feel":
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
feels = [b for b in all_buckets if b["metadata"].get("type") == "feel"]
feels.sort(key=lambda b: b["metadata"].get("created", ""), reverse=True)
if not feels:
return "没有留下过 feel。"
            results = []
            for f in feels:
                created = f["metadata"].get("created", "")
                entry = f"[{created}]\n{strip_wikilinks(f['content'])}"
                # Check the budget before appending so the joined result never
                # exceeds max_tokens (but always keep at least one entry)
                if results and count_tokens_approx("\n---\n".join(results + [entry])) > max_tokens:
                    break
                results.append(entry)
return "=== 你留下的 feel ===\n" + "\n---\n".join(results)
except Exception as e:
logger.error(f"Feel retrieval failed: {e}")
return "读取 feel 失败。"
    # --- With args: search mode (keyword + vector dual channel) ---
domain_filter = [d.strip() for d in domain.split(",") if d.strip()] or None
q_valence = valence if 0 <= valence <= 1 else None
q_arousal = arousal if 0 <= arousal <= 1 else None
try:
matches = await bucket_mgr.search(
query,
limit=max(max_results, 20),
domain_filter=domain_filter,
query_valence=q_valence,
query_arousal=q_arousal,
)
except Exception as e:
logger.error(f"Search failed / 检索失败: {e}")
return "检索过程出错,请稍后重试。"
    # --- Exclude pinned/protected buckets from search results (they always surface in surfacing mode) ---
matches = [b for b in matches if not (b["metadata"].get("pinned") or b["metadata"].get("protected"))]
    # --- Vector-similarity channel: find semantically related buckets ---
matched_ids = {b["id"] for b in matches}
try:
vector_results = await embedding_engine.search_similar(query, top_k=max(max_results, 20))
for bucket_id, sim_score in vector_results:
if bucket_id not in matched_ids and sim_score > 0.5:
bucket = await bucket_mgr.get(bucket_id)
if bucket and not (bucket["metadata"].get("pinned") or bucket["metadata"].get("protected")):
bucket["score"] = round(sim_score * 100, 2)
bucket["vector_match"] = True
matches.append(bucket)
matched_ids.add(bucket_id)
except Exception as e:
logger.warning(f"Vector search failed, using keyword only / 向量搜索失败: {e}")
results = []
token_used = 0
for bucket in matches:
if token_used >= max_tokens:
break
try:
clean_meta = {k: v for k, v in bucket["metadata"].items() if k != "tags"}
            # --- Memory reconstruction: shift the displayed valence by the current mood (±0.1 max) ---
if q_valence is not None and "valence" in clean_meta:
original_v = float(clean_meta.get("valence", 0.5))
shift = (q_valence - 0.5) * 0.2 # ±0.1 max shift
clean_meta["valence"] = max(0.0, min(1.0, original_v + shift))
summary = await dehydrator.dehydrate(strip_wikilinks(bucket["content"]), clean_meta)
summary_tokens = count_tokens_approx(summary)
if token_used + summary_tokens > max_tokens:
break
await bucket_mgr.touch(bucket["id"])
if bucket.get("vector_match"):
summary = f"[语义关联] {summary}"
results.append(summary)
token_used += summary_tokens
except Exception as e:
logger.warning(f"Failed to dehydrate search result / 检索结果脱水失败: {e}")
continue
    # --- Random surfacing: when search returns fewer than 3 results, 40% chance to float up old low-weight memories ---
if len(matches) < 3 and random.random() < 0.4:
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
matched_ids = {b["id"] for b in matches}
low_weight = [
b for b in all_buckets
if b["id"] not in matched_ids
and decay_engine.calculate_score(b["metadata"]) < 2.0
]
if low_weight:
drifted = random.sample(low_weight, min(random.randint(1, 3), len(low_weight)))
drift_results = []
for b in drifted:
clean_meta = {k: v for k, v in b["metadata"].items() if k != "tags"}
summary = await dehydrator.dehydrate(strip_wikilinks(b["content"]), clean_meta)
drift_results.append(f"[surface_type: random]\n{summary}")
results.append("--- 忽然想起来 ---\n" + "\n---\n".join(drift_results))
except Exception as e:
logger.warning(f"Random surfacing failed / 随机浮现失败: {e}")
if not results:
return "未找到相关记忆。"
return "\n---\n".join(results)
# =============================================================
# Tool 2: hold — hold on to this
# =============================================================
@mcp.tool()
async def hold(
content: str,
tags: str = "",
importance: int = 5,
pinned: bool = False,
feel: bool = False,
    source_bucket: str = "",
    valence: float = -1,
arousal: float = -1,
) -> str:
"""存储单条记忆,自动打标+合并。tags逗号分隔,importance 1-10。pinned=True创建永久钉选桶。feel=True存储你的第一人称感受(不参与普通浮现)。source_bucket=被消化的记忆桶ID(feel模式下,标记源记忆为已消化)。"""
await decay_engine.ensure_started()
    # --- Input validation ---
if not content or not content.strip():
return "内容为空,无法存储。"
importance = max(1, min(10, importance))
extra_tags = [t.strip() for t in tags.split(",") if t.strip()]
    # --- Feel mode: store as the feel type with minimal metadata ---
if feel:
# Feel valence/arousal = model's own perspective
feel_valence = valence if 0 <= valence <= 1 else 0.5
feel_arousal = arousal if 0 <= arousal <= 1 else 0.3
bucket_id = await bucket_mgr.create(
content=content,
tags=[],
importance=5,
domain=[],
valence=feel_valence,
arousal=feel_arousal,
name=None,
bucket_type="feel",
)
try:
await embedding_engine.generate_and_store(bucket_id, content)
except Exception:
pass
        # --- Mark the source memory as digested + store the model's own valence ---
if source_bucket and source_bucket.strip():
try:
update_kwargs = {"digested": True}
if 0 <= valence <= 1:
update_kwargs["model_valence"] = feel_valence
await bucket_mgr.update(source_bucket.strip(), **update_kwargs)
except Exception as e:
logger.warning(f"Failed to mark source as digested / 标记已消化失败: {e}")
return f"🫧feel→{bucket_id}"
    # --- Step 1: auto-tagging ---
try:
analysis = await dehydrator.analyze(content)
except Exception as e:
logger.warning(f"Auto-tagging failed, using defaults / 自动打标失败: {e}")
analysis = {
"domain": ["未分类"], "valence": 0.5, "arousal": 0.3,
"tags": [], "suggested_name": "",
}
domain = analysis["domain"]
valence = analysis["valence"]
arousal = analysis["arousal"]
auto_tags = analysis["tags"]
suggested_name = analysis.get("suggested_name", "")
all_tags = list(dict.fromkeys(auto_tags + extra_tags))
    # --- Pinned buckets bypass merge and are created directly in the permanent directory ---
if pinned:
bucket_id = await bucket_mgr.create(
content=content,
tags=all_tags,
importance=10,
domain=domain,
valence=valence,
arousal=arousal,
name=suggested_name or None,
bucket_type="permanent",
pinned=True,
)
try:
await embedding_engine.generate_and_store(bucket_id, content)
except Exception:
pass
return f"📌钉选→{bucket_id} {','.join(domain)}"
    # --- Step 2: merge or create ---
result_name, is_merged = await _merge_or_create(
content=content,
tags=all_tags,
importance=importance,
domain=domain,
valence=valence,
arousal=arousal,
name=suggested_name,
)
action = "合并→" if is_merged else "新建→"
return f"{action}{result_name} {','.join(domain)}"
# =============================================================
# Tool 3: grow — a day's fragments grow into memories
# =============================================================
@mcp.tool()
async def grow(content: str) -> str:
"""日记归档,自动拆分为多桶。短内容(<30字)走快速路径。"""
await decay_engine.ensure_started()
if not content or not content.strip():
return "内容为空,无法整理。"
    # --- Short-content fast path: skip the digest split and reuse the hold logic ---
    # For very short inputs (like "1"), calling digest is wasteful: it sends
    # the full DIGEST_PROMPT (~800 tokens) to DeepSeek for nothing. Instead,
    # run analyze + create directly, saving one API call.
if len(content.strip()) < 30:
logger.info(f"grow short-content fast path: {len(content.strip())} chars")
try:
analysis = await dehydrator.analyze(content)
except Exception as e:
logger.warning(f"Fast-path analyze failed / 快速路径打标失败: {e}")
analysis = {
"domain": ["未分类"], "valence": 0.5, "arousal": 0.3,
"tags": [], "suggested_name": "",
}
result_name, is_merged = await _merge_or_create(
content=content.strip(),
tags=analysis.get("tags", []),
importance=analysis.get("importance", 5) if isinstance(analysis.get("importance"), int) else 5,
domain=analysis.get("domain", ["未分类"]),
valence=analysis.get("valence", 0.5),
arousal=analysis.get("arousal", 0.3),
name=analysis.get("suggested_name", ""),
)
action = "合并" if is_merged else "新建"
return f"{action}{result_name} | {','.join(analysis.get('domain', []))} V{analysis.get('valence', 0.5):.1f}/A{analysis.get('arousal', 0.3):.1f}"
    # --- Step 1: let the API split and organize the diary ---
try:
items = await dehydrator.digest(content)
except Exception as e:
logger.error(f"Diary digest failed / 日记整理失败: {e}")
return f"日记整理失败: {e}"
if not items:
return "内容为空或整理失败。"
results = []
created = 0
merged = 0
    # --- Step 2: merge or create each item (a single failure does not affect the others) ---
for item in items:
try:
result_name, is_merged = await _merge_or_create(
content=item["content"],
tags=item.get("tags", []),
importance=item.get("importance", 5),
domain=item.get("domain", ["未分类"]),
valence=item.get("valence", 0.5),
arousal=item.get("arousal", 0.3),
name=item.get("name", ""),
)
if is_merged:
results.append(f"📎{result_name}")
merged += 1
else:
results.append(f"📝{item.get('name', result_name)}")
created += 1
except Exception as e:
logger.warning(
f"Failed to process diary item / 日记条目处理失败: "
f"{item.get('name', '?')}: {e}"
)
results.append(f"⚠️{item.get('name', '?')}")
return f"{len(items)}条|新{created}{merged}\n" + "\n".join(results)
# =============================================================
# Tool 4: trace — redraw the outline of a memory
# Also handles deletion (delete=True).
# =============================================================
@mcp.tool()
async def trace(
bucket_id: str,
name: str = "",
domain: str = "",
valence: float = -1,
arousal: float = -1,
importance: int = -1,
tags: str = "",
resolved: int = -1,
pinned: int = -1,
digested: int = -1,
content: str = "",
delete: bool = False,
) -> str:
"""修改记忆元数据或内容。resolved=1沉底/0激活,pinned=1钉选/0取消,digested=1隐藏(保留但不浮现)/0取消隐藏,content=替换桶正文,delete=True删除。只传需改的,-1或空=不改。"""
if not bucket_id or not bucket_id.strip():
return "请提供有效的 bucket_id。"
    # --- Delete mode ---
if delete:
success = await bucket_mgr.delete(bucket_id)
if success:
embedding_engine.delete_embedding(bucket_id)
return f"已遗忘记忆桶: {bucket_id}" if success else f"未找到记忆桶: {bucket_id}"
bucket = await bucket_mgr.get(bucket_id)
if not bucket:
return f"未找到记忆桶: {bucket_id}"
    # --- Collect only the fields the caller actually passed ---
updates = {}
if name:
updates["name"] = name
if domain:
updates["domain"] = [d.strip() for d in domain.split(",") if d.strip()]
if 0 <= valence <= 1:
updates["valence"] = valence
if 0 <= arousal <= 1:
updates["arousal"] = arousal
if 1 <= importance <= 10:
updates["importance"] = importance
if tags:
updates["tags"] = [t.strip() for t in tags.split(",") if t.strip()]
if resolved in (0, 1):
updates["resolved"] = bool(resolved)
if pinned in (0, 1):
updates["pinned"] = bool(pinned)
if pinned == 1:
updates["importance"] = 10 # pinned → lock importance
if digested in (0, 1):
updates["digested"] = bool(digested)
if content:
updates["content"] = content
if not updates:
return "没有任何字段需要修改。"
success = await bucket_mgr.update(bucket_id, **updates)
if not success:
return f"修改失败: {bucket_id}"
# Re-generate embedding if content changed
if "content" in updates:
try:
await embedding_engine.generate_and_store(bucket_id, updates["content"])
except Exception:
pass
changed = ", ".join(f"{k}={v}" for k, v in updates.items() if k != "content")
if "content" in updates:
changed += (", content=已替换" if changed else "content=已替换")
    # Explicit hint about the semantics of a resolved-state change
if "resolved" in updates:
if updates["resolved"]:
changed += " → 已沉底,只在关键词触发时重新浮现"
else:
changed += " → 已重新激活,将参与浮现排序"
if "digested" in updates:
if updates["digested"]:
changed += " → 已隐藏,保留但不再浮现"
else:
changed += " → 已取消隐藏,重新参与浮现"
return f"已修改记忆桶 {bucket_id}: {changed}"
# =============================================================
# Tool 5: pulse — heartbeat: system status + memory listing
# =============================================================
@mcp.tool()
async def pulse(include_archive: bool = False) -> str:
"""系统状态+记忆桶列表。include_archive=True含归档。"""
try:
stats = await bucket_mgr.get_stats()
except Exception as e:
return f"获取系统状态失败: {e}"
status = (
f"=== Ombre Brain 记忆系统 ===\n"
f"固化记忆桶: {stats['permanent_count']}\n"
f"动态记忆桶: {stats['dynamic_count']}\n"
f"归档记忆桶: {stats['archive_count']}\n"
f"总存储大小: {stats['total_size_kb']:.1f} KB\n"
f"衰减引擎: {'运行中' if decay_engine.is_running else '已停止'}\n"
)
    # --- List all bucket summaries ---
try:
buckets = await bucket_mgr.list_all(include_archive=include_archive)
except Exception as e:
return status + f"\n列出记忆桶失败: {e}"
if not buckets:
return status + "\n记忆库为空。"
lines = []
for b in buckets:
meta = b.get("metadata", {})
if meta.get("pinned") or meta.get("protected"):
icon = "📌"
elif meta.get("type") == "permanent":
icon = "📦"
elif meta.get("type") == "feel":
icon = "🫧"
elif meta.get("type") == "archived":
icon = "🗄️"
elif meta.get("resolved", False):
icon = ""
else:
icon = "💭"
try:
score = decay_engine.calculate_score(meta)
except Exception:
score = 0.0
domains = ",".join(meta.get("domain", []))
val = meta.get("valence", 0.5)
aro = meta.get("arousal", 0.3)
resolved_tag = " [已解决]" if meta.get("resolved", False) else ""
lines.append(
f"{icon} [{meta.get('name', b['id'])}]{resolved_tag} "
f"主题:{domains} "
f"情感:V{val:.1f}/A{aro:.1f} "
f"重要:{meta.get('importance', '?')} "
f"权重:{score:.2f} "
f"标签:{','.join(meta.get('tags', []))}"
)
return status + "\n=== 记忆列表 ===\n" + "\n".join(lines)
# =============================================================
# Tool 6: dream — digest recent memories
#
# Reads the most recently created surface-level buckets (≤10) and
# returns them for Claude to introspect under prompt guidance.
# Claude then decides: resolve some, write feels, or do nothing.
# =============================================================
@mcp.tool()
async def dream() -> str:
"""做梦——读取最近新增的记忆桶,供你自省。读完后可以trace(resolved=1)放下,或hold(feel=True)写感受。"""
await decay_engine.ensure_started()
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
except Exception as e:
logger.error(f"Dream failed to list buckets: {e}")
return "记忆系统暂时无法访问。"
# --- Filter: recent surface-level dynamic buckets (not permanent/pinned/feel) ---
candidates = [
b for b in all_buckets
if b["metadata"].get("type") not in ("permanent", "feel")
and not b["metadata"].get("pinned", False)
and not b["metadata"].get("protected", False)
]
# --- Sort by creation time desc, take top 10 ---
candidates.sort(key=lambda b: b["metadata"].get("created", ""), reverse=True)
recent = candidates[:10]
if not recent:
return "没有需要消化的新记忆。"
parts = []
for b in recent:
meta = b["metadata"]
resolved_tag = " [已解决]" if meta.get("resolved", False) else " [未解决]"
domains = ",".join(meta.get("domain", []))
val = meta.get("valence", 0.5)
aro = meta.get("arousal", 0.3)
created = meta.get("created", "")
parts.append(
f"[{meta.get('name', b['id'])}]{resolved_tag} "
f"主题:{domains} V{val:.1f}/A{aro:.1f} "
f"创建:{created}\n"
f"ID: {b['id']}\n"
f"{strip_wikilinks(b['content'][:500])}"
)
header = (
"=== Dreaming ===\n"
"以下是你最近的记忆。用第一人称想:\n"
"- 这些东西里有什么在你这里留下了重量?\n"
"- 有什么还没想清楚?\n"
"- 有什么可以放下了?\n"
"想完之后:值得放下的用 trace(bucket_id, resolved=1)\n"
"有沉淀的用 hold(content=\"...\", feel=True, source_bucket=\"bucket_id\", valence=你的感受) 写下来。\n"
"valence 是你对这段记忆的感受,不是事件本身的情绪。\n"
"没有沉淀就不写,不强迫产出。\n"
)
# --- Connection hint: find most similar pair via embeddings ---
connection_hint = ""
if embedding_engine and embedding_engine.enabled and len(recent) >= 2:
try:
best_pair = None
best_sim = 0.0
ids = [b["id"] for b in recent]
names = {b["id"]: b["metadata"].get("name", b["id"]) for b in recent}
embeddings = {}
for bid in ids:
emb = await embedding_engine.get_embedding(bid)
if emb is not None:
embeddings[bid] = emb
for i, id_a in enumerate(ids):
for id_b in ids[i+1:]:
if id_a in embeddings and id_b in embeddings:
sim = embedding_engine._cosine_similarity(embeddings[id_a], embeddings[id_b])
if sim > best_sim:
best_sim = sim
best_pair = (id_a, id_b)
if best_pair and best_sim > 0.5:
connection_hint = (
f"\n💭 [{names[best_pair[0]]}] 和 [{names[best_pair[1]]}] "
f"似乎有关联 (相似度:{best_sim:.2f})——不替你下结论,你自己想。\n"
)
except Exception as e:
logger.warning(f"Dream connection hint failed: {e}")
# --- Feel crystallization hint: detect repeated feel themes ---
crystal_hint = ""
if embedding_engine and embedding_engine.enabled:
try:
feels = [b for b in all_buckets if b["metadata"].get("type") == "feel"]
if len(feels) >= 3:
feel_embeddings = {}
for f in feels:
emb = await embedding_engine.get_embedding(f["id"])
if emb is not None:
feel_embeddings[f["id"]] = emb
# Find clusters: feels with similarity > 0.7 to at least 2 others
for fid, femb in feel_embeddings.items():
similar_feels = []
for oid, oemb in feel_embeddings.items():
if oid != fid:
sim = embedding_engine._cosine_similarity(femb, oemb)
if sim > 0.7:
similar_feels.append(oid)
if len(similar_feels) >= 2:
feel_bucket = next((f for f in feels if f["id"] == fid), None)
if feel_bucket and not feel_bucket["metadata"].get("pinned"):
content_preview = strip_wikilinks(feel_bucket["content"][:80])
crystal_hint = (
f"\n🔮 你已经写过 {len(similar_feels)+1} 条相似的 feel "
f"(围绕「{content_preview}…」)。"
f"如果这已经是确信而不只是感受了,"
f"你可以用 hold(content=\"...\", pinned=True) 升级它。"
f"不急,你自己决定。\n"
)
break
except Exception as e:
logger.warning(f"Dream crystallization hint failed: {e}")
return header + "\n---\n".join(parts) + connection_hint + crystal_hint
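# A typical dream cycle, sketched as hypothetical follow-up calls: dream()
# reads the ten most recent buckets; the model may then call
# trace("b123", resolved=1) to let something go, or
# hold(content="...", feel=True, source_bucket="b123", valence=0.6) to record
# a sediment, or do nothing at all.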
# =============================================================
# Dashboard API endpoints (for the lightweight Web UI)
# =============================================================
@mcp.custom_route("/api/buckets", methods=["GET"])
async def api_buckets(request):
"""List all buckets with metadata (no content for efficiency)."""
from starlette.responses import JSONResponse
try:
all_buckets = await bucket_mgr.list_all(include_archive=True)
result = []
for b in all_buckets:
meta = b.get("metadata", {})
result.append({
"id": b["id"],
"name": meta.get("name", b["id"]),
"type": meta.get("type", "dynamic"),
"domain": meta.get("domain", []),
"tags": meta.get("tags", []),
"valence": meta.get("valence", 0.5),
"arousal": meta.get("arousal", 0.3),
"model_valence": meta.get("model_valence"),
"importance": meta.get("importance", 5),
"resolved": meta.get("resolved", False),
"pinned": meta.get("pinned", False),
"digested": meta.get("digested", False),
"created": meta.get("created", ""),
"last_active": meta.get("last_active", ""),
"activation_count": meta.get("activation_count", 1),
"score": decay_engine.calculate_score(meta),
"content_preview": strip_wikilinks(b.get("content", ""))[:200],
})
result.sort(key=lambda x: x["score"], reverse=True)
return JSONResponse(result)
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
@mcp.custom_route("/api/bucket/{bucket_id}", methods=["GET"])
async def api_bucket_detail(request):
"""Get full bucket content by ID."""
from starlette.responses import JSONResponse
bucket_id = request.path_params["bucket_id"]
bucket = await bucket_mgr.get(bucket_id)
if not bucket:
return JSONResponse({"error": "not found"}, status_code=404)
meta = bucket.get("metadata", {})
return JSONResponse({
"id": bucket["id"],
"metadata": meta,
"content": strip_wikilinks(bucket.get("content", "")),
"score": decay_engine.calculate_score(meta),
})
@mcp.custom_route("/api/search", methods=["GET"])
async def api_search(request):
"""Search buckets by query."""
from starlette.responses import JSONResponse
query = request.query_params.get("q", "")
if not query:
return JSONResponse({"error": "missing q parameter"}, status_code=400)
try:
matches = await bucket_mgr.search(query, limit=10)
result = []
for b in matches:
meta = b.get("metadata", {})
result.append({
"id": b["id"],
"name": meta.get("name", b["id"]),
"score": b.get("score", 0),
"domain": meta.get("domain", []),
"valence": meta.get("valence", 0.5),
"arousal": meta.get("arousal", 0.3),
"content_preview": strip_wikilinks(b.get("content", ""))[:200],
})
return JSONResponse(result)
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
@mcp.custom_route("/api/network", methods=["GET"])
async def api_network(request):
"""Get embedding similarity network for visualization."""
from starlette.responses import JSONResponse
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
nodes = []
edges = []
embeddings = {}
for b in all_buckets:
meta = b.get("metadata", {})
bid = b["id"]
nodes.append({
"id": bid,
"name": meta.get("name", bid),
"type": meta.get("type", "dynamic"),
"domain": meta.get("domain", []),
"valence": meta.get("valence", 0.5),
"arousal": meta.get("arousal", 0.3),
"score": decay_engine.calculate_score(meta),
"resolved": meta.get("resolved", False),
"pinned": meta.get("pinned", False),
"digested": meta.get("digested", False),
})
if embedding_engine and embedding_engine.enabled:
emb = await embedding_engine.get_embedding(bid)
if emb is not None:
embeddings[bid] = emb
# Build edges from embeddings (similarity > 0.5)
ids = list(embeddings.keys())
for i, id_a in enumerate(ids):
for id_b in ids[i+1:]:
sim = embedding_engine._cosine_similarity(embeddings[id_a], embeddings[id_b])
if sim > 0.5:
edges.append({"source": id_a, "target": id_b, "similarity": round(sim, 3)})
return JSONResponse({"nodes": nodes, "edges": edges})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
@mcp.custom_route("/api/breath-debug", methods=["GET"])
async def api_breath_debug(request):
"""Debug endpoint: simulate breath scoring and return per-bucket breakdown."""
from starlette.responses import JSONResponse
query = request.query_params.get("q", "")
q_valence = request.query_params.get("valence")
q_arousal = request.query_params.get("arousal")
    # Tolerate malformed numeric params instead of letting float() raise
    try:
        q_valence = float(q_valence) if q_valence else None
        q_arousal = float(q_arousal) if q_arousal else None
    except ValueError:
        q_valence = q_arousal = None
try:
all_buckets = await bucket_mgr.list_all(include_archive=False)
results = []
w = {
"topic": bucket_mgr.w_topic,
"emotion": bucket_mgr.w_emotion,
"time": bucket_mgr.w_time,
"importance": bucket_mgr.w_importance,
}
w_sum = sum(w.values())
for bucket in all_buckets:
meta = bucket.get("metadata", {})
bid = bucket["id"]
try:
topic = bucket_mgr._calc_topic_score(query, bucket) if query else 0.0
emotion = bucket_mgr._calc_emotion_score(q_valence, q_arousal, meta)
time_s = bucket_mgr._calc_time_score(meta)
imp = max(1, min(10, int(meta.get("importance", 5)))) / 10.0
raw_total = (
topic * w["topic"]
+ emotion * w["emotion"]
+ time_s * w["time"]
+ imp * w["importance"]
)
normalized = (raw_total / w_sum) * 100 if w_sum > 0 else 0
resolved = meta.get("resolved", False)
if resolved:
normalized *= 0.3
results.append({
"id": bid,
"name": meta.get("name", bid),
"domain": meta.get("domain", []),
"type": meta.get("type", "dynamic"),
"resolved": resolved,
"pinned": meta.get("pinned", False),
"scores": {
"topic": round(topic, 4),
"emotion": round(emotion, 4),
"time": round(time_s, 4),
"importance": round(imp, 4),
},
"weights": w,
"raw_total": round(raw_total, 4),
"normalized": round(normalized, 2),
"passed_threshold": normalized >= bucket_mgr.fuzzy_threshold,
})
except Exception:
continue
results.sort(key=lambda x: x["normalized"], reverse=True)
passed = [r for r in results if r["passed_threshold"]]
return JSONResponse({
"query": query,
"valence": q_valence,
"arousal": q_arousal,
"weights": w,
"threshold": bucket_mgr.fuzzy_threshold,
"total_candidates": len(results),
"passed_count": len(passed),
"results": results[:50], # top 50 for debug
})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
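# Worked example of the normalization above (illustrative weights; the real
# ones come from BucketManager): with w = {topic: 0.4, emotion: 0.2, time: 0.2,
# importance: 0.2} (w_sum = 1.0) and component scores topic=0.5, emotion=0.8,
# time=0.6, importance=0.7:
#   raw_total  = 0.5*0.4 + 0.8*0.2 + 0.6*0.2 + 0.7*0.2 = 0.62
#   normalized = 0.62 / 1.0 * 100 = 62.0
# A resolved bucket would then be damped to 62.0 * 0.3 = 18.6.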
@mcp.custom_route("/dashboard", methods=["GET"])
async def dashboard(request):
"""Serve the dashboard HTML page."""
from starlette.responses import HTMLResponse
import os
dashboard_path = os.path.join(os.path.dirname(__file__), "dashboard.html")
try:
with open(dashboard_path, "r", encoding="utf-8") as f:
return HTMLResponse(f.read())
except FileNotFoundError:
return HTMLResponse("<h1>dashboard.html not found</h1>", status_code=404)
@mcp.custom_route("/api/config", methods=["GET"])
async def api_config_get(request):
"""Get current runtime config (safe fields only, API key masked)."""
from starlette.responses import JSONResponse
dehy = config.get("dehydration", {})
emb = config.get("embedding", {})
api_key = dehy.get("api_key", "")
masked_key = f"{api_key[:4]}...{api_key[-4:]}" if len(api_key) > 8 else ("***" if api_key else "")
return JSONResponse({
"dehydration": {
"model": dehy.get("model", ""),
"base_url": dehy.get("base_url", ""),
"api_key_masked": masked_key,
"max_tokens": dehy.get("max_tokens", 1024),
"temperature": dehy.get("temperature", 0.1),
},
"embedding": {
"enabled": emb.get("enabled", False),
"model": emb.get("model", ""),
},
"merge_threshold": config.get("merge_threshold", 75),
"transport": config.get("transport", "stdio"),
"buckets_dir": config.get("buckets_dir", ""),
})
@mcp.custom_route("/api/config", methods=["POST"])
async def api_config_update(request):
"""Hot-update runtime config. Optionally persist to config.yaml."""
from starlette.responses import JSONResponse
import yaml
try:
body = await request.json()
except Exception:
return JSONResponse({"error": "invalid JSON"}, status_code=400)
updated = []
# --- Dehydration config ---
if "dehydration" in body:
d = body["dehydration"]
dehy = config.setdefault("dehydration", {})
for key in ("model", "base_url", "max_tokens", "temperature"):
if key in d:
dehy[key] = d[key]
updated.append(f"dehydration.{key}")
if "api_key" in d and d["api_key"]:
dehy["api_key"] = d["api_key"]
updated.append("dehydration.api_key")
# Hot-reload dehydrator
dehydrator.model = dehy.get("model", "deepseek-chat")
dehydrator.base_url = dehy.get("base_url", "")
dehydrator.api_key = dehy.get("api_key", "")
if hasattr(dehydrator, "client") and dehydrator.api_key:
from openai import AsyncOpenAI
dehydrator.client = AsyncOpenAI(
api_key=dehydrator.api_key,
base_url=dehydrator.base_url,
)
# --- Embedding config ---
if "embedding" in body:
e = body["embedding"]
emb = config.setdefault("embedding", {})
if "enabled" in e:
emb["enabled"] = bool(e["enabled"])
embedding_engine.enabled = emb["enabled"]
updated.append("embedding.enabled")
if "model" in e:
emb["model"] = e["model"]
embedding_engine.model = emb["model"]
updated.append("embedding.model")
# --- Merge threshold ---
if "merge_threshold" in body:
config["merge_threshold"] = int(body["merge_threshold"])
updated.append("merge_threshold")
# --- Persist to config.yaml if requested ---
if body.get("persist", False):
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.yaml")
try:
save_config = {}
if os.path.exists(config_path):
with open(config_path, "r", encoding="utf-8") as f:
save_config = yaml.safe_load(f) or {}
if "dehydration" in body:
sc_dehy = save_config.setdefault("dehydration", {})
for key in ("model", "base_url", "max_tokens", "temperature"):
if key in body["dehydration"]:
sc_dehy[key] = body["dehydration"][key]
# Never persist api_key to yaml (use env var)
if "embedding" in body:
sc_emb = save_config.setdefault("embedding", {})
for key in ("enabled", "model"):
if key in body["embedding"]:
sc_emb[key] = body["embedding"][key]
if "merge_threshold" in body:
save_config["merge_threshold"] = int(body["merge_threshold"])
with open(config_path, "w", encoding="utf-8") as f:
yaml.dump(save_config, f, default_flow_style=False, allow_unicode=True)
updated.append("persisted_to_yaml")
except Exception as e:
return JSONResponse({"error": f"persist failed: {e}", "updated": updated}, status_code=500)
return JSONResponse({"updated": updated, "ok": True})
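# Hypothetical curl sketch for a hot config update (adjust host/port as needed):
#   curl -X POST http://localhost:8000/api/config \
#     -H 'Content-Type: application/json' \
#     -d '{"merge_threshold": 80, "persist": true}'
# Note that the API key is deliberately never written to config.yaml; supply
# it via environment variable instead.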
# =============================================================
# Import API — conversation-history import
# =============================================================
@mcp.custom_route("/api/import/upload", methods=["POST"])
async def api_import_upload(request):
"""Upload a conversation file and start import."""
from starlette.responses import JSONResponse
if import_engine.is_running:
return JSONResponse({"error": "Import already running"}, status_code=409)
content_type = request.headers.get("content-type", "")
filename = ""
try:
if "multipart/form-data" in content_type:
form = await request.form()
file_field = form.get("file")
if not file_field:
return JSONResponse({"error": "No file field"}, status_code=400)
raw_bytes = await file_field.read()
filename = getattr(file_field, "filename", "upload")
raw_content = raw_bytes.decode("utf-8", errors="replace")
else:
body = await request.body()
raw_content = body.decode("utf-8", errors="replace")
# Try to get filename from query params
filename = request.query_params.get("filename", "upload")
if not raw_content.strip():
return JSONResponse({"error": "Empty file"}, status_code=400)
preserve_raw = request.query_params.get("preserve_raw", "").lower() in ("1", "true")
resume = request.query_params.get("resume", "").lower() in ("1", "true")
except Exception as e:
return JSONResponse({"error": f"Failed to read upload: {e}"}, status_code=400)
# Start import in background
async def _run_import():
try:
await import_engine.start(raw_content, filename, preserve_raw, resume)
except Exception as e:
logger.error(f"Import failed: {e}")
asyncio.create_task(_run_import())
return JSONResponse({
"status": "started",
"filename": filename,
"size_bytes": len(raw_content.encode()),
})
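# Hypothetical multipart upload sketch (adjust host/port; conversations.json
# is an illustrative filename):
#   curl -X POST 'http://localhost:8000/api/import/upload?preserve_raw=1' \
#     -F 'file=@conversations.json'
# Progress can then be polled via GET /api/import/status.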
@mcp.custom_route("/api/import/status", methods=["GET"])
async def api_import_status(request):
"""Get current import progress."""
from starlette.responses import JSONResponse
return JSONResponse(import_engine.get_status())
@mcp.custom_route("/api/import/pause", methods=["POST"])
async def api_import_pause(request):
"""Pause the running import."""
from starlette.responses import JSONResponse
if not import_engine.is_running:
return JSONResponse({"error": "No import running"}, status_code=400)
import_engine.pause()
return JSONResponse({"status": "pause_requested"})
@mcp.custom_route("/api/import/patterns", methods=["GET"])
async def api_import_patterns(request):
"""Detect high-frequency patterns after import."""
from starlette.responses import JSONResponse
try:
patterns = await import_engine.detect_patterns()
return JSONResponse({"patterns": patterns})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
@mcp.custom_route("/api/import/results", methods=["GET"])
async def api_import_results(request):
"""List recently imported/created buckets for review."""
from starlette.responses import JSONResponse
try:
limit = int(request.query_params.get("limit", "50"))
all_buckets = await bucket_mgr.list_all(include_archive=False)
# Sort by created time, newest first
all_buckets.sort(key=lambda b: b["metadata"].get("created", ""), reverse=True)
results = []
for b in all_buckets[:limit]:
results.append({
"id": b["id"],
"name": b["metadata"].get("name", ""),
"content": b["content"][:300],
"type": b["metadata"].get("type", ""),
"domain": b["metadata"].get("domain", []),
"tags": b["metadata"].get("tags", []),
"importance": b["metadata"].get("importance", 5),
"created": b["metadata"].get("created", ""),
})
return JSONResponse({"buckets": results, "total": len(all_buckets)})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
@mcp.custom_route("/api/import/review", methods=["POST"])
async def api_import_review(request):
"""Apply review decisions: mark buckets as important/noise/pinned."""
from starlette.responses import JSONResponse
try:
body = await request.json()
except Exception:
return JSONResponse({"error": "Invalid JSON"}, status_code=400)
decisions = body.get("decisions", [])
if not decisions:
return JSONResponse({"error": "No decisions provided"}, status_code=400)
applied = 0
errors = 0
for d in decisions:
bid = d.get("bucket_id", "")
action = d.get("action", "")
if not bid or not action:
continue
try:
if action == "important":
await bucket_mgr.update(bid, importance=9)
elif action == "pin":
await bucket_mgr.update(bid, pinned=True)
elif action == "noise":
await bucket_mgr.update(bid, resolved=True, importance=1)
elif action == "delete":
file_path = bucket_mgr._find_bucket_file(bid)
if file_path:
os.remove(file_path)
applied += 1
except Exception as e:
logger.warning(f"Review action failed for {bid}: {e}")
errors += 1
return JSONResponse({"applied": applied, "errors": errors})
# --- Entry point ---
if __name__ == "__main__":
transport = config.get("transport", "stdio")
logger.info(f"Ombre Brain starting | transport: {transport}")
if transport in ("sse", "streamable-http"):
import threading
import uvicorn
from starlette.middleware.cors import CORSMiddleware
    # --- Application-level keepalive: ping /health every 60s to keep
    #     Cloudflare Tunnel from dropping an idle connection ---
async def _keepalive_loop():
await asyncio.sleep(10) # Wait for server to fully start
async with httpx.AsyncClient() as client:
while True:
try:
await client.get("http://localhost:8000/health", timeout=5)
logger.debug("Keepalive ping OK / 保活 ping 成功")
except Exception as e:
logger.warning(f"Keepalive ping failed / 保活 ping 失败: {e}")
await asyncio.sleep(60)
def _start_keepalive():
loop = asyncio.new_event_loop()
loop.run_until_complete(_keepalive_loop())
t = threading.Thread(target=_start_keepalive, daemon=True)
t.start()
    # --- Add CORS middleware so remote clients (Cloudflare Tunnel / ngrok) can connect ---
if transport == "streamable-http":
_app = mcp.streamable_http_app()
else:
_app = mcp.sse_app()
_app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
expose_headers=["*"],
)
logger.info("CORS middleware enabled for remote transport / 已启用 CORS 中间件")
uvicorn.run(_app, host="0.0.0.0", port=8000)
else:
mcp.run(transport=transport)