Domain layer (zero framework imports): - domain/models.py: pure dataclasses (RuleDocument, RuleSearchResult, Conversation, ChatMessage, LLMResponse, ChatResult) - domain/ports.py: ABC interfaces (RuleRepository, LLMPort, ConversationStore, IssueTracker) - domain/services.py: ChatService orchestrates Q&A flow using only ports Outbound adapters (implement domain ports): - adapters/outbound/openrouter.py: OpenRouterLLM with persistent httpx client, robust JSON parsing, regex citation fallback - adapters/outbound/sqlite_convos.py: SQLiteConversationStore with async_sessionmaker, timezone-aware datetimes, cleanup support - adapters/outbound/gitea_issues.py: GiteaIssueTracker with markdown injection protection (fenced code blocks) - adapters/outbound/chroma_rules.py: ChromaRuleRepository with clamped similarity scores Inbound adapter: - adapters/inbound/api.py: thin FastAPI router with input validation (max_length constraints), proper HTTP status codes (503 for missing LLM) Configuration & wiring: - config/settings.py: Pydantic v2 SettingsConfigDict (no module-level singleton) - config/container.py: create_app() factory with lifespan-managed DI - main.py: minimal entry point Test infrastructure (90 tests, all passing): - tests/fakes/: in-memory implementations of all 4 ports - tests/domain/: 26 tests for models and ChatService - tests/adapters/: 64 tests for all adapters using fakes/mocks - No real API calls, no model downloads, no disk I/O in fast tests Also fixes: aiosqlite version constraint (>=0.19.0), adds hatch build targets for new package layout. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
53 lines
1.8 KiB
Python
53 lines
1.8 KiB
Python
"""In-memory RuleRepository for testing — no ChromaDB, no embeddings."""
|
|
|
|
from typing import Optional
|
|
|
|
from domain.models import RuleDocument, RuleSearchResult
|
|
from domain.ports import RuleRepository
|
|
|
|
|
|
class FakeRuleRepository(RuleRepository):
    """In-memory fake: documents live in a plain list; search ranks by word overlap."""

    def __init__(self):
        # Backing store — every added document, kept in insertion order.
        self.documents: list[RuleDocument] = []

    def add_documents(self, docs: list[RuleDocument]) -> None:
        """Append *docs* to the in-memory store."""
        self.documents.extend(docs)

    def search(
        self, query: str, top_k: int = 10, section_filter: Optional[str] = None
    ) -> list[RuleSearchResult]:
        """Return up to *top_k* results ranked by naive keyword overlap.

        A document scores as the fraction of query words found in its
        content (capped at 1.0); zero-overlap documents are omitted.
        """
        wanted = set(query.lower().split())
        hits: list[RuleSearchResult] = []
        for candidate in self.documents:
            # Truthy filter: an empty/None section_filter means "no filter".
            if section_filter and candidate.section != section_filter:
                continue
            shared = wanted & set(candidate.content.lower().split())
            if not shared:
                continue
            score = min(1.0, len(shared) / max(len(wanted), 1))
            hits.append(
                RuleSearchResult(
                    rule_id=candidate.rule_id,
                    title=candidate.title,
                    content=candidate.content,
                    section=candidate.section,
                    similarity=score,
                )
            )
        # Stable descending sort by similarity, then truncate to top_k.
        return sorted(hits, key=lambda hit: hit.similarity, reverse=True)[:top_k]

    def count(self) -> int:
        """Number of documents currently stored."""
        return len(self.documents)

    def clear_all(self) -> None:
        """Drop every stored document."""
        del self.documents[:]

    def get_stats(self) -> dict:
        """Summarize the store: total document count plus per-section tallies."""
        per_section: dict[str, int] = {}
        for doc in self.documents:
            per_section[doc.section] = per_section.get(doc.section, 0) + 1
        return {"total_rules": len(self.documents), "sections": per_section}
|