Multi-agent AI automation system with shared message bus, specialized roles (coder/researcher/reviewer), and deny-by-default security. - Config system with Pydantic validation and YAML loading - Async message bus with inter-agent delegation - LLM providers: Anthropic (Claude) and LiteLLM (DeepSeek/Kimi/MiniMax) - Tool system: registry, builtins (file/bash/web), approval engine, MCP client - Agent engine with tool-calling loop and orchestrator for multi-agent management - CLI channel (REPL) and Discord channel - Docker + Dockge deployment config - Typer CLI: chat, serve, status, agents commands Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
50 lines
1.2 KiB
Python
"""LLM provider abstract base class."""
from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any

@dataclass
class ToolCallRequest:
    """One tool invocation requested by the model.

    Carries the three pieces every provider reports for a tool call:
    an id for correlating the eventual result, the tool's name, and
    the call arguments as an already-parsed mapping.
    """

    # Identifier of this specific call (used to match results back to it).
    id: str
    # Name of the tool the model wants to invoke.
    name: str
    # Call arguments as a mapping, not a raw JSON string.
    arguments: dict[str, Any]
@dataclass
class LLMResponse:
    """Provider-agnostic result of a single LLM completion call."""

    # Assistant text; empty string when the model produced none.
    content: str = ""
    # Tool invocations the model requested, in order; empty when none.
    tool_calls: list[ToolCallRequest] = field(default_factory=list)
    # Provider-reported stop reason; empty string when unknown.
    finish_reason: str = ""
    # Token accounting as reported by the provider (keys vary per provider).
    usage: dict[str, int] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the model asked for at least one tool invocation."""
        return bool(self.tool_calls)
class LLMProvider(ABC):
    """Interface that every concrete LLM backend implements."""

    @abstractmethod
    async def complete(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 8192,
        temperature: float = 0.3,
    ) -> LLMResponse:
        """Run a single completion against the backing model.

        Args:
            messages: Conversation history as role/content style dicts.
            tools: Tool schemas to expose to the model, or None for none.
            model: Explicit model name; None presumably selects the
                provider's default (see ``get_default_model``) — confirm
                in each implementation.
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.

        Returns:
            A normalized ``LLMResponse``.
        """

    @abstractmethod
    def get_default_model(self) -> str:
        """Return the default model string for this provider."""