Initial implementation of xtrm-agent multi-agent system

Multi-agent AI automation system with shared message bus, specialized
roles (coder/researcher/reviewer), and deny-by-default security.

- Config system with Pydantic validation and YAML loading
- Async message bus with inter-agent delegation
- LLM providers: Anthropic (Claude) and LiteLLM (DeepSeek/Kimi/MiniMax)
- Tool system: registry, builtins (file/bash/web), approval engine, MCP client
- Agent engine with tool-calling loop and orchestrator for multi-agent management
- CLI channel (REPL) and Discord channel
- Docker + Dockge deployment config
- Typer CLI: chat, serve, status, agents commands

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Committed by Kaloyan Danchev on 2026-02-18 10:21:42 +02:00 (commit 378d599125).
34 changed files with 4124 additions and 0 deletions
View File
+131
View File
@@ -0,0 +1,131 @@
"""Anthropic/Claude LLM provider."""
from __future__ import annotations

import json
from typing import Any

import anthropic

from xtrm_agent.llm.provider import LLMProvider, LLMResponse, ToolCallRequest
def _openai_tools_to_anthropic(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Convert OpenAI function-calling tool schema to Anthropic format."""
result = []
for tool in tools:
func = tool.get("function", tool)
result.append(
{
"name": func["name"],
"description": func.get("description", ""),
"input_schema": func.get("parameters", {"type": "object", "properties": {}}),
}
)
return result
class AnthropicProvider(LLMProvider):
    """Claude via the Anthropic SDK.

    Accepts OpenAI-style message dicts and tool schemas, converts them to the
    Anthropic Messages API format, and normalizes replies into the
    provider-agnostic ``LLMResponse``.
    """

    def __init__(self, model: str = "claude-sonnet-4-5-20250929") -> None:
        # AsyncAnthropic picks up credentials (ANTHROPIC_API_KEY) from the
        # environment — presumably configured by the deployment; verify.
        self.client = anthropic.AsyncAnthropic()
        self.model = model

    async def complete(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 8192,
        temperature: float = 0.3,
    ) -> LLMResponse:
        """Send *messages* (OpenAI format) to Claude and return an LLMResponse.

        Args:
            messages: Chat history; ``system``-role entries become the
                top-level ``system`` parameter required by Anthropic.
            tools: Optional OpenAI function-calling tool schemas.
            model: Per-call override of the instance's default model.
            max_tokens: Completion token cap.
            temperature: Sampling temperature.
        """
        model = model or self.model
        # Anthropic takes the system prompt as a dedicated parameter, not a
        # message. Collect ALL system entries — previously only the last one
        # survived and earlier ones were silently dropped.
        system_parts: list[str] = []
        api_messages: list[dict[str, Any]] = []
        for msg in messages:
            if msg["role"] == "system":
                content = msg["content"]
                system_parts.append(content if isinstance(content, str) else str(content))
            else:
                api_messages.append(self._convert_message(msg))
        kwargs: dict[str, Any] = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": api_messages,
        }
        if system_parts:
            kwargs["system"] = "\n\n".join(system_parts)
        if tools:
            kwargs["tools"] = _openai_tools_to_anthropic(tools)
        response = await self.client.messages.create(**kwargs)
        return self._parse_response(response)

    def get_default_model(self) -> str:
        """Return the model used when ``complete`` is given no explicit model."""
        return self.model

    def _convert_message(self, msg: dict[str, Any]) -> dict[str, Any]:
        """Convert one OpenAI-format message into Anthropic format."""
        role = msg["role"]
        # Tool results: Anthropic expects a *user* message carrying
        # tool_result content blocks.
        if role == "tool":
            return {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": msg["tool_call_id"],
                        "content": msg.get("content", ""),
                    }
                ],
            }
        # Assistant messages with tool_calls become text + tool_use blocks.
        if role == "assistant" and "tool_calls" in msg:
            blocks: list[dict[str, Any]] = []
            if msg.get("content"):
                blocks.append({"type": "text", "text": msg["content"]})
            for tc in msg["tool_calls"]:
                func = tc.get("function", tc)
                blocks.append(
                    {
                        "type": "tool_use",
                        "id": tc.get("id", func.get("id", "")),
                        "name": func["name"],
                        # BUG FIX: OpenAI-format messages carry arguments as a
                        # JSON *string*; Anthropic's "input" must be an object.
                        "input": self._coerce_arguments(func.get("arguments", {})),
                    }
                )
            return {"role": "assistant", "content": blocks}
        return {"role": role, "content": msg.get("content", "")}

    @staticmethod
    def _coerce_arguments(arguments: Any) -> dict[str, Any]:
        """Normalize tool-call arguments to a dict.

        Dicts pass through unchanged; JSON strings are parsed; anything
        malformed or non-dict degrades to ``{}`` rather than raising, so one
        bad tool call cannot abort the whole request.
        """
        if isinstance(arguments, dict):
            return arguments
        if isinstance(arguments, str):
            try:
                parsed = json.loads(arguments)
            except (json.JSONDecodeError, TypeError):
                return {}
            return parsed if isinstance(parsed, dict) else {}
        return {}

    def _parse_response(self, response: anthropic.types.Message) -> LLMResponse:
        """Parse Anthropic response into standardized LLMResponse.

        Text blocks are joined with newlines; tool_use blocks become
        ``ToolCallRequest`` entries.
        """
        text_parts: list[str] = []
        tool_calls: list[ToolCallRequest] = []
        for block in response.content:
            if block.type == "text":
                text_parts.append(block.text)
            elif block.type == "tool_use":
                tool_calls.append(
                    ToolCallRequest(
                        id=block.id,
                        name=block.name,
                        # block.input should already be a dict; guard anyway.
                        arguments=block.input if isinstance(block.input, dict) else {},
                    )
                )
        return LLMResponse(
            content="\n".join(text_parts),
            tool_calls=tool_calls,
            finish_reason=response.stop_reason or "",
            usage={
                "input_tokens": response.usage.input_tokens,
                "output_tokens": response.usage.output_tokens,
            },
        )
+92
View File
@@ -0,0 +1,92 @@
"""LiteLLM provider — DeepSeek, Kimi, MiniMax, and more."""
from __future__ import annotations
import json
from typing import Any
import litellm
from json_repair import repair_json
from xtrm_agent.llm.provider import LLMProvider, LLMResponse, ToolCallRequest
class LiteLLMProvider(LLMProvider):
    """Multi-provider LLM access through LiteLLM (DeepSeek, Kimi, MiniMax, …)."""

    def __init__(self, model: str = "deepseek/deepseek-chat-v3.1") -> None:
        self.model = model
        # Let LiteLLM silently drop parameters a given backend rejects.
        litellm.drop_params = True

    async def complete(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 8192,
        temperature: float = 0.3,
    ) -> LLMResponse:
        """Run a chat completion through LiteLLM and normalize the result."""
        request: dict[str, Any] = {
            "model": model or self.model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }
        if tools:
            request.update(tools=tools, tool_choice="auto")
        raw = await litellm.acompletion(**request)
        return self._parse_response(raw)

    def get_default_model(self) -> str:
        """Return the configured default model string."""
        return self.model

    def _parse_response(self, response: Any) -> LLMResponse:
        """Translate an OpenAI-format LiteLLM response into LLMResponse."""
        choice = response.choices[0]
        message = choice.message
        calls = [
            ToolCallRequest(
                id=tc.id,
                name=tc.function.name,
                arguments=self._parse_arguments(tc.function.arguments),
            )
            for tc in (message.tool_calls or [])
        ]
        usage: dict[str, int] = {}
        if getattr(response, "usage", None):
            usage = {
                "input_tokens": getattr(response.usage, "prompt_tokens", 0),
                "output_tokens": getattr(response.usage, "completion_tokens", 0),
            }
        return LLMResponse(
            content=message.content or "",
            tool_calls=calls,
            finish_reason=choice.finish_reason or "",
            usage=usage,
        )

    def _parse_arguments(self, raw: str | dict) -> dict[str, Any]:
        """Parse tool-call arguments; fall back to json-repair, then ``{}``."""
        if isinstance(raw, dict):
            return raw
        try:
            return json.loads(raw)
        except (json.JSONDecodeError, TypeError):
            # Some models emit slightly malformed JSON; attempt repair before
            # giving up and returning an empty argument set.
            try:
                fixed = json.loads(repair_json(raw))
            except Exception:
                return {}
            return fixed if isinstance(fixed, dict) else {}
+49
View File
@@ -0,0 +1,49 @@
"""LLM provider abstract base class."""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any
@dataclass
class ToolCallRequest:
    """A single tool call requested by the LLM, normalized across providers."""
    id: str  # provider-assigned call id, echoed back with the tool result
    name: str  # name of the tool to invoke
    arguments: dict[str, Any]  # already-parsed call arguments (never a raw JSON string)
@dataclass
class LLMResponse:
    """Provider-agnostic result of one LLM completion call."""

    # Concatenated text output; may be empty when the model only called tools.
    content: str = ""
    # Tool invocations requested by the model, in order of appearance.
    tool_calls: list[ToolCallRequest] = field(default_factory=list)
    # Provider-reported stop/finish reason, passed through verbatim.
    finish_reason: str = ""
    # Token accounting ("input_tokens" / "output_tokens") when available.
    usage: dict[str, int] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the model requested at least one tool invocation."""
        return bool(self.tool_calls)
class LLMProvider(ABC):
    """Abstract base for LLM providers.

    Concrete subclasses translate between a provider-specific API and the
    OpenAI-style message/tool dicts used by callers, returning a normalized
    ``LLMResponse`` in every case.
    """
    @abstractmethod
    async def complete(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 8192,
        temperature: float = 0.3,
    ) -> LLMResponse:
        """Send messages to the LLM and get a response.

        Args:
            messages: Chat history as role/content dicts (OpenAI style).
            tools: Optional function-calling tool schemas offered to the model.
            model: Optional override of the provider's default model.
            max_tokens: Completion token cap.
            temperature: Sampling temperature.

        Returns:
            A normalized ``LLMResponse``.
        """
    @abstractmethod
    def get_default_model(self) -> str:
        """Return the default model string for this provider."""