Multi-agent AI automation system with shared message bus, specialized roles (coder/researcher/reviewer), and deny-by-default security. - Config system with Pydantic validation and YAML loading - Async message bus with inter-agent delegation - LLM providers: Anthropic (Claude) and LiteLLM (DeepSeek/Kimi/MiniMax) - Tool system: registry, builtins (file/bash/web), approval engine, MCP client - Agent engine with tool-calling loop and orchestrator for multi-agent management - CLI channel (REPL) and Discord channel - Docker + Dockge deployment config - Typer CLI: chat, serve, status, agents commands Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
132 lines
4.4 KiB
Python
"""Anthropic/Claude LLM provider."""
|
|
|
|
from __future__ import annotations

import json
from typing import Any

import anthropic

from xtrm_agent.llm.provider import LLMProvider, LLMResponse, ToolCallRequest
|
|
|
|
|
|
def _openai_tools_to_anthropic(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
|
"""Convert OpenAI function-calling tool schema to Anthropic format."""
|
|
result = []
|
|
for tool in tools:
|
|
func = tool.get("function", tool)
|
|
result.append(
|
|
{
|
|
"name": func["name"],
|
|
"description": func.get("description", ""),
|
|
"input_schema": func.get("parameters", {"type": "object", "properties": {}}),
|
|
}
|
|
)
|
|
return result
|
|
|
|
|
|
class AnthropicProvider(LLMProvider):
    """Claude via the Anthropic SDK."""

    def __init__(self, model: str = "claude-sonnet-4-5-20250929") -> None:
        # AsyncAnthropic() picks up credentials from the environment
        # (ANTHROPIC_API_KEY) — presumably; verify against deployment config.
        self.client = anthropic.AsyncAnthropic()
        self.model = model

    async def complete(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 8192,
        temperature: float = 0.3,
    ) -> LLMResponse:
        """Run one chat completion and return a normalized LLMResponse.

        Args:
            messages: OpenAI-style message dicts (roles: system/user/
                assistant/tool). System messages are lifted out into
                Anthropic's top-level ``system`` parameter.
            tools: Optional OpenAI function-calling tool schemas.
            model: Per-call model override; defaults to ``self.model``.
            max_tokens: Generation cap passed to the API.
            temperature: Sampling temperature passed to the API.
        """
        model = model or self.model

        # Anthropic takes the system prompt as a top-level parameter, not a
        # message. Collect *all* system messages — the original kept only the
        # last one, silently dropping earlier system prompts.
        system_parts: list[str] = []
        api_messages: list[dict[str, Any]] = []
        for msg in messages:
            if msg["role"] == "system":
                content = msg["content"]
                system_parts.append(content if isinstance(content, str) else str(content))
            else:
                api_messages.append(self._convert_message(msg))

        kwargs: dict[str, Any] = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": api_messages,
        }
        if system_parts:
            kwargs["system"] = "\n\n".join(system_parts)
        if tools:
            kwargs["tools"] = _openai_tools_to_anthropic(tools)

        response = await self.client.messages.create(**kwargs)
        return self._parse_response(response)

    def get_default_model(self) -> str:
        """Return the model used when complete() receives no explicit model."""
        return self.model

    def _convert_message(self, msg: dict[str, Any]) -> dict[str, Any]:
        """Convert one OpenAI-style message dict to Anthropic format."""
        role = msg["role"]

        # Tool results → user message containing a tool_result block.
        if role == "tool":
            return {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": msg["tool_call_id"],
                        "content": msg.get("content", ""),
                    }
                ],
            }

        # Assistant messages with tool_calls → text + tool_use content blocks.
        if role == "assistant" and "tool_calls" in msg:
            blocks: list[dict[str, Any]] = []
            if msg.get("content"):
                blocks.append({"type": "text", "text": msg["content"]})
            for tc in msg["tool_calls"]:
                func = tc.get("function", tc)
                arguments = func.get("arguments", {})
                # OpenAI serializes function arguments as a JSON *string*,
                # while Anthropic's tool_use "input" must be an object. The
                # original passed the raw string through; decode it here and
                # fall back to {} on empty/invalid payloads.
                if isinstance(arguments, str):
                    try:
                        arguments = json.loads(arguments) if arguments else {}
                    except json.JSONDecodeError:
                        arguments = {}
                if not isinstance(arguments, dict):
                    arguments = {}
                blocks.append(
                    {
                        "type": "tool_use",
                        "id": tc.get("id", func.get("id", "")),
                        "name": func["name"],
                        "input": arguments,
                    }
                )
            return {"role": "assistant", "content": blocks}

        # Plain user/assistant messages pass through unchanged.
        return {"role": role, "content": msg.get("content", "")}

    def _parse_response(self, response: anthropic.types.Message) -> LLMResponse:
        """Parse an Anthropic Message into a standardized LLMResponse."""
        text_parts: list[str] = []
        tool_calls: list[ToolCallRequest] = []

        for block in response.content:
            if block.type == "text":
                text_parts.append(block.text)
            elif block.type == "tool_use":
                tool_calls.append(
                    ToolCallRequest(
                        id=block.id,
                        name=block.name,
                        # Defensive: input should already be a dict per the SDK.
                        arguments=block.input if isinstance(block.input, dict) else {},
                    )
                )

        return LLMResponse(
            content="\n".join(text_parts),
            tool_calls=tool_calls,
            finish_reason=response.stop_reason or "",
            usage={
                "input_tokens": response.usage.input_tokens,
                "output_tokens": response.usage.output_tokens,
            },
        )
|