Skip to main content

Overview

The jarvis-ai crate provides AI capabilities for Jarvis:
  • Claude and Gemini API clients with streaming support
  • Whisper API for speech-to-text
  • Session management with automatic tool-call loops
  • Token usage tracking
  • Skill-based routing between providers

Core Trait

AiClient

The main trait for AI providers.
use jarvis_ai::{AiClient, Message, ToolDefinition, AiResponse, AiError};
use async_trait::async_trait;

/// Common interface implemented by every AI provider (Claude, Gemini, …).
/// `Send + Sync` so a client can be shared across tasks (e.g. behind `Arc`).
#[async_trait]
pub trait AiClient: Send + Sync {
    /// Send the full conversation plus the set of tools the model may call,
    /// and resolve once the complete response has arrived.
    async fn send_message(
        &self,
        messages: &[Message],
        tools: &[ToolDefinition],
    ) -> Result<AiResponse, AiError>;

    /// Streaming variant of [`send_message`]: `on_chunk` is presumably
    /// invoked once per streamed text fragment as it arrives — confirm
    /// against a concrete implementation. The complete `AiResponse` is
    /// still returned when the stream finishes.
    async fn send_message_streaming(
        &self,
        messages: &[Message],
        tools: &[ToolDefinition],
        on_chunk: Box<dyn Fn(String) + Send + Sync>,
    ) -> Result<AiResponse, AiError>;
}

Data Types

Message

A message in a conversation.
Message
struct

ToolDefinition

Defines a tool that the AI can call.
ToolDefinition
struct

AiResponse

Response from an AI API call.
AiResponse
struct

ToolCall

A tool invocation request from the AI.
ToolCall
struct

TokenUsage

Token usage statistics.
TokenUsage
struct
Methods:
  • total_tokens() -> u64 — Sum of input and output tokens

Clients

ClaudeClient

Claude (Anthropic) API client.
use jarvis_ai::{ClaudeClient, ClaudeConfig};

let config = ClaudeConfig {
    // Anthropic API key (starts with "sk-ant-").
    api_key: "sk-ant-...".to_string(),
    // Model identifier passed through to the Anthropic API.
    model: "claude-3-5-sonnet-20241022".to_string(),
    // Presumably caps output tokens per response — confirm against ClaudeConfig docs.
    max_tokens: 4096,
};

let client = ClaudeClient::new(config);

GeminiClient

Google Gemini API client.
use jarvis_ai::{GeminiClient, GeminiConfig};

let config = GeminiConfig {
    // Google API key (typically prefixed "AIza").
    api_key: "AIza...".to_string(),
    // Model identifier passed through to the Gemini API.
    model: "gemini-2.0-flash".to_string(),
};

let client = GeminiClient::new(config);

WhisperClient

OpenAI Whisper API for speech-to-text.
use jarvis_ai::{WhisperClient, WhisperConfig};

let config = WhisperConfig {
    // OpenAI API key (starts with "sk-").
    api_key: "sk-...".to_string(),
    model: "whisper-1".to_string(),
};

let client = WhisperClient::new(config);
// `audio_data` is the recorded audio payload; the accepted encodings/formats
// are not shown here — check `transcribe`'s documentation before use.
let transcript = client.transcribe(&audio_data).await?;

Session Management

Session

Manages a conversation with message history and tool execution.
use jarvis_ai::Session;

// Build a session bound to the "claude" provider; each `with_*` call is a
// consuming builder step. `with_max_tool_rounds` bounds the automatic
// tool-call loop so a misbehaving model cannot loop forever.
let mut session = Session::new("claude")
    .with_system_prompt("You are a helpful assistant.")
    .with_tools(tools)
    .with_tool_executor(executor)
    .with_max_tool_rounds(10);
new
fn(impl Into<String>) -> Session
Create a new session with a provider name.
with_system_prompt
fn(impl Into<String>) -> Session
Set the system prompt (prepended to every API call).
with_tools
fn(Vec<ToolDefinition>) -> Session
Register available tools.
with_tool_executor
fn(ToolExecutor) -> Session
Set the tool execution callback. Type: `ToolExecutor = Arc<dyn Fn(&str, &serde_json::Value) -> String + Send + Sync>`
with_max_tool_rounds
fn(u32) -> Session
Set maximum tool-call loop iterations to prevent infinite loops.

Session Methods

// Borrow the full conversation history accumulated so far.
let messages = session.messages();

// Access the session's token-usage tracker (see `TokenTracker`).
let tracker = session.tracker();

// Reset the conversation, discarding all accumulated messages.
session.clear();

// Number of messages currently held in the history.
let count = session.message_count();

Skill Router

Routes AI requests to the appropriate provider based on skill type.
use jarvis_ai::{SkillRouter, Provider, Skill};
use std::sync::Arc;

let mut router = SkillRouter::new();

// Register one client per provider; clients are shared via `Arc` so the
// router and other owners can hold them concurrently.
router.register_client(Provider::Claude, Arc::new(claude_client));
router.register_client(Provider::Gemini, Arc::new(gemini_client));

// Each skill names the provider that serves it plus an optional
// skill-specific system prompt (`None` falls back to no prompt).
router.register_skill(Skill {
    name: "code_assist".to_string(),
    provider: Provider::Claude,
    system_prompt: Some("You are an expert programmer.".to_string()),
});

router.register_skill(Skill {
    name: "general_chat".to_string(),
    provider: Provider::Gemini,
    system_prompt: None,
});

// Route a request: the skill name ("code_assist") selects the provider and
// client registered above; messages and tools are forwarded unchanged.
let response = router.route(
    "code_assist",
    &messages,
    &tools
).await?;
Provider
enum
AI provider selection.
  • Claude — Anthropic Claude
  • Gemini — Google Gemini
Skill
struct

Token Tracking

TokenTracker

Tracks token usage across multiple API calls.
use jarvis_ai::TokenTracker;

let tracker = TokenTracker::new();
// Record one API call under the provider name "claude". The two counts are
// presumably (input_tokens, output_tokens) — confirm against `record`'s signature.
tracker.record("claude", 1000, 500);

// Aggregates across every recorded call, for all providers.
let total = tracker.total_tokens();
let cost = tracker.total_cost();

Error Handling

AiError
enum
Errors that can occur during AI operations.
  • ApiError(String) — API returned an error
  • RateLimited — API rate limit hit
  • NetworkError(String) — Network connection failed
  • ParseError(String) — Failed to parse response
  • Timeout — Request timed out

Usage Example

use jarvis_ai::{
    ClaudeClient, ClaudeConfig, Session,
    Message, Role, ToolDefinition, AiError
};

#[tokio::main]
async fn main() -> Result<(), AiError> {
    // Build the Claude client from environment configuration.
    // `expect` replaces the bare `unwrap`: a missing key now panics with a
    // message naming the variable instead of an opaque `VarError` debug dump.
    let config = ClaudeConfig {
        api_key: std::env::var("ANTHROPIC_API_KEY")
            .expect("ANTHROPIC_API_KEY environment variable must be set"),
        model: "claude-3-5-sonnet-20241022".to_string(),
        max_tokens: 4096,
    };
    let client = ClaudeClient::new(config);

    // Create a session with a system prompt. The leading underscore (and
    // dropped `mut`) keeps this example warning-free: the session is shown
    // for illustration and not used further in this minimal example.
    let _session = Session::new("claude")
        .with_system_prompt("You are a helpful assistant.");

    // Send a single user message with no tools registered.
    let messages = vec![
        Message {
            role: Role::User,
            content: "What is Rust?".to_string(),
        }
    ];

    let response = client.send_message(&messages, &[]).await?;
    println!("Assistant: {}", response.content);
    println!("Tokens used: {}", response.usage.total_tokens());

    Ok(())
}