Skip to main content

Setup

# Import the SDK client and authenticate with an API key.
from ai_stats import AIStats

# NOTE(review): prefer loading the key from an environment variable rather
# than hard-coding it in source (the Best Practices section below says the same).
client = AIStats(api_key="your-api-key")

Chat completions

# Single-shot chat completion.
chat_request = {"model": "openai/gpt-4o-mini", "messages": [{"role": "user", "content": "Hi!"}]}
completion = client.generate_text(chat_request)

# Streaming chat completion: iterate the response chunk by chunk.
stream_request = {"model": "openai/gpt-4o-mini", "messages": [{"role": "user", "content": "Stream a story"}]}
for chunk in client.stream_text(stream_request):
    print(chunk)

Responses

# Single-shot response. User-supplied content parts use type "input_text";
# "output_text" is the type of model-GENERATED parts in the Responses API,
# so the original examples would be rejected by the upstream provider.
resp = client.generate_response(
    {"model": "openai/gpt-4.1", "input": [{"role": "user", "content": [{"type": "input_text", "text": "Summarise"}]}]}
)

# Streaming response: iterate the stream line by line.
for line in client.stream_response(
    {"model": "openai/gpt-4.1", "input": [{"role": "user", "content": [{"type": "input_text", "text": "Stream it"}]}]}
):
    print(line)

Images

# `ImageGenerationRequest` was used without being imported (NameError at
# runtime). Import it from the SDK package that also provides `AIStats` —
# TODO(review): confirm the exact import path against the ai_stats package.
from ai_stats import ImageGenerationRequest

# Text-to-image generation.
client.generate_image(
    ImageGenerationRequest.model_validate({"model": "openai/gpt-image-1", "prompt": "A lighthouse at dusk"})
)

# Edit an existing image (supplied as a base64 data URL) per the prompt.
client.generate_image_edit(
    ImageGenerationRequest.model_validate({"model": "openai/gpt-image-1", "prompt": "Make it sunset", "image": "data:image/png;base64,..."})
)

Audio

# Text-to-speech: synthesize audio for the given input text.
speech_request = {"model": "openai/gpt-4o-mini-tts", "input": "Hello world"}
client.generate_speech(speech_request)

# Speech-to-text transcription; the audio is passed as a base64 data URL.
transcription_request = {"model": "openai/gpt-4o-transcribe", "file": "data:audio/mp3;base64,..."}
client.generate_transcription(transcription_request)

# Speech translation; same base64 data-URL input format.
translation_request = {"model": "openai/gpt-4o-translate", "file": "data:audio/mp3;base64,..."}
client.generate_translation(translation_request)

Video

# Text-to-video generation from a prompt.
video_request = {"model": "openai/gpt-video-1", "prompt": "A serene mountain lake at sunrise"}
client.generate_video(video_request)

Embeddings

# Embed a piece of text with the chosen embedding model.
embedding_request = {"model": "openai/text-embedding-3-large", "input": "Sample text"}
embedding = client.generate_embedding(embedding_request)

Moderations

# Run the moderation model over a piece of text.
moderation_request = {"model": "openai/omni-moderation-latest", "input": "Text to check"}
client.generate_moderation(moderation_request)

Batch & files

# Upload the batch input file inside a context manager so the file handle is
# always closed (the original leaked the handle from a bare `open(...)`).
with open("batchinput.jsonl", "rb") as batch_input:
    uploaded = client.upload_file(purpose="batch", file=batch_input)

# Create the batch job from the uploaded file, then fetch its status by id.
batch = client.create_batch({"input_file_id": uploaded.id, "endpoint": "responses", "completion_window": "24h"})
batch_status = client.get_batch(batch.id)
# List all files previously uploaded under this account.
files = client.list_files()

Models & health

# List the models available to this API key.
models = client.get_models()
# Service health/status check.
health = client.get_health()

Errors

# SDK calls raise on failure; catch and report the error.
# NOTE(review): catching bare `Exception` is shown for brevity — prefer the
# SDK's specific exception type(s) if ai_stats exposes any; confirm.
try:
    client.generate_text({"model": "invalid", "messages": []})
except Exception as exc:
    print(f"API error: {exc}")
Use `client.get_health()` to verify the API is operational and check system status.

Error Handling

# `await` is only valid inside an async function; the original used it at
# module level, which is a SyntaxError in a plain script. Wrap the example
# in an `async def` (run it with `asyncio.run(handle_api_error())`).
async def handle_api_error() -> None:
    """Demonstrate catching API errors (auth failures, rate limits, network issues)."""
    try:
        # NOTE(review): this calls `chat_completions(model=..., messages=...)`
        # while the sync examples pass a single dict to `generate_text` —
        # confirm which signature the SDK actually exposes for async chat.
        response = await client.chat_completions(
            model="invalid-model",
            messages=[{"role": "user", "content": "Hello"}]
        )
    except Exception as e:
        print(f"API Error: {e}")
        # Handle authentication, rate limits, etc.
Handle API errors gracefully including rate limits, authentication failures, and network issues.

Configuration Options

from ai_stats import AIStats
import httpx

# Build a client with explicit configuration overrides: a non-default base
# URL, a request timeout, and extra headers sent with every request.
custom_timeout = httpx.Timeout(30.0)
extra_headers = {"Custom-Header": "value"}
client = AIStats(
    api_key="your-api-key",
    base_url="https://api.ai-stats.phaseo.app",
    timeout=custom_timeout,
    headers=extra_headers,
)
Customize client behavior with base URLs, timeouts, headers, and other configuration options.

Type Hints

from ai_stats import AIStats
from typing import List, Dict, Any

async def chat_with_model(
    client: AIStats,
    model: str,
    messages: List[Dict[str, str]]
) -> Dict[str, Any]:
    """Forward *messages* to *model* via the client and return the raw payload."""
    completion = await client.chat_completions(model=model, messages=messages)
    return completion
Rich type hints for modern Python editors and IDEs, improving development experience.

Best Practices

  • Use async with context managers for proper resource cleanup
  • Handle rate limits with exponential backoff
  • Check credits before expensive operations
  • Use streaming for real-time user experiences
  • Store API keys securely (environment variables, not in code)