Skip to main content

Setup

import { AIStats } from "@ai-stats/sdk";

// Fail fast with a clear message instead of using a non-null assertion:
// `!` is erased at compile time, so a missing key would otherwise surface
// only as an opaque auth failure on the first API call.
const apiKey = process.env.AI_STATS_API_KEY;
if (!apiKey) {
  throw new Error("AI_STATS_API_KEY environment variable is not set");
}

const client = new AIStats({ apiKey });

Chat completions

// One-shot chat completion with a system prompt.
const completion = await client.generateText({
  model: "openai/gpt-4o-mini",
  temperature: 0.7,
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What is AI Stats?" },
  ],
});

// Streamed completion: consume chunks as they arrive.
const textStream = client.streamText({
  model: "openai/gpt-4o-mini",
  messages: [{ role: "user", content: "Tell me a story" }],
});
for await (const chunk of textStream) {
  console.log(chunk);
}

Responses

// Non-streaming response. User-supplied content parts use "input_text";
// "output_text" is the type of model-generated parts in the Responses API,
// so sending it in user input is rejected or misinterpreted.
const response = await client.generateResponse({
  model: "openai/gpt-4.1",
  input: [{ role: "user", content: [{ type: "input_text", text: "Summarise this" }] }],
});

// Streaming response. `stream: true` is omitted for consistency with the
// streamText example above — the dedicated streaming method implies it.
for await (const line of client.streamResponse({
  model: "openai/gpt-4.1",
  input: [{ role: "user", content: [{ type: "input_text", text: "Stream this" }] }],
})) {
  console.log(line);
}

Images

// Text-to-image generation.
const imageParams = {
  model: "openai/gpt-image-1",
  prompt: "A lighthouse at golden hour",
};
await client.generateImage(imageParams);

// Edit an existing image supplied as a base64 data URL.
await client.generateImageEdit({
  model: "openai/gpt-image-1",
  image: "data:image/png;base64,...",
  prompt: "Make it sunset",
});

Audio

// Text-to-speech.
await client.generateSpeech({ model: "openai/gpt-4o-mini-tts", input: "Hello world" });

// Both speech-to-text calls take the audio as a base64 data URL.
const audioData = "data:audio/mp3;base64,...";

// Transcription (presumably same-language transcript — verify in SDK docs).
await client.generateTranscription({ model: "openai/gpt-4o-transcribe", file: audioData });

// Translation (presumably transcribes into English — verify in SDK docs).
await client.generateTranslation({ model: "openai/gpt-4o-translate", file: audioData });

Video

// Text-to-video generation.
const videoPrompt = "A serene mountain lake at sunrise";
await client.generateVideo({ model: "openai/gpt-video-1", prompt: videoPrompt });

Embeddings

// Embed a single text input.
const embedding = await client.generateEmbedding({
  input: "Sample text",
  model: "openai/text-embedding-3-large",
});

Moderations

// Content-safety check on a text input.
const moderationInput = "Some text to check";
await client.generateModeration({ model: "openai/omni-moderation-latest", input: moderationInput });

Batch & files

// Upload a JSON payload for batch processing, then create the batch from it.
const payload = new Blob(["{}"], { type: "application/json" });
const file = await client.uploadFile({ purpose: "batch", file: payload });

const batch = await client.createBatch({
  input_file_id: file.id,
  // NOTE(review): confirm the endpoint identifier format the API expects here.
  endpoint: "responses",
  completion_window: "24h",
});

// Look up the batch by id and list uploaded files.
const batchStatus = await client.getBatch(batch.id);
const files = await client.listFiles();

Models & health

// The two lookups are independent — fetch them concurrently instead of
// awaiting sequentially. Bound names are unchanged for later snippets.
const [models, health] = await Promise.all([client.getModels(), client.getHealth()]);

Error handling

try {
  await client.generateText({ model: "invalid", messages: [] });
} catch (error) {
  // `error` is `unknown` under strict mode; narrow with instanceof instead of
  // asserting `as Error`, which throws at runtime on non-Error throwables
  // (strings, plain objects) when `.message` is read as documented behavior.
  const message = error instanceof Error ? error.message : String(error);
  console.error("API Error:", message);
}
Last modified on December 12, 2025