TypeScript AiClient API

Create a client for a single provider/model:

import { AiClient } from '@hiddenpath/ai-lib-ts';

const client = await AiClient.new('openai/gpt-4o');

Or build a client with fallback models, a request timeout, and strict streaming enabled:

import { createClientBuilder } from '@hiddenpath/ai-lib-ts';

const client = await createClientBuilder()
  .withFallbacks(['anthropic/claude-3-5-sonnet', 'deepseek/deepseek-chat'])
  .withTimeout(30000)
  .withStrictStreaming(true)
  .build('openai/gpt-4o');

Set the conversation messages:

// Message is assumed here to be exported from the package root, like AiClient.
import { Message } from '@hiddenpath/ai-lib-ts';

const response = await client
  .chat([
    Message.system('You are helpful.'),
    Message.user('Hello!'),
  ])
  .execute();

Quick single user message:

const response = await client
  .chat()
  .user('What is TypeScript?')
  .execute();

Set sampling temperature (0.0 - 2.0):

const response = await client
  .chat([Message.user('Be creative')])
  .temperature(0.9)
  .execute();

Set maximum output tokens:

const response = await client
  .chat([Message.user('Hi')])
  .maxTokens(500)
  .execute();

Add tool definitions:

// Tool is assumed here to be exported from the package root, like Message.
import { Tool } from '@hiddenpath/ai-lib-ts';

const tool = Tool.define(
  'search',
  {
    type: 'object',
    properties: {
      query: { type: 'string' },
    },
    required: ['query'],
  },
  'Search the web'
);

const response = await client
  .chat([Message.user('Search for TypeScript')])
  .tools([tool])
  .execute();
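
If the model decides to call a tool, the parsed calls are available on the response. A minimal sketch of inspecting them, assuming each ParsedToolCall exposes name and arguments fields (check the ParsedToolCall type for the exact shape):

for (const call of response.toolCalls ?? []) {
  // Field names below are assumptions for illustration only.
  console.log('Tool requested:', call.name);
  console.log('Arguments:', JSON.stringify(call.arguments));
}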

Enable streaming mode:

const stream = client
  .chat([Message.user('Tell a story')])
  .stream()
  .executeStream();

for await (const event of stream) {
  if (event.event_type === 'PartialContentDelta') {
    process.stdout.write(event.content);
  }
}
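
To keep the full completion instead of writing each delta to stdout, the deltas can be accumulated into a buffer. A sketch based on the event shape shown above:

let full = '';
const storyStream = client
  .chat([Message.user('Tell a story')])
  .stream()
  .executeStream();

for await (const event of storyStream) {
  if (event.event_type === 'PartialContentDelta') {
    full += event.content;
  }
}

console.log('Complete story:', full);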

Execute the request and return the response:

const response = await client
  .chat([Message.user('Hello')])
  .execute();

console.log(response.content);
console.log(response.toolCalls);
console.log(response.usage);

Execute and return response with timing stats:

const { response, stats } = await client
  .chat([Message.user('Hi')])
  .executeWithStats();

console.log('Tokens:', stats.totalTokens);
console.log('Latency:', stats.latencyMs, 'ms');
console.log('Model:', stats.model);

Execute as a stream:

const stream = client
  .chat([Message.user('Write code')])
  .stream()
  .executeStream();

for await (const event of stream) {
  // Handle events
}

Execute stream with cancellation support:

const { stream, cancelHandle } = client
  .chat([Message.user('Long task')])
  .stream()
  .executeStreamWithCancel();

// Later, from another context:
// cancelHandle.cancel();

for await (const event of stream) {
  if (event.event_type === 'PartialContentDelta') {
    process.stdout.write(event.content);
  }
}
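
For example, to abort the stream when it exceeds a fixed time budget, the cancel handle can be driven by a timer. A sketch of an alternative to the loop above (the 10-second budget is an arbitrary choice):

const timer = setTimeout(() => cancelHandle.cancel(), 10_000);
try {
  for await (const event of stream) {
    if (event.event_type === 'PartialContentDelta') {
      process.stdout.write(event.content);
    }
  }
} finally {
  // Avoid cancelling a stream that has already finished.
  clearTimeout(timer);
}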

The resolved response has the following shape:

interface ChatResponse {
  content: string;
  toolCalls?: ParsedToolCall[];
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  model: string;
  finishReason: TerminationReason;
}
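
Since toolCalls and usage are optional, guard before reading them. A small sketch:

const totalTokens = response.usage?.totalTokens ?? 0;
const callCount = response.toolCalls?.length ?? 0;
console.log(`Finished (${response.finishReason}) with ${callCount} tool call(s) and ${totalTokens} tokens.`);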

Errors are surfaced as AiLibError instances carrying a standard error code:

import { AiLibError, StandardErrorCode, isRetryable, isFallbackable } from '@hiddenpath/ai-lib-ts';

try {
  const response = await client.chat([Message.user('Hi')]).execute();
} catch (e) {
  if (e instanceof AiLibError) {
    console.log('Error code:', e.code);
    console.log('Message:', e.message);
    console.log('Retryable:', isRetryable(e.code));
    console.log('Fallbackable:', isFallbackable(e.code));
  }
}
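
isRetryable can also drive a simple manual retry loop. A minimal sketch; the attempt count and linear backoff are arbitrary choices, not library defaults:

async function chatWithRetry(prompt: string, attempts = 3) {
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await client.chat([Message.user(prompt)]).execute();
    } catch (e) {
      const retryable = e instanceof AiLibError && isRetryable(e.code);
      if (!retryable || attempt === attempts) {
        throw e;
      }
      // Back off before the next attempt (arbitrary 500 ms per attempt).
      await new Promise((resolve) => setTimeout(resolve, 500 * attempt));
    }
  }
}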

Get runtime signals for monitoring:

const signals = await client.signals();
console.log('Circuit breaker:', signals.circuitBreaker?.state);
console.log('Rate limiter:', signals.rateLimiter?.available);
console.log('Inflight:', signals.inflight?.inUse);
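
These signals can be polled on an interval and forwarded to whatever metrics pipeline is in use. A sketch (the interval and field selection are arbitrary):

setInterval(async () => {
  const s = await client.signals();
  // Each signal group is optional, so read them defensively.
  console.log(JSON.stringify({
    circuitBreaker: s.circuitBreaker?.state,
    rateLimiterAvailable: s.rateLimiter?.available,
    inflightInUse: s.inflight?.inUse,
  }));
}, 15_000);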