跳转到内容

可观测性

各语言运行时(Rust、Python、TypeScript、Go)均为生产部署提供可观测性功能。

ai-lib-rust 使用 tracing 生态:

use tracing_subscriber;
// Enable logging
tracing_subscriber::fmt::init();
// All AI-Lib operations emit structured log events
let client = AiClient::from_model("openai/gpt-4o").await?;

日志级别:

  • INFO — 请求/响应摘要
  • DEBUG — 协议加载、管道阶段
  • TRACE — 单个帧、JSONPath 匹配

每次请求都会返回使用统计:

let (response, stats) = client.chat()
.user("Hello")
.execute_with_stats()
.await?;
println!("Model: {}", stats.model);
println!("Provider: {}", stats.provider);
println!("Prompt tokens: {}", stats.prompt_tokens);
println!("Completion tokens: {}", stats.completion_tokens);
println!("Total tokens: {}", stats.total_tokens);
println!("Latency: {}ms", stats.latency_ms);
response, stats = await client.chat() \
.user("Hello") \
.execute_with_stats()
print(f"Tokens: {stats.total_tokens}")
print(f"Latency: {stats.latency_ms}ms")
const { response, stats } = await client.chat().user('Hello').executeWithStats();
console.log(`Model: ${stats.model}`);
console.log(`Provider: ${stats.provider}`);
console.log(`Prompt tokens: ${stats.promptTokens}`);
console.log(`Completion tokens: ${stats.completionTokens}`);
console.log(`Total tokens: ${stats.totalTokens}`);
console.log(`Latency: ${stats.latencyMs}ms`);
response, stats, _ := aiClient.Chat().
User("你好").
ExecuteWithStats(ctx)
fmt.Printf("模型: %s\n", stats.Model)
fmt.Printf("提供商: %s\n", stats.Provider)
fmt.Printf("Tokens: %d\n", stats.TotalTokens)
fmt.Printf("延迟: %dms\n", stats.LatencyMs)
from ai_lib_python.telemetry import MetricsCollector
metrics = MetricsCollector()
client = await AiClient.builder() \
.model("openai/gpt-4o") \
.metrics(metrics) \
.build()
# After some requests...
prometheus_text = metrics.export_prometheus()
import { MetricsCollector } from '@hiddenpath/ai-lib-ts/telemetry';
const metrics = new MetricsCollector();
const client = await AiClient.builder().model('openai/gpt-4o').metrics(metrics).build();
// After some requests...
const prometheusText = metrics.exportPrometheus();
import "github.com/ailib-official/ai-lib-go/telemetry"
metrics := telemetry.NewMetricsCollector()
aiClient, _ := client.NewAiClientBuilder().
Model("openai/gpt-4o").
Metrics(metrics).
Build(ctx)
prometheusText := metrics.ExportPrometheus()

跟踪的指标:

  • ai_lib_requests_total — 按 model/provider 的请求数
  • ai_lib_request_duration_seconds — 延迟直方图
  • ai_lib_tokens_total — 按类型的 token 使用量
  • ai_lib_errors_total — 按类型的错误数

Python:分布式追踪(OpenTelemetry)

Section titled “Python:分布式追踪(OpenTelemetry)”
from ai_lib_python.telemetry import Tracer
tracer = Tracer(
service_name="my-app",
endpoint="http://jaeger:4317",
)
client = await AiClient.builder() \
.model("openai/gpt-4o") \
.tracer(tracer) \
.build()

TypeScript:分布式追踪(OpenTelemetry)

Section titled “TypeScript:分布式追踪(OpenTelemetry)”
import { Tracer } from '@hiddenpath/ai-lib-ts/telemetry';
const tracer = new Tracer({
serviceName: 'my-app',
endpoint: 'http://jaeger:4317',
});
const client = await AiClient.builder().model('openai/gpt-4o').tracer(tracer).build();

追踪包含以下 span:

  • 协议加载
  • 请求编译
  • HTTP 传输
  • 管道处理
  • 事件映射
from ai_lib_python.telemetry import HealthChecker
health = HealthChecker()
status = await health.check()
print(f"Healthy: {status.is_healthy}")
print(f"Details: {status.details}")
import { HealthChecker } from '@hiddenpath/ai-lib-ts/telemetry';
const health = new HealthChecker();
const status = await health.check();
console.log(`Healthy: ${status.isHealthy}`);
console.log(`Details: ${status.details}`);
health := telemetry.NewHealthChecker()
status, _ := health.Check(ctx)
fmt.Printf("健康状态: %v\n", status.IsHealthy)

收集 AI 响应的反馈:

from ai_lib_python.telemetry import FeedbackCollector
feedback = FeedbackCollector()
# After getting a response
feedback.record(
request_id=stats.request_id,
rating=5,
comment="Helpful response",
)
import { FeedbackCollector } from '@hiddenpath/ai-lib-ts/telemetry';
const feedback = new FeedbackCollector();
// After getting a response
feedback.record({
requestId: stats.requestId,
rating: 5,
comment: 'Helpful response',
});

监控熔断器与速率限制器状态:

// Rust
let state = client.circuit_state(); // Closed, Open, HalfOpen
let inflight = client.current_inflight();
# Python
signals = client.signals_snapshot()
print(f"Circuit: {signals.circuit_state}")
print(f"Inflight: {signals.current_inflight}")
// TypeScript
const signals = client.signalsSnapshot();
console.log(`Circuit: ${signals.circuitState}`);
console.log(`Inflight: ${signals.currentInflight}`);
// Go
signals := aiClient.SignalsSnapshot()
fmt.Printf("熔断器状态: %s\n", signals.CircuitState)
fmt.Printf("进行中请求: %d\n", signals.CurrentInflight)