Skip to content

Advanced Features (Go)

Beyond core chat functionality, ai-lib-go provides several advanced capabilities.

Embeddings — generate and work with vector embeddings:

import "github.com/ailib-official/ai-lib-go/embeddings"
// Create embedding client
client, err := embeddings.NewEmbeddingClient(ctx, "openai/text-embedding-3-small", nil)
if err != nil {
panic(err)
}
// Generate embeddings
results, err := client.Embed(ctx, []string{
"Go programming language",
"Python programming language",
"Cooking recipes",
})
if err != nil {
panic(err)
}
// Calculate similarity
sim := embeddings.CosineSimilarity(results[0], results[1])
fmt.Printf("Go vs Python similarity: %.3f\n", sim)

Vector operations include cosine similarity, Euclidean distance, and dot product.

Response caching — cache responses to reduce costs and latency:

import "github.com/ailib-official/ai-lib-go/cache"
// Configure cache manager
mgr := cache.NewCacheManager(cache.NewMemoryCache(), 3600 * time.Second)
// Apply to client
aiClient, _ := client.NewAiClient(ctx, "openai", &client.Options{
Cache: mgr,
})
// First call hits the provider
resp1, _ := aiClient.Chat().Model("gpt-4o").User("What is 2+2?").Execute(ctx)
// Second identical call returns cached response
resp2, _ := aiClient.Chat().Model("gpt-4o").User("What is 2+2?").Execute(ctx)

Batch execution — execute multiple requests efficiently with bounded concurrency:

import "github.com/ailib-official/ai-lib-go/batch"
executor := batch.NewBatchExecutor(5, 30 * time.Second)
requests := []client.ChatRequest{
aiClient.Chat().User("Question 1"),
aiClient.Chat().User("Question 2"),
aiClient.Chat().User("Question 3"),
}
results := executor.Execute(ctx, requests)
for _, res := range results {
if res.Error != nil {
fmt.Printf("Error: %v\n", res.Error)
continue
}
fmt.Println(res.Response.Content)
}

Token counting and pricing — estimate token usage and request costs:

import "github.com/ailib-official/ai-lib-go/tokens"
counter := tokens.GetCounterForModel("gpt-4o")
count := counter.Count("Hello, how are you?")
fmt.Printf("Tokens: %d\n", count)
pricing := tokens.GetPricingForModel("openai/gpt-4o")
cost := pricing.Estimate(promptTokens, completionTokens)
fmt.Printf("Estimated cost: $%.4f\n", cost)

Plugins — extend the client with custom request/response hooks:

import "github.com/ailib-official/ai-lib-go/plugins"
type LoggingPlugin struct{}
func (p *LoggingPlugin) OnRequest(req *plugins.Request) {
fmt.Printf("Sending request to %s\n", req.Model)
}
func (p *LoggingPlugin) OnResponse(res *plugins.Response) {
fmt.Printf("Got %d tokens\n", res.Usage.TotalTokens)
}
aiClient.RegisterPlugin(&LoggingPlugin{})

Guardrails — apply content filtering and safety checks to requests:

import "github.com/ailib-official/ai-lib-go/guardrails"
config := guardrails.NewConfig().
AddFilter(guardrails.NewKeywordFilter([]string{"unsafe_word"})).
EnablePiiDetection()
aiClient.SetGuardrails(config)

Smart model routing:

import "github.com/ailib-official/ai-lib-go/routing"
manager := routing.NewModelManager().
AddModel("openai/gpt-4o", 0.7).
AddModel("anthropic/claude-3-5-sonnet", 0.3).
SetStrategy(routing.StrategyWeighted)

Request/response interception:

import "github.com/ailib-official/ai-lib-go/interceptors"
pipeline := interceptors.NewPipeline().
Add(&LoggingInterceptor{}).
Add(&MetricsInterceptor{}).
Add(&AuditInterceptor{})