Use the official LLMWise Go SDK to call multiple AI models with one API key. Idiomatic Go patterns, streaming SSE, context cancellation, and built-in failover.
Credit-based pay-per-use with token-settled billing. No monthly subscription. Paid credits never expire.
Replace multiple AI subscriptions with one wallet that includes routing, failover, and optimization.
go get github.com/llmwise-ai/llmwise-go
// go get github.com/llmwise-ai/llmwise-go
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/llmwise-ai/llmwise-go"
)
func main() {
client := llmwise.NewClient(os.Getenv("LLMWISE_API_KEY"))
// Basic chat request
resp, err := client.Chat(context.Background(), &llmwise.ChatRequest{
Model: "auto",
Messages: []llmwise.Message{
{Role: "user", Content: "Explain goroutines vs OS threads."},
},
MaxTokens: 512,
})
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.Content)
// Streaming chat with context cancellation
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
stream, err := client.ChatStream(ctx, &llmwise.ChatRequest{
Model: "claude-sonnet-4.5",
Messages: []llmwise.Message{{Role: "user", Content: "Write a concurrent web crawler in Go."}},
Stream: true,
})
if err != nil {
log.Fatal(err)
}
defer stream.Close()
for stream.Next() {
ev := stream.Event()
if ev.Delta != "" {
fmt.Print(ev.Delta)
}
if ev.Done {
fmt.Printf("\n\nCredits charged: %d\n", ev.CreditsCharged)
break
}
}
if err := stream.Err(); err != nil {
log.Fatal(err)
}
}Everything you need to integrate LLMWise's multi-model API into your Go project.
Add the official LLMWise Go module to your project. Requires Go 1.21 or later.
go get github.com/llmwise-ai/llmwise-go
Store your API key as an environment variable. The SDK reads it at client creation time.
export LLMWISE_API_KEY="your_api_key_here"
Instantiate the LLMWise client. It uses a shared http.Client with connection pooling by default.
import "github.com/llmwise-ai/llmwise-go"
client := llmwise.NewClient(os.Getenv("LLMWISE_API_KEY"))Call client.Chat with a context and a typed ChatRequest struct. The response includes content, token counts, and cost metadata.
resp, err := client.Chat(context.Background(), &llmwise.ChatRequest{
Model: "gemini-3-flash",
Messages: []llmwise.Message{
{Role: "user", Content: "What is the difference between channels and mutexes in Go?"},
},
})
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.Content)Use client.ChatStream to receive SSE events. The iterator pattern (Next/Event/Err) is idiomatic Go. Pass a context with a timeout for cancellation.
// Bound the whole stream with a 30s timeout; cancel releases resources.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
stream, err := client.ChatStream(ctx, &llmwise.ChatRequest{
	Model:    "deepseek-v3",
	Messages: []llmwise.Message{{Role: "user", Content: "Implement a rate limiter in Go."}},
	Stream:   true,
})
if err != nil {
	log.Fatal(err)
}
defer stream.Close()
for stream.Next() {
	if ev := stream.Event(); ev.Delta != "" {
		fmt.Print(ev.Delta)
	}
}
// Like bufio.Scanner, check the iterator's error after the loop ends —
// otherwise a mid-stream failure (timeout, dropped connection) is
// silently swallowed.
if err := stream.Err(); err != nil {
	log.Fatal(err)
}

Use the Compare endpoint to send the same prompt to multiple models and compare their outputs side by side.
resp, err := client.Compare(context.Background(), &llmwise.CompareRequest{
Models: []string{"gpt-5.2", "claude-sonnet-4.5", "gemini-3-flash"},
Messages: []llmwise.Message{
{Role: "user", Content: "Explain the CAP theorem."},
},
})
if err != nil {
log.Fatal(err)
}
for _, r := range resp.Responses {
fmt.Printf("[%s]: %s\n\n", r.Model, r.Content)
}Credit-based pay-per-use with token-settled billing. No monthly subscription. Paid credits never expire.
Replace multiple AI subscriptions with one wallet that includes routing, failover, and optimization.
Pricing changes, new model launches, and optimization tips. No spam.