Use a Next.js Route Handler as a secure proxy to call LLMWise and stream results to the browser without exposing your API key.
You only pay credits per request. No monthly subscription. Paid credits never expire.
Replace multiple AI subscriptions with one wallet that includes routing, failover, and optimization.
npm install llmwise
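The Route Handler below reads the key from process.env.LLMWISE_API_KEY, so create .env.local with that variable before running the example (the key value here is a placeholder):

# .env.local
LLMWISE_API_KEY=your_api_key_here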
// app/api/llmwise/chat/route.ts — server-only proxy (keeps API key secret)
export async function POST(req: Request) {
  const body = await req.json();
  const upstream = await fetch("https://llmwise.ai/api/v1/chat", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Accept: "text/event-stream",
      Authorization: "Bearer " + process.env.LLMWISE_API_KEY,
    },
    body: JSON.stringify({ ...body, stream: true }),
  });
  // Stream upstream SSE straight through to the browser.
  return new Response(upstream.body, {
    status: upstream.status,
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache",
    },
  });
}
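With the dev server running, you can smoke-test the proxy before building any UI; the request below is only an illustration (-N stops curl from buffering the SSE stream, and the default Next.js dev port 3000 is assumed):

curl -N -X POST http://localhost:3000/api/llmwise/chat \
  -H "Content-Type: application/json" \
  -d '{"model":"auto","messages":[{"role":"user","content":"Say hello"}]}'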
// app/page.tsx — minimal client that parses SSE JSON and renders deltas
"use client";
import { useState } from "react";

async function* readSSE(res: Response) {
  const reader = res.body?.getReader();
  if (!reader) return;
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let idx = 0;
    while ((idx = buffer.indexOf("\n\n")) !== -1) {
      const raw = buffer.slice(0, idx);
      buffer = buffer.slice(idx + 2);
      for (const line of raw.split("\n")) {
        if (!line.startsWith("data: ")) continue;
        const data = line.slice(6);
        if (data === "[DONE]") return;
        yield JSON.parse(data);
      }
    }
  }
}
export default function Page() {
  const [model, setModel] = useState("auto");
  const [input, setInput] = useState("");
  const [out, setOut] = useState("");
  const [loading, setLoading] = useState(false);

  async function send() {
    setLoading(true);
    setOut("");
    const res = await fetch("/api/llmwise/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model,
        messages: [{ role: "user", content: input }],
        stream: true,
      }),
    });
    for await (const ev of readSSE(res)) {
      if (ev.error) break;
      if (ev.delta) setOut((p) => p + ev.delta);
      if (ev.event === "done") break;
    }
    setLoading(false);
  }

  return (
    <div style={{ maxWidth: 720, margin: "0 auto", padding: 16 }}>
      <select value={model} onChange={(e) => setModel(e.target.value)}>
        <option value="auto">auto</option>
        <option value="gpt-5.2">gpt-5.2</option>
        <option value="claude-sonnet-4.5">claude-sonnet-4.5</option>
        <option value="gemini-3-flash">gemini-3-flash</option>
        <option value="deepseek-v3">deepseek-v3</option>
      </select>
      <div style={{ marginTop: 12, whiteSpace: "pre-wrap", minHeight: 200 }}>{out}</div>
      <textarea value={input} onChange={(e) => setInput(e.target.value)} style={{ width: "100%" }} />
      <button disabled={loading} onClick={send}>{loading ? "Thinking..." : "Send"}</button>
    </div>
  );
}

Everything you need to integrate LLMWise's multi-model API into your Next.js project.
You can call LLMWise with plain fetch (no SDK required). If you want typed helpers for server-side calls, install the official SDK.
npm install llmwise
Create a POST handler that forwards requests to https://llmwise.ai/api/v1/chat. This keeps your API key server-only and streams SSE to the browser.
// app/api/llmwise/chat/route.ts
export async function POST(req: Request) {
  const body = await req.json();
  const upstream = await fetch("https://llmwise.ai/api/v1/chat", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Accept: "text/event-stream",
      Authorization: "Bearer " + process.env.LLMWISE_API_KEY,
    },
    body: JSON.stringify({ ...body, stream: true }),
  });
  return new Response(upstream.body, { headers: { "Content-Type": "text/event-stream" } });
}

LLMWise streaming is SSE with JSON chunks that include a delta field. Read lines starting with data: and append ev.delta.
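For reference, here is a minimal TypeScript shape for those chunks, inferred only from the fields the client example reads (delta, error, and a "done" event); treat it as a sketch, not the full LLMWise schema.

// Sketch of the chunk shape yielded by readSSE, inferred from the fields the
// client example uses. Not the complete LLMWise schema.
type StreamChunk = {
  delta?: string;           // incremental text to append to the output
  event?: "done" | string;  // "done" marks the end of the stream
  error?: unknown;          // present if the upstream request failed
};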
async function* readSSE(res: Response) {
  const reader = res.body?.getReader();
  if (!reader) return;
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let idx = 0;
    while ((idx = buffer.indexOf("\n\n")) !== -1) {
      const raw = buffer.slice(0, idx);
      buffer = buffer.slice(idx + 2);
      for (const line of raw.split("\n")) {
        if (!line.startsWith("data: ")) continue;
        const data = line.slice(6);
        if (data === "[DONE]") return;
        yield JSON.parse(data);
      }
    }
  }
}

Store your LLMWise API key in .env.local. Next.js will automatically load it on the server side. Never expose the key to the client; the Route Handler keeps it server-only.
# .env.local
LLMWISE_API_KEY=your_api_key_here
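Optionally, fail fast when the variable is missing instead of sending "Bearer undefined" upstream. The requireApiKey helper below is a hypothetical addition to the Route Handler file, not part of LLMWise:

// Hypothetical helper for the Route Handler: read the key once, fail fast if missing.
function requireApiKey(): string {
  const key = process.env.LLMWISE_API_KEY;
  if (!key) throw new Error("LLMWISE_API_KEY is not set; add it to .env.local");
  return key;
}

Then use Authorization: "Bearer " + requireApiKey() in the handler's fetch headers.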
Since switching models only means changing the model string in the request body, you can add a dropdown (or use model="auto") to route by task type. You can also upgrade to Compare/Blend/Judge for multi-model workflows.
// Pass model in the request body — the Route Handler reads it
await fetch("/api/llmwise/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: selectedModel,
    messages,
    stream: true,
  }),
});