Lightweight SDK for RadKod AI API — OpenAI-compatible LLM proxy with streaming and tool calling.
npm install @radkod/ai

import { RadKodAI } from '@radkod/ai'
const ai = new RadKodAI({
baseUrl: 'https://ai.radkod.com',
apiKey: 'benv-your-key-here'
})
// Simple prompt
const answer = await ai.prompt('What is quantum computing?')
console.log(answer)

// Stream tokens as they arrive
await ai.promptStream('Count to 10', (token) => {
process.stdout.write(token)
})

Or with full control:
const result = await ai.chatStream(
{
model: 'z-ai/glm4.7',
messages: [{ role: 'user', content: 'Hello!' }]
},
{
onToken: (token) => process.stdout.write(token),
onDone: (fullText) => console.log('\n\nDone:', fullText.length, 'chars'),
onError: (err) => console.error('Error:', err)
}
)

Define tools and handlers — the SDK runs the tool loop automatically:
const result = await ai.chatWithTools(
{
messages: [{ role: 'user', content: 'What is the weather in Istanbul?' }],
tools: [
{
type: 'function',
function: {
name: 'get_weather',
description: 'Get current weather for a city',
parameters: {
type: 'object',
properties: {
city: { type: 'string', description: 'City name' }
},
required: ['city']
}
}
}
]
},
{
get_weather: async ({ city }) => {
// Your tool implementation
return { temperature: 22, condition: 'sunny', city }
}
}
)
console.log(result.finalText) // "The weather in Istanbul is 22°C and sunny."

const response = await ai.chat({
model: 'meta/llama-3.3-70b-instruct',
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain REST vs GraphQL' }
],
temperature: 0.7,
max_tokens: 2000
})
console.log(response.choices[0].message.content)

const models = await ai.models()
models.forEach(m => console.log(m.id, '—', m.owned_by))

const health = await ai.health()
console.log(health.status) // "ok"

<script type="module">
import { RadKodAI } from 'https://cdn.jsdelivr.net/npm/@radkod/ai/dist/index.mjs'
const ai = new RadKodAI({ apiKey: 'benv-xxx' })
const answer = await ai.prompt('Hello!')
document.body.textContent = answer
</script>

const ai = new RadKodAI({
baseUrl: 'https://ai.radkod.com', // API server URL
apiKey: 'benv-xxx', // API key (optional in open mode)
defaultModel: 'z-ai/glm4.7', // Default model for all requests
timeout: 300000 // Request timeout in ms (default: 5min)
})

import { RadKodAI, RadKodError } from '@radkod/ai'
try {
await ai.prompt('Hello')
} catch (err) {
if (err instanceof RadKodError) {
console.log(err.status) // 401, 429, 502, etc.
console.log(err.body) // API error response body
}
}

MIT