Skip to content
Open
103 changes: 103 additions & 0 deletions docs/my-website/docs/providers/telnyx.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# Telnyx

Telnyx provides an OpenAI-compatible Inference API for accessing hosted large language models (LLMs).

## Overview

| Property | Details |
|-------|--------|
| Provider ID | `telnyx` |
| API Base | `https://api.telnyx.com/v2/ai` |
| API Key Env | `TELNYX_API_KEY` |
| OpenAI Compatible | Yes |
| Website | [telnyx.com](https://telnyx.com) |
| API Docs | [developers.telnyx.com/docs/inference](https://developers.telnyx.com/docs/inference/getting-started) |
| Sign Up | [telnyx.com/sign-up](https://telnyx.com/sign-up) |

## Quick Start

### Installation

```bash
pip install litellm
```

### Usage

```python
import os
import litellm

os.environ["TELNYX_API_KEY"] = "your-api-key"

# Chat completion
response = litellm.completion(
model="telnyx/moonshotai/Kimi-K2.6",
messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

# With streaming
response = litellm.completion(
model="telnyx/moonshotai/Kimi-K2.6",
messages=[{"role": "user", "content": "Hello!"}],
stream=True,
)
for chunk in response:
print(chunk.choices[0].delta.content or "", end="")
```

### Using with OpenAI SDK

Since Telnyx is OpenAI-compatible, you can also use the OpenAI SDK directly:

```python
import os

from openai import OpenAI

client = OpenAI(
api_key=os.environ["TELNYX_API_KEY"],
base_url="https://api.telnyx.com/v2/ai/openai",
)

response = client.chat.completions.create(
model="moonshotai/Kimi-K2.6",
messages=[{"role": "user", "content": "Hello!"}],
)
```

## Available Models

| Model ID | Parameters | Context Length | Best For |
|----------|-----------|---------------|----------|
| `moonshotai/Kimi-K2.6` | 1.0T | 256K | Highest intelligence, voice AI |
| `zai-org/GLM-5.1-FP8` | 753.9B | 202K | Efficient reasoning, function calling |
| `MiniMaxAI/MiniMax-M2.7` | — | 2M | Cheapest, high intelligence |

See [Telnyx Available Models](https://developers.telnyx.com/docs/inference/models) for the full list.

## Embeddings

```python
response = litellm.embedding(
model="telnyx/thenlper/gte-large",
input=["Hello world"],
)
```

## Proxy Server (LiteLLM Proxy)

Add Telnyx to your LiteLLM proxy config:

```yaml
model_list:
- model_name: kimi-k2.6
litellm_params:
model: telnyx/moonshotai/Kimi-K2.6
api_key: os.environ/TELNYX_API_KEY
```

## Getting an API Key

1. Sign up at [telnyx.com/sign-up](https://telnyx.com/sign-up)
2. Navigate to the [Telnyx Portal](https://portal.telnyx.com/)
3. Create an API key under **Auth > API Keys**
1 change: 1 addition & 0 deletions docs/my-website/sidebars.js
Original file line number Diff line number Diff line change
Expand Up @@ -1009,6 +1009,7 @@ const sidebars = {
"providers/scaleway",
"providers/stability",
"providers/synthetic",
"providers/telnyx",
"providers/snowflake",
"providers/togetherai",
"providers/topaz",
Expand Down
4 changes: 4 additions & 0 deletions litellm/llms/openai_like/providers.json
Original file line number Diff line number Diff line change
Expand Up @@ -101,5 +101,9 @@
"param_mappings": {
"max_completion_tokens": "max_tokens"
}
},
"telnyx": {
"base_url": "https://api.telnyx.com/v2/ai",
"api_key_env": "TELNYX_API_KEY"
}
}
1 change: 1 addition & 0 deletions litellm/types/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3305,6 +3305,7 @@ class LlmProviders(str, Enum):
POE = "poe"
CHUTES = "chutes"
XIAOMI_MIMO = "xiaomi_mimo"
TELNYX = "telnyx"
LITELLM_AGENT = "litellm_agent"
CURSOR = "cursor"
BEDROCK_MANTLE = "bedrock_mantle"
Expand Down
17 changes: 17 additions & 0 deletions provider_endpoints_support.json
Original file line number Diff line number Diff line change
Expand Up @@ -2011,6 +2011,23 @@
"a2a": false
}
},
"telnyx": {
"display_name": "Telnyx (`telnyx`)",
"url": "https://docs.litellm.ai/docs/providers/telnyx",
"endpoints": {
"chat_completions": true,
"messages": false,
"responses": false,
"embeddings": true,
"image_generations": false,
"audio_transcriptions": false,
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false,
"a2a": false
}
},
"text-completion-codestral": {
"display_name": "Text Completion Codestral (`text-completion-codestral`)",
"url": "https://docs.litellm.ai/docs/providers/codestral",
Expand Down
142 changes: 142 additions & 0 deletions tests/test_litellm/llms/openai_like/test_telnyx_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
"""
Tests for Telnyx provider configuration via JSON providers system.
"""

import os
import sys

import pytest

from unittest.mock import MagicMock, patch

# Add workspace to path
workspace_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../.."))
sys.path.insert(0, workspace_path)

import litellm


class TestTelnyxProviderLoader:
    """Verify that the Telnyx JSON-provider definition loads and resolves correctly."""

    def test_telnyx_provider_exists(self):
        """The 'telnyx' entry must be present in the JSON provider registry."""
        from litellm.llms.openai_like.json_loader import JSONProviderRegistry

        assert JSONProviderRegistry.exists("telnyx")

    def test_telnyx_provider_config(self):
        """The registry entry must carry the expected base URL and API-key env var."""
        from litellm.llms.openai_like.json_loader import JSONProviderRegistry

        entry = JSONProviderRegistry.get("telnyx")
        assert entry is not None
        assert entry.api_key_env == "TELNYX_API_KEY"
        assert entry.base_url == "https://api.telnyx.com/v2/ai"

    def test_telnyx_dynamic_config_generation(self):
        """A config class generated from the JSON entry must resolve API info."""
        from litellm.llms.openai_like.dynamic_config import create_config_class
        from litellm.llms.openai_like.json_loader import JSONProviderRegistry

        spec = JSONProviderRegistry.get("telnyx")
        cfg = create_config_class(spec)()

        # With no caller-supplied values, defaults come from the JSON definition.
        default_base, _ = cfg._get_openai_compatible_provider_info(None, None)
        assert default_base == "https://api.telnyx.com/v2/ai"

        # Caller-supplied values take precedence over the defaults.
        custom_base, custom_key = cfg._get_openai_compatible_provider_info(
            "https://custom.api.com", "test-key"
        )
        assert custom_base == "https://custom.api.com"
        assert custom_key == "test-key"

    def test_telnyx_provider_resolution(self):
        """get_llm_provider must split the model string and select the Telnyx base URL."""
        from litellm.litellm_core_utils.get_llm_provider_logic import (
            get_llm_provider,
        )

        resolved_model, resolved_provider, _key, resolved_base = get_llm_provider(
            model="telnyx/meta-llama/Meta-Llama-3.1-8B-Instruct",
            custom_llm_provider=None,
            api_base=None,
            api_key=None,
        )

        assert resolved_provider == "telnyx"
        assert resolved_model == "meta-llama/Meta-Llama-3.1-8B-Instruct"
        assert resolved_base == "https://api.telnyx.com/v2/ai"

    def test_telnyx_supported_params(self):
        """The generated config must advertise a non-empty list of OpenAI params."""
        from litellm.llms.openai_like.dynamic_config import create_config_class
        from litellm.llms.openai_like.json_loader import JSONProviderRegistry

        cfg = create_config_class(JSONProviderRegistry.get("telnyx"))()

        params = cfg.get_supported_openai_params(
            "meta-llama/Meta-Llama-3.1-8B-Instruct"
        )
        assert isinstance(params, list)
        assert len(params) > 0

    def test_telnyx_provider_config_manager(self):
        """ProviderConfigManager must return a chat config for LlmProviders.TELNYX."""
        from litellm import LlmProviders
        from litellm.utils import ProviderConfigManager

        chat_cfg = ProviderConfigManager.get_provider_chat_config(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct", provider=LlmProviders.TELNYX
        )

        assert chat_cfg is not None
        assert chat_cfg.custom_llm_provider == "telnyx"

    def test_telnyx_llm_providers_enum(self):
        """The LlmProviders enum must expose a TELNYX member with value 'telnyx'."""
        from litellm import LlmProviders

        assert hasattr(LlmProviders, "TELNYX")
        assert LlmProviders.TELNYX.value == "telnyx"


if __name__ == "__main__":
print("Testing Telnyx Provider System...")

test = TestTelnyxProviderLoader()

print("\n1. Testing provider exists...")
test.test_telnyx_provider_exists()
print(" ✓ Telnyx provider registered")

print("\n2. Testing provider config...")
test.test_telnyx_provider_config()
print(" ✓ Config values correct")

print("\n3. Testing dynamic config generation...")
test.test_telnyx_dynamic_config_generation()
print(" ✓ Dynamic config works")

print("\n4. Testing provider resolution...")
test.test_telnyx_provider_resolution()
print(" ✓ Provider resolution works")

print("\n5. Testing supported params...")
test.test_telnyx_supported_params()
print(" ✓ Supported params work")

print("\n6. Testing config manager...")
test.test_telnyx_provider_config_manager()
print(" ✓ Config manager works")

print("\n7. Testing LlmProviders enum...")
test.test_telnyx_llm_providers_enum()
print(" ✓ LlmProviders enum works")

print("\n" + "=" * 50)
print("✓ All Telnyx provider tests passed!")
print("=" * 50)
Loading