Skip to content
Open
1 change: 1 addition & 0 deletions templates/adaptive_rag/.env.example
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
OPENAI_API_KEY="sk-***"
# MINIMAX_API_KEY="your-minimax-api-key" # Optional: use MiniMax instead of OpenAI
11 changes: 11 additions & 0 deletions templates/adaptive_rag/app.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,17 @@ $llm: !pw.xpacks.llm.llms.OpenAIChat
temperature: 0
capacity: 8

# Alternatively, use MiniMax with its OpenAI-compatible API (https://api.minimax.io/v1):
# $llm: !pw.xpacks.llm.llms.OpenAIChat
#   model: "MiniMax-M2.7"
#   api_key: $MINIMAX_API_KEY
#   base_url: "https://api.minimax.io/v1"
#   retry_strategy: !pw.udfs.ExponentialBackoffRetryStrategy
#     max_retries: 6
#   cache_strategy: !pw.udfs.DefaultCache {}
#   temperature: 1.0
#   capacity: 8

# Specifies the embedder model for converting text into embeddings.
$embedder: !pw.xpacks.llm.embedders.OpenAIEmbedder
model: "text-embedding-3-small"
Expand Down
1 change: 1 addition & 0 deletions templates/multimodal_rag/.env.example
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
OPENAI_API_KEY="sk-***"
# MINIMAX_API_KEY="your-minimax-api-key" # Optional: use MiniMax instead of OpenAI
12 changes: 12 additions & 0 deletions templates/multimodal_rag/app.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,18 @@ $llm: !pw.xpacks.llm.llms.OpenAIChat
capacity: 8
async_mode: "fully_async"

# Alternatively, use MiniMax with its OpenAI-compatible API (https://api.minimax.io/v1):
# $llm: !pw.xpacks.llm.llms.OpenAIChat
#   model: "MiniMax-M2.7"
#   api_key: $MINIMAX_API_KEY
#   base_url: "https://api.minimax.io/v1"
#   retry_strategy: !pw.udfs.ExponentialBackoffRetryStrategy
#     max_retries: 6
#   cache_strategy: !pw.udfs.DefaultCache {}
#   temperature: 1.0
#   capacity: 8
#   async_mode: "fully_async"

# Specifies the embedder model for converting text into embeddings.
$embedder: !pw.xpacks.llm.embedders.OpenAIEmbedder
model: "text-embedding-3-small"
Expand Down
1 change: 1 addition & 0 deletions templates/question_answering_rag/.env.example
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
OPENAI_API_KEY="sk-***"
# MINIMAX_API_KEY="your-minimax-api-key" # Optional: use MiniMax instead of OpenAI
12 changes: 12 additions & 0 deletions templates/question_answering_rag/app.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,18 @@ $llm: !pw.xpacks.llm.llms.OpenAIChat
capacity: 8
async_mode: "fully_async"

# Alternatively, use MiniMax with its OpenAI-compatible API (https://api.minimax.io/v1):
# $llm: !pw.xpacks.llm.llms.OpenAIChat
#   model: "MiniMax-M2.7"
#   api_key: $MINIMAX_API_KEY
#   base_url: "https://api.minimax.io/v1"
#   retry_strategy: !pw.udfs.ExponentialBackoffRetryStrategy
#     max_retries: 6
#   cache_strategy: !pw.udfs.DefaultCache {}
#   temperature: 1.0
#   capacity: 8
#   async_mode: "fully_async"

# Specifies the embedder model for converting text into embeddings.
$embedder: !pw.xpacks.llm.embedders.OpenAIEmbedder
model: "text-embedding-3-small"
Expand Down
1 change: 1 addition & 0 deletions templates/slides_ai_search/.env.example
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
OPENAI_API_KEY="YOUR OPENAI_API_KEY"
PATHWAY_LICENSE_KEY="YOUR PATHWAY KEY" # can be obtained here: https://pathway.com/user/license
# MINIMAX_API_KEY="your-minimax-api-key" # Optional: use MiniMax instead of OpenAI
14 changes: 14 additions & 0 deletions templates/slides_ai_search/app.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,20 @@ llm: !pw.xpacks.llm.llms.OpenAIChat
capacity: 8 # reduce this in case you are hitting API throttle limits
async_mode: "fully_async"

# Alternatively, use MiniMax with its OpenAI-compatible API (https://api.minimax.io/v1):
# llm: !pw.xpacks.llm.llms.OpenAIChat
#   model: "MiniMax-M2.7"
#   api_key: $MINIMAX_API_KEY
#   base_url: "https://api.minimax.io/v1"
#   retry_strategy: !pw.udfs.ExponentialBackoffRetryStrategy
#     max_retries: 6
#     initial_delay: 2500
#     backoff_factor: 2.5
#   cache_strategy: !pw.udfs.DefaultCache {}
#   temperature: 1.0
#   capacity: 8
#   async_mode: "fully_async"

# Specifies the embedder model for converting text into embeddings.
$embedder: !pw.xpacks.llm.embedders.OpenAIEmbedder
cache_strategy: !pw.udfs.DefaultCache {}
Expand Down