2026-01-14 14:31:43 +00:00
import { ensureAuthProfileStore , resolveAuthProfileOrder } from "../agents/auth-profiles.js" ;
2026-01-14 05:39:47 +00:00
import { resolveEnvApiKey } from "../agents/model-auth.js" ;
import {
formatApiKeyPreview ,
normalizeApiKeyInput ,
validateApiKeyInput ,
} from "./auth-choice.api-key.js" ;
2026-02-13 16:18:16 +01:00
import { applyAuthChoiceHuggingface } from "./auth-choice.apply.huggingface.js" ;
2026-02-17 13:36:48 +09:00
import type { ApplyAuthChoiceParams , ApplyAuthChoiceResult } from "./auth-choice.apply.js" ;
2026-02-13 16:18:16 +01:00
import { applyAuthChoiceOpenRouter } from "./auth-choice.apply.openrouter.js" ;
2026-01-14 05:39:47 +00:00
import { applyDefaultModelChoice } from "./auth-choice.default-model.js" ;
import {
applyGoogleGeminiModelDefault ,
GOOGLE_GEMINI_DEFAULT_MODEL ,
} from "./google-gemini-model-default.js" ;
import {
applyAuthProfileConfig ,
2026-02-04 04:10:13 -08:00
applyCloudflareAiGatewayConfig ,
applyCloudflareAiGatewayProviderConfig ,
2026-02-07 00:19:04 -08:00
applyQianfanConfig ,
applyQianfanProviderConfig ,
2026-01-17 11:46:37 +02:00
applyKimiCodeConfig ,
applyKimiCodeProviderConfig ,
applyLitellmConfig ,
applyLitellmProviderConfig ,
2026-01-14 05:39:47 +00:00
applyMoonshotConfig ,
2026-02-02 20:25:14 +08:00
applyMoonshotConfigCn ,
2026-01-14 05:39:47 +00:00
applyMoonshotProviderConfig ,
2026-02-03 21:58:04 -08:00
applyMoonshotProviderConfigCn ,
2026-01-14 05:39:47 +00:00
applyOpencodeZenConfig ,
applyOpencodeZenProviderConfig ,
applySyntheticConfig ,
applySyntheticProviderConfig ,
2026-02-10 00:49:34 +01:00
applyTogetherConfig ,
applyTogetherProviderConfig ,
applyVeniceConfig ,
applyVeniceProviderConfig ,
2026-01-16 14:40:56 +01:00
applyVercelAiGatewayConfig ,
applyVercelAiGatewayProviderConfig ,
2026-01-29 00:30:17 +08:00
applyXiaomiConfig ,
applyXiaomiProviderConfig ,
2026-01-14 05:39:47 +00:00
applyZaiConfig ,
2026-02-12 21:01:48 +08:00
applyZaiProviderConfig ,
2026-02-04 04:10:13 -08:00
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF ,
LITELLM_DEFAULT_MODEL_REF ,
2026-02-07 00:19:04 -08:00
QIANFAN_DEFAULT_MODEL_REF ,
2026-01-31 06:04:10 +01:00
KIMI_CODING_MODEL_REF ,
2026-01-14 05:39:47 +00:00
MOONSHOT_DEFAULT_MODEL_REF ,
SYNTHETIC_DEFAULT_MODEL_REF ,
2026-02-10 00:49:34 +01:00
TOGETHER_DEFAULT_MODEL_REF ,
VENICE_DEFAULT_MODEL_REF ,
2026-01-16 14:40:56 +01:00
VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF ,
2026-01-29 00:30:17 +08:00
XIAOMI_DEFAULT_MODEL_REF ,
2026-02-04 04:10:13 -08:00
setCloudflareAiGatewayConfig ,
2026-02-07 00:19:04 -08:00
setQianfanApiKey ,
2026-01-14 05:39:47 +00:00
setGeminiApiKey ,
setLitellmApiKey ,
2026-01-31 06:04:10 +01:00
setKimiCodingApiKey ,
2026-01-14 05:39:47 +00:00
setMoonshotApiKey ,
setOpencodeZenApiKey ,
setSyntheticApiKey ,
2026-02-10 00:49:34 +01:00
setTogetherApiKey ,
setVeniceApiKey ,
2026-01-16 14:40:56 +01:00
setVercelAiGatewayApiKey ,
2026-01-29 00:30:17 +08:00
setXiaomiApiKey ,
2026-01-14 05:39:47 +00:00
setZaiApiKey ,
ZAI_DEFAULT_MODEL_REF ,
} from "./onboard-auth.js" ;
import { OPENCODE_ZEN_DEFAULT_MODEL } from "./opencode-zen-model-default.js" ;
2026-02-12 19:16:04 +01:00
import { detectZaiEndpoint } from "./zai-endpoint-detect.js" ;
2026-01-14 05:39:47 +00:00
export async function applyAuthChoiceApiProviders (
params : ApplyAuthChoiceParams ,
) : Promise < ApplyAuthChoiceResult | null > {
let nextConfig = params . config ;
let agentModelOverride : string | undefined ;
const noteAgentModel = async ( model : string ) = > {
2026-01-31 16:19:20 +09:00
if ( ! params . agentId ) {
return ;
}
2026-01-14 05:39:47 +00:00
await params . prompter . note (
` Default model set to ${ model } for agent " ${ params . agentId } ". ` ,
"Model configured" ,
) ;
} ;
2026-01-23 01:27:52 -06:00
let authChoice = params . authChoice ;
if (
authChoice === "apiKey" &&
params . opts ? . tokenProvider &&
params . opts . tokenProvider !== "anthropic" &&
params . opts . tokenProvider !== "openai"
) {
if ( params . opts . tokenProvider === "openrouter" ) {
authChoice = "openrouter-api-key" ;
} else if ( params . opts . tokenProvider === "litellm" ) {
authChoice = "litellm-api-key" ;
2026-01-23 01:27:52 -06:00
} else if ( params . opts . tokenProvider === "vercel-ai-gateway" ) {
authChoice = "ai-gateway-api-key" ;
2026-02-04 04:10:13 -08:00
} else if ( params . opts . tokenProvider === "cloudflare-ai-gateway" ) {
authChoice = "cloudflare-ai-gateway-api-key" ;
2026-01-23 01:27:52 -06:00
} else if ( params . opts . tokenProvider === "moonshot" ) {
authChoice = "moonshot-api-key" ;
2026-01-31 06:04:10 +01:00
} else if (
params . opts . tokenProvider === "kimi-code" ||
params . opts . tokenProvider === "kimi-coding"
) {
2026-01-23 01:27:52 -06:00
authChoice = "kimi-code-api-key" ;
} else if ( params . opts . tokenProvider === "google" ) {
authChoice = "gemini-api-key" ;
} else if ( params . opts . tokenProvider === "zai" ) {
authChoice = "zai-api-key" ;
2026-01-29 00:30:17 +08:00
} else if ( params . opts . tokenProvider === "xiaomi" ) {
authChoice = "xiaomi-api-key" ;
2026-01-23 01:27:52 -06:00
} else if ( params . opts . tokenProvider === "synthetic" ) {
authChoice = "synthetic-api-key" ;
} else if ( params . opts . tokenProvider === "venice" ) {
authChoice = "venice-api-key" ;
2026-02-10 00:49:34 +01:00
} else if ( params . opts . tokenProvider === "together" ) {
authChoice = "together-api-key" ;
2026-02-13 16:18:16 +01:00
} else if ( params . opts . tokenProvider === "huggingface" ) {
authChoice = "huggingface-api-key" ;
2026-01-23 01:27:52 -06:00
} else if ( params . opts . tokenProvider === "opencode" ) {
authChoice = "opencode-zen" ;
2026-02-04 16:36:37 +08:00
} else if ( params . opts . tokenProvider === "qianfan" ) {
authChoice = "qianfan-api-key" ;
2026-01-23 01:27:52 -06:00
}
}
2026-02-15 06:04:42 +00:00
async function ensureMoonshotApiKeyCredential ( promptMessage : string ) : Promise < void > {
let hasCredential = false ;
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "moonshot" ) {
await setMoonshotApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
const envKey = resolveEnvApiKey ( "moonshot" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing MOONSHOT_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setMoonshotApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : promptMessage ,
validate : validateApiKeyInput ,
} ) ;
await setMoonshotApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
}
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "openrouter-api-key" ) {
2026-02-13 16:18:16 +01:00
return applyAuthChoiceOpenRouter ( params ) ;
2026-01-14 05:39:47 +00:00
}
if ( authChoice === "litellm-api-key" ) {
const store = ensureAuthProfileStore ( params . agentDir , { allowKeychainPrompt : false } ) ;
const profileOrder = resolveAuthProfileOrder ( { cfg : nextConfig , store , provider : "litellm" } ) ;
const existingProfileId = profileOrder . find ( ( profileId ) = > Boolean ( store . profiles [ profileId ] ) ) ;
const existingCred = existingProfileId ? store . profiles [ existingProfileId ] : undefined ;
let profileId = "litellm:default" ;
let hasCredential = false ;
if ( existingProfileId && existingCred ? . type === "api_key" ) {
profileId = existingProfileId ;
hasCredential = true ;
}
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "litellm" ) {
await setLitellmApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
"LiteLLM provides a unified API to 100+ LLM providers.\nGet your API key from your LiteLLM proxy or https://litellm.ai\nDefault proxy runs on http://localhost:4000" ,
"LiteLLM" ,
) ;
const envKey = resolveEnvApiKey ( "litellm" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing LITELLM_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setLitellmApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter LiteLLM API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setLitellmApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
hasCredential = true ;
}
}
if ( hasCredential ) {
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId ,
provider : "litellm" ,
mode : "api_key" ,
} ) ;
}
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : LITELLM_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyLitellmConfig ,
applyProviderConfig : applyLitellmProviderConfig ,
noteDefault : LITELLM_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
return { config : nextConfig , agentModelOverride } ;
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "ai-gateway-api-key" ) {
2026-01-16 14:40:56 +01:00
let hasCredential = false ;
2026-01-23 01:27:52 -06:00
if (
! hasCredential &&
params . opts ? . token &&
params . opts ? . tokenProvider === "vercel-ai-gateway"
) {
await setVercelAiGatewayApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
2026-01-16 14:40:56 +01:00
const envKey = resolveEnvApiKey ( "vercel-ai-gateway" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing AI_GATEWAY_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setVercelAiGatewayApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Vercel AI Gateway API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setVercelAiGatewayApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-01-16 14:40:56 +01:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "vercel-ai-gateway:default" ,
provider : "vercel-ai-gateway" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyVercelAiGatewayConfig ,
applyProviderConfig : applyVercelAiGatewayProviderConfig ,
noteDefault : VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-02-04 04:10:13 -08:00
if ( authChoice === "cloudflare-ai-gateway-api-key" ) {
let hasCredential = false ;
let accountId = params . opts ? . cloudflareAiGatewayAccountId ? . trim ( ) ? ? "" ;
let gatewayId = params . opts ? . cloudflareAiGatewayGatewayId ? . trim ( ) ? ? "" ;
const ensureAccountGateway = async ( ) = > {
if ( ! accountId ) {
const value = await params . prompter . text ( {
message : "Enter Cloudflare Account ID" ,
2026-02-13 00:25:05 -03:00
validate : ( val ) = > ( String ( val ? ? "" ) . trim ( ) ? undefined : "Account ID is required" ) ,
2026-02-04 04:10:13 -08:00
} ) ;
2026-02-13 00:25:05 -03:00
accountId = String ( value ? ? "" ) . trim ( ) ;
2026-02-04 04:10:13 -08:00
}
if ( ! gatewayId ) {
const value = await params . prompter . text ( {
message : "Enter Cloudflare AI Gateway ID" ,
2026-02-13 00:25:05 -03:00
validate : ( val ) = > ( String ( val ? ? "" ) . trim ( ) ? undefined : "Gateway ID is required" ) ,
2026-02-04 04:10:13 -08:00
} ) ;
2026-02-13 00:25:05 -03:00
gatewayId = String ( value ? ? "" ) . trim ( ) ;
2026-02-04 04:10:13 -08:00
}
} ;
const optsApiKey = normalizeApiKeyInput ( params . opts ? . cloudflareAiGatewayApiKey ? ? "" ) ;
if ( ! hasCredential && accountId && gatewayId && optsApiKey ) {
await setCloudflareAiGatewayConfig ( accountId , gatewayId , optsApiKey , params . agentDir ) ;
hasCredential = true ;
}
const envKey = resolveEnvApiKey ( "cloudflare-ai-gateway" ) ;
if ( ! hasCredential && envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing CLOUDFLARE_AI_GATEWAY_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await ensureAccountGateway ( ) ;
await setCloudflareAiGatewayConfig (
accountId ,
gatewayId ,
normalizeApiKeyInput ( envKey . apiKey ) ,
params . agentDir ,
) ;
hasCredential = true ;
}
}
if ( ! hasCredential && optsApiKey ) {
await ensureAccountGateway ( ) ;
await setCloudflareAiGatewayConfig ( accountId , gatewayId , optsApiKey , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await ensureAccountGateway ( ) ;
const key = await params . prompter . text ( {
message : "Enter Cloudflare AI Gateway API key" ,
validate : validateApiKeyInput ,
} ) ;
await setCloudflareAiGatewayConfig (
accountId ,
gatewayId ,
2026-02-13 00:25:05 -03:00
normalizeApiKeyInput ( String ( key ? ? "" ) ) ,
2026-02-04 04:10:13 -08:00
params . agentDir ,
) ;
hasCredential = true ;
}
if ( hasCredential ) {
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "cloudflare-ai-gateway:default" ,
provider : "cloudflare-ai-gateway" ,
mode : "api_key" ,
} ) ;
}
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF ,
applyDefaultConfig : ( cfg ) = >
applyCloudflareAiGatewayConfig ( cfg , {
accountId : accountId || params . opts ? . cloudflareAiGatewayAccountId ,
gatewayId : gatewayId || params . opts ? . cloudflareAiGatewayGatewayId ,
} ) ,
applyProviderConfig : ( cfg ) = >
applyCloudflareAiGatewayProviderConfig ( cfg , {
accountId : accountId || params . opts ? . cloudflareAiGatewayAccountId ,
gatewayId : gatewayId || params . opts ? . cloudflareAiGatewayGatewayId ,
} ) ,
noteDefault : CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "moonshot-api-key" ) {
2026-02-15 06:04:42 +00:00
await ensureMoonshotApiKeyCredential ( "Enter Moonshot API key" ) ;
2026-01-14 05:39:47 +00:00
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "moonshot:default" ,
provider : "moonshot" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : MOONSHOT_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyMoonshotConfig ,
applyProviderConfig : applyMoonshotProviderConfig ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
2026-01-17 11:46:37 +02:00
}
2026-02-02 20:25:14 +08:00
if ( authChoice === "moonshot-api-key-cn" ) {
2026-02-15 06:04:42 +00:00
await ensureMoonshotApiKeyCredential ( "Enter Moonshot API key (.cn)" ) ;
2026-02-02 20:25:14 +08:00
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "moonshot:default" ,
provider : "moonshot" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : MOONSHOT_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyMoonshotConfigCn ,
2026-02-03 21:58:04 -08:00
applyProviderConfig : applyMoonshotProviderConfigCn ,
2026-02-02 20:25:14 +08:00
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "kimi-code-api-key" ) {
2026-01-17 11:46:37 +02:00
let hasCredential = false ;
2026-01-31 06:04:10 +01:00
const tokenProvider = params . opts ? . tokenProvider ? . trim ( ) . toLowerCase ( ) ;
if (
! hasCredential &&
params . opts ? . token &&
( tokenProvider === "kimi-code" || tokenProvider === "kimi-coding" )
) {
await setKimiCodingApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
2026-01-23 01:27:52 -06:00
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
[
2026-01-31 06:04:10 +01:00
"Kimi Coding uses a dedicated endpoint and API key." ,
2026-01-23 01:27:52 -06:00
"Get your API key at: https://www.kimi.com/code/en" ,
] . join ( "\n" ) ,
2026-01-31 06:04:10 +01:00
"Kimi Coding" ,
2026-01-23 01:27:52 -06:00
) ;
}
2026-01-31 06:04:10 +01:00
const envKey = resolveEnvApiKey ( "kimi-coding" ) ;
2026-01-17 11:46:37 +02:00
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
2026-01-31 06:04:10 +01:00
message : ` Use existing KIMI_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
2026-01-17 11:46:37 +02:00
initialValue : true ,
} ) ;
if ( useExisting ) {
2026-01-31 06:04:10 +01:00
await setKimiCodingApiKey ( envKey . apiKey , params . agentDir ) ;
2026-01-17 11:46:37 +02:00
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
2026-01-31 06:04:10 +01:00
message : "Enter Kimi Coding API key" ,
2026-01-17 11:46:37 +02:00
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setKimiCodingApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-01-17 11:46:37 +02:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
2026-01-31 06:04:10 +01:00
profileId : "kimi-coding:default" ,
provider : "kimi-coding" ,
2026-01-17 17:44:54 +00:00
mode : "api_key" ,
2026-01-17 11:46:37 +02:00
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
2026-01-31 06:04:10 +01:00
defaultModel : KIMI_CODING_MODEL_REF ,
2026-01-17 11:46:37 +02:00
applyDefaultConfig : applyKimiCodeConfig ,
applyProviderConfig : applyKimiCodeProviderConfig ,
2026-01-31 06:04:10 +01:00
noteDefault : KIMI_CODING_MODEL_REF ,
2026-01-17 11:46:37 +02:00
noteAgentModel ,
2026-01-17 17:44:54 +00:00
prompter : params.prompter ,
2026-01-17 11:46:37 +02:00
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
2026-01-14 05:39:47 +00:00
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "gemini-api-key" ) {
2026-01-14 05:39:47 +00:00
let hasCredential = false ;
2026-01-23 01:27:52 -06:00
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "google" ) {
await setGeminiApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
2026-01-14 05:39:47 +00:00
const envKey = resolveEnvApiKey ( "google" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing GEMINI_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setGeminiApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Gemini API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setGeminiApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-01-14 05:39:47 +00:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "google:default" ,
provider : "google" ,
mode : "api_key" ,
} ) ;
if ( params . setDefaultModel ) {
const applied = applyGoogleGeminiModelDefault ( nextConfig ) ;
nextConfig = applied . next ;
if ( applied . changed ) {
await params . prompter . note (
` Default model set to ${ GOOGLE_GEMINI_DEFAULT_MODEL } ` ,
"Model configured" ,
) ;
}
} else {
agentModelOverride = GOOGLE_GEMINI_DEFAULT_MODEL ;
await noteAgentModel ( GOOGLE_GEMINI_DEFAULT_MODEL ) ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-02-12 21:01:48 +08:00
if (
authChoice === "zai-api-key" ||
authChoice === "zai-coding-global" ||
authChoice === "zai-coding-cn" ||
authChoice === "zai-global" ||
authChoice === "zai-cn"
) {
2026-02-12 19:16:04 +01:00
let endpoint : "global" | "cn" | "coding-global" | "coding-cn" | undefined ;
2026-02-12 21:01:48 +08:00
if ( authChoice === "zai-coding-global" ) {
endpoint = "coding-global" ;
} else if ( authChoice === "zai-coding-cn" ) {
endpoint = "coding-cn" ;
} else if ( authChoice === "zai-global" ) {
endpoint = "global" ;
} else if ( authChoice === "zai-cn" ) {
endpoint = "cn" ;
}
// Input API key
2026-01-14 05:39:47 +00:00
let hasCredential = false ;
2026-02-12 19:16:04 +01:00
let apiKey = "" ;
2026-01-23 01:27:52 -06:00
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "zai" ) {
2026-02-12 19:16:04 +01:00
apiKey = normalizeApiKeyInput ( params . opts . token ) ;
await setZaiApiKey ( apiKey , params . agentDir ) ;
2026-01-23 01:27:52 -06:00
hasCredential = true ;
}
2026-01-14 05:39:47 +00:00
const envKey = resolveEnvApiKey ( "zai" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing ZAI_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
2026-02-12 19:16:04 +01:00
apiKey = envKey . apiKey ;
await setZaiApiKey ( apiKey , params . agentDir ) ;
2026-01-14 05:39:47 +00:00
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Z.AI API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
apiKey = normalizeApiKeyInput ( String ( key ? ? "" ) ) ;
2026-02-12 19:16:04 +01:00
await setZaiApiKey ( apiKey , params . agentDir ) ;
}
// zai-api-key: auto-detect endpoint + choose a working default model.
let modelIdOverride : string | undefined ;
if ( ! endpoint ) {
const detected = await detectZaiEndpoint ( { apiKey } ) ;
if ( detected ) {
endpoint = detected . endpoint ;
modelIdOverride = detected . modelId ;
await params . prompter . note ( detected . note , "Z.AI endpoint" ) ;
} else {
endpoint = await params . prompter . select ( {
message : "Select Z.AI endpoint" ,
options : [
{
value : "coding-global" ,
label : "Coding-Plan-Global" ,
hint : "GLM Coding Plan Global (api.z.ai)" ,
} ,
{
value : "coding-cn" ,
label : "Coding-Plan-CN" ,
hint : "GLM Coding Plan CN (open.bigmodel.cn)" ,
} ,
{
value : "global" ,
label : "Global" ,
hint : "Z.AI Global (api.z.ai)" ,
} ,
{
value : "cn" ,
label : "CN" ,
hint : "Z.AI CN (open.bigmodel.cn)" ,
} ,
] ,
initialValue : "global" ,
} ) ;
}
2026-01-14 05:39:47 +00:00
}
2026-02-12 19:16:04 +01:00
2026-01-14 05:39:47 +00:00
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "zai:default" ,
provider : "zai" ,
mode : "api_key" ,
} ) ;
2026-02-12 19:16:04 +01:00
const defaultModel = modelIdOverride ? ` zai/ ${ modelIdOverride } ` : ZAI_DEFAULT_MODEL_REF ;
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel ,
applyDefaultConfig : ( config ) = >
applyZaiConfig ( config , {
endpoint ,
. . . ( modelIdOverride ? { modelId : modelIdOverride } : { } ) ,
} ) ,
applyProviderConfig : ( config ) = >
applyZaiProviderConfig ( config , {
endpoint ,
. . . ( modelIdOverride ? { modelId : modelIdOverride } : { } ) ,
} ) ,
noteDefault : defaultModel ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
2026-01-14 05:39:47 +00:00
return { config : nextConfig , agentModelOverride } ;
}
2026-01-29 00:30:17 +08:00
if ( authChoice === "xiaomi-api-key" ) {
let hasCredential = false ;
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "xiaomi" ) {
await setXiaomiApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
const envKey = resolveEnvApiKey ( "xiaomi" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing XIAOMI_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setXiaomiApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Xiaomi API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setXiaomiApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-01-29 00:30:17 +08:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "xiaomi:default" ,
provider : "xiaomi" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : XIAOMI_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyXiaomiConfig ,
applyProviderConfig : applyXiaomiProviderConfig ,
noteDefault : XIAOMI_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "synthetic-api-key" ) {
if ( params . opts ? . token && params . opts ? . tokenProvider === "synthetic" ) {
2026-02-13 00:25:05 -03:00
await setSyntheticApiKey ( String ( params . opts . token ? ? "" ) . trim ( ) , params . agentDir ) ;
2026-01-23 01:27:52 -06:00
} else {
const key = await params . prompter . text ( {
message : "Enter Synthetic API key" ,
validate : ( value ) = > ( value ? . trim ( ) ? undefined : "Required" ) ,
} ) ;
2026-02-13 00:25:05 -03:00
await setSyntheticApiKey ( String ( key ? ? "" ) . trim ( ) , params . agentDir ) ;
2026-01-23 01:27:52 -06:00
}
2026-01-14 05:39:47 +00:00
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "synthetic:default" ,
provider : "synthetic" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : SYNTHETIC_DEFAULT_MODEL_REF ,
applyDefaultConfig : applySyntheticConfig ,
applyProviderConfig : applySyntheticProviderConfig ,
noteDefault : SYNTHETIC_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
feat: add Venice AI provider integration
Venice AI is a privacy-focused AI inference provider with support for
uncensored models and access to major proprietary models via their
anonymized proxy.
This integration adds:
- Complete model catalog with 25 models:
- 15 private models (Llama, Qwen, DeepSeek, Venice Uncensored, etc.)
- 10 anonymized models (Claude, GPT-5.2, Gemini, Grok, Kimi, MiniMax)
- Auto-discovery from Venice API with fallback to static catalog
- VENICE_API_KEY environment variable support
- Interactive onboarding via 'venice-api-key' auth choice
- Model selection prompt showing all available Venice models
- Provider auto-registration when API key is detected
- Comprehensive documentation covering:
- Privacy modes (private vs anonymized)
- All 25 models with context windows and features
- Streaming, function calling, and vision support
- Model selection recommendations
Privacy modes:
- Private: Fully private, no logging (open-source models)
- Anonymized: Proxied through Venice (proprietary models)
Default model: venice/llama-3.3-70b (good balance of capability + privacy)
Venice API: https://api.venice.ai/api/v1 (OpenAI-compatible)
2026-01-24 16:56:42 -07:00
if ( authChoice === "venice-api-key" ) {
let hasCredential = false ;
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "venice" ) {
await setVeniceApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
[
"Venice AI provides privacy-focused inference with uncensored models." ,
"Get your API key at: https://venice.ai/settings/api" ,
"Supports 'private' (fully private) and 'anonymized' (proxy) modes." ,
] . join ( "\n" ) ,
"Venice AI" ,
) ;
}
const envKey = resolveEnvApiKey ( "venice" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing VENICE_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setVeniceApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Venice AI API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setVeniceApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
feat: add Venice AI provider integration
Venice AI is a privacy-focused AI inference provider with support for
uncensored models and access to major proprietary models via their
anonymized proxy.
This integration adds:
- Complete model catalog with 25 models:
- 15 private models (Llama, Qwen, DeepSeek, Venice Uncensored, etc.)
- 10 anonymized models (Claude, GPT-5.2, Gemini, Grok, Kimi, MiniMax)
- Auto-discovery from Venice API with fallback to static catalog
- VENICE_API_KEY environment variable support
- Interactive onboarding via 'venice-api-key' auth choice
- Model selection prompt showing all available Venice models
- Provider auto-registration when API key is detected
- Comprehensive documentation covering:
- Privacy modes (private vs anonymized)
- All 25 models with context windows and features
- Streaming, function calling, and vision support
- Model selection recommendations
Privacy modes:
- Private: Fully private, no logging (open-source models)
- Anonymized: Proxied through Venice (proprietary models)
Default model: venice/llama-3.3-70b (good balance of capability + privacy)
Venice API: https://api.venice.ai/api/v1 (OpenAI-compatible)
2026-01-24 16:56:42 -07:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "venice:default" ,
provider : "venice" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : VENICE_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyVeniceConfig ,
applyProviderConfig : applyVeniceProviderConfig ,
noteDefault : VENICE_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-01-23 01:27:52 -06:00
if ( authChoice === "opencode-zen" ) {
2026-01-14 05:39:47 +00:00
let hasCredential = false ;
2026-01-23 01:27:52 -06:00
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "opencode" ) {
await setOpencodeZenApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
[
"OpenCode Zen provides access to Claude, GPT, Gemini, and more models." ,
"Get your API key at: https://opencode.ai/auth" ,
2026-02-06 01:55:02 +01:00
"OpenCode Zen bills per request. Check your OpenCode dashboard for details." ,
2026-01-23 01:27:52 -06:00
] . join ( "\n" ) ,
"OpenCode Zen" ,
) ;
}
2026-01-14 05:39:47 +00:00
const envKey = resolveEnvApiKey ( "opencode" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing OPENCODE_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setOpencodeZenApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter OpenCode Zen API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setOpencodeZenApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-01-14 05:39:47 +00:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "opencode:default" ,
provider : "opencode" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : OPENCODE_ZEN_DEFAULT_MODEL ,
applyDefaultConfig : applyOpencodeZenConfig ,
applyProviderConfig : applyOpencodeZenProviderConfig ,
noteDefault : OPENCODE_ZEN_DEFAULT_MODEL ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
2026-02-10 00:49:34 +01:00
}
return { config : nextConfig , agentModelOverride } ;
}
if ( authChoice === "together-api-key" ) {
let hasCredential = false ;
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "together" ) {
await setTogetherApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
[
"Together AI provides access to leading open-source models including Llama, DeepSeek, Qwen, and more." ,
"Get your API key at: https://api.together.xyz/settings/api-keys" ,
] . join ( "\n" ) ,
"Together AI" ,
) ;
}
const envKey = resolveEnvApiKey ( "together" ) ;
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing TOGETHER_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
await setTogetherApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter Together AI API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
await setTogetherApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-02-10 00:49:34 +01:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "together:default" ,
provider : "together" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : TOGETHER_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyTogetherConfig ,
applyProviderConfig : applyTogetherProviderConfig ,
noteDefault : TOGETHER_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
2026-01-14 05:39:47 +00:00
}
return { config : nextConfig , agentModelOverride } ;
}
2026-02-13 16:18:16 +01:00
if ( authChoice === "huggingface-api-key" ) {
return applyAuthChoiceHuggingface ( { . . . params , authChoice } ) ;
}
2026-02-04 16:36:37 +08:00
if ( authChoice === "qianfan-api-key" ) {
let hasCredential = false ;
if ( ! hasCredential && params . opts ? . token && params . opts ? . tokenProvider === "qianfan" ) {
setQianfanApiKey ( normalizeApiKeyInput ( params . opts . token ) , params . agentDir ) ;
hasCredential = true ;
}
if ( ! hasCredential ) {
await params . prompter . note (
[
"Get your API key at: https://console.bce.baidu.com/qianfan/ais/console/apiKey" ,
"API key format: bce-v3/ALTAK-..." ,
] . join ( "\n" ) ,
"QIANFAN" ,
) ;
}
2026-02-05 14:40:56 +08:00
const envKey = resolveEnvApiKey ( "qianfan" ) ;
2026-02-04 16:36:37 +08:00
if ( envKey ) {
const useExisting = await params . prompter . confirm ( {
message : ` Use existing QIANFAN_API_KEY ( ${ envKey . source } , ${ formatApiKeyPreview ( envKey . apiKey ) } )? ` ,
initialValue : true ,
} ) ;
if ( useExisting ) {
setQianfanApiKey ( envKey . apiKey , params . agentDir ) ;
hasCredential = true ;
}
}
if ( ! hasCredential ) {
const key = await params . prompter . text ( {
message : "Enter QIANFAN API key" ,
validate : validateApiKeyInput ,
} ) ;
2026-02-13 00:25:05 -03:00
setQianfanApiKey ( normalizeApiKeyInput ( String ( key ? ? "" ) ) , params . agentDir ) ;
2026-02-04 16:36:37 +08:00
}
nextConfig = applyAuthProfileConfig ( nextConfig , {
profileId : "qianfan:default" ,
provider : "qianfan" ,
mode : "api_key" ,
} ) ;
{
const applied = await applyDefaultModelChoice ( {
config : nextConfig ,
setDefaultModel : params.setDefaultModel ,
defaultModel : QIANFAN_DEFAULT_MODEL_REF ,
applyDefaultConfig : applyQianfanConfig ,
applyProviderConfig : applyQianfanProviderConfig ,
noteDefault : QIANFAN_DEFAULT_MODEL_REF ,
noteAgentModel ,
prompter : params.prompter ,
} ) ;
nextConfig = applied . config ;
agentModelOverride = applied . agentModelOverride ? ? agentModelOverride ;
}
return { config : nextConfig , agentModelOverride } ;
}
2026-01-14 05:39:47 +00:00
return null ;
}