Add openai_base_url config override for built-in provider (#12031)

We regularly get bug reports from users who mistakenly have the
`OPENAI_BASE_URL` environment variable set. This PR deprecates this
environment variable in favor of a top-level config key
`openai_base_url` that is used for the same purpose. By making it a
config key, it will be more visible to users. It will also participate
in all of the infrastructure we've added for layered and managed
configs.

Summary
- introduce the `openai_base_url` top-level config key, update
schema/tests, and route the built-in openai provider through it
- fall back to deprecated `OPENAI_BASE_URL` env var but warn user of
deprecation when no `openai_base_url` config key is present
- update CLI, SDK, and TUI code to prefer the new config path (with a
deprecated env-var fallback) and document the SDK behavior change
This commit is contained in:
Eric Traut 2026-03-13 20:12:25 -06:00 committed by GitHub
parent b859a98e0f
commit 4b9d5c8c1b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 233 additions and 70 deletions

View file

@ -49,6 +49,11 @@ stream_max_retries = 0
"#
)
};
let openai_base_url_line = if model_provider_id == "openai" {
format!("openai_base_url = \"{server_uri}/v1\"\n")
} else {
String::new()
};
// Phase 3: write the final config file.
let config_toml = codex_home.join("config.toml");
std::fs::write(
@ -62,6 +67,7 @@ compact_prompt = "{compact_prompt}"
model_auto_compact_token_limit = {auto_compact_limit}
model_provider = "{model_provider_id}"
{openai_base_url_line}
[features]
{feature_entries}

View file

@ -158,15 +158,7 @@ async fn auto_compaction_remote_emits_started_and_completed_items() -> Result<()
AuthCredentialsStoreMode::File,
)?;
let server_base_url = format!("{}/v1", server.uri());
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[
("OPENAI_BASE_URL", Some(server_base_url.as_str())),
("OPENAI_API_KEY", None),
],
)
.await?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;

View file

@ -2260,6 +2260,10 @@
},
"type": "array"
},
"openai_base_url": {
"description": "Base URL override for the built-in `openai` model provider.",
"type": "string"
},
"oss_provider": {
"description": "Preferred OSS provider for local models, e.g. \"lmstudio\" or \"ollama\".",
"type": "string"

View file

@ -4134,7 +4134,7 @@ model_verbosity = "high"
supports_websockets: false,
};
let model_provider_map = {
let mut model_provider_map = built_in_model_providers();
let mut model_provider_map = built_in_model_providers(/* openai_base_url */ None);
model_provider_map.insert("openai-custom".to_string(), openai_custom_provider.clone());
model_provider_map
};

View file

@ -138,6 +138,7 @@ pub(crate) const DEFAULT_AGENT_MAX_DEPTH: i32 = 1;
pub(crate) const DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS: Option<u64> = None;
pub const CONFIG_TOML_FILE: &str = "config.toml";
const OPENAI_BASE_URL_ENV_VAR: &str = "OPENAI_BASE_URL";
fn resolve_sqlite_home_env(resolved_cwd: &Path) -> Option<PathBuf> {
let raw = std::env::var(codex_state::SQLITE_HOME_ENV).ok()?;
@ -1343,6 +1344,9 @@ pub struct ConfigToml {
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,
/// Base URL override for the built-in `openai` model provider.
pub openai_base_url: Option<String>,
/// Machine-local realtime audio device preferences used by realtime voice.
#[serde(default)]
pub audio: Option<RealtimeAudioToml>,
@ -2249,7 +2253,28 @@ impl Config {
let agent_roles =
agent_roles::load_agent_roles(&cfg, &config_layer_stack, &mut startup_warnings)?;
let mut model_providers = built_in_model_providers();
let openai_base_url = cfg
.openai_base_url
.clone()
.filter(|value| !value.is_empty());
let openai_base_url_from_env = std::env::var(OPENAI_BASE_URL_ENV_VAR)
.ok()
.filter(|value| !value.is_empty());
if openai_base_url_from_env.is_some() {
if openai_base_url.is_some() {
tracing::warn!(
env_var = OPENAI_BASE_URL_ENV_VAR,
"deprecated env var is ignored because `openai_base_url` is set in config.toml"
);
} else {
startup_warnings.push(format!(
"`{OPENAI_BASE_URL_ENV_VAR}` is deprecated. Set `openai_base_url` in config.toml instead."
));
}
}
let effective_openai_base_url = openai_base_url.or(openai_base_url_from_env);
let mut model_providers = built_in_model_providers(effective_openai_base_url);
// Merge user-defined providers into the built-in list.
for (key, provider) in cfg.model_providers.into_iter() {
model_providers.entry(key).or_insert(provider);

View file

@ -82,6 +82,7 @@ pub use model_provider_info::DEFAULT_OLLAMA_PORT;
pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
pub use model_provider_info::OPENAI_PROVIDER_ID;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;

View file

@ -28,6 +28,7 @@ const MAX_STREAM_MAX_RETRIES: u64 = 100;
const MAX_REQUEST_MAX_RETRIES: u64 = 100;
const OPENAI_PROVIDER_NAME: &str = "OpenAI";
pub const OPENAI_PROVIDER_ID: &str = "openai";
const CHAT_WIRE_API_REMOVED_ERROR: &str = "`wire_api = \"chat\"` is no longer supported.\nHow to fix: set `wire_api = \"responses\"` in your provider config.\nMore info: https://github.com/openai/codex/discussions/7782";
pub(crate) const LEGACY_OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
pub(crate) const OLLAMA_CHAT_PROVIDER_REMOVED_ERROR: &str = "`ollama-chat` is no longer supported.\nHow to fix: replace `ollama-chat` with `ollama` in `model_provider`, `oss_provider`, or `--local-provider`.\nMore info: https://github.com/openai/codex/discussions/7782";
@ -225,17 +226,11 @@ impl ModelProviderInfo {
.map(Duration::from_millis)
.unwrap_or(Duration::from_millis(DEFAULT_STREAM_IDLE_TIMEOUT_MS))
}
pub fn create_openai_provider() -> ModelProviderInfo {
pub fn create_openai_provider(base_url: Option<String>) -> ModelProviderInfo {
ModelProviderInfo {
name: OPENAI_PROVIDER_NAME.into(),
// Allow users to override the default OpenAI endpoint by
// exporting `OPENAI_BASE_URL`. This is useful when pointing
// Codex at a proxy, mock server, or Azure-style deployment
// without requiring a full TOML override for the built-in
// OpenAI provider.
base_url: std::env::var("OPENAI_BASE_URL")
.ok()
.filter(|v| !v.trim().is_empty()),
base_url,
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
@ -278,15 +273,18 @@ pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
/// Built-in default provider list.
pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
pub fn built_in_model_providers(
openai_base_url: Option<String>,
) -> HashMap<String, ModelProviderInfo> {
use ModelProviderInfo as P;
let openai_provider = P::create_openai_provider(openai_base_url);
// We do not want to be in the business of adjudicating which third-party
// providers are bundled with Codex CLI, so we only include the OpenAI and
// open source ("oss") providers by default. Users are encouraged to add to
// `model_providers` in config.toml to add their own providers.
[
("openai", P::create_openai_provider()),
(OPENAI_PROVIDER_ID, openai_provider),
(
OLLAMA_OSS_PROVIDER_ID,
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),

View file

@ -92,6 +92,23 @@ impl ModelsManager {
auth_manager: Arc<AuthManager>,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
) -> Self {
Self::new_with_provider(
codex_home,
auth_manager,
model_catalog,
collaboration_modes_config,
ModelProviderInfo::create_openai_provider(/* base_url */ None),
)
}
/// Construct a manager with an explicit provider used for remote model refreshes.
pub fn new_with_provider(
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
provider: ModelProviderInfo,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
@ -113,7 +130,7 @@ impl ModelsManager {
auth_manager,
etag: RwLock::new(None),
cache_manager,
provider: ModelProviderInfo::create_openai_provider(),
provider,
}
}
@ -413,20 +430,13 @@ impl ModelsManager {
auth_manager: Arc<AuthManager>,
provider: ModelProviderInfo,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
Self {
remote_models: RwLock::new(
Self::load_remote_models_from_file()
.unwrap_or_else(|err| panic!("failed to load bundled models.json: {err}")),
),
catalog_mode: CatalogMode::Default,
collaboration_modes_config: CollaborationModesConfig::default(),
Self::new_with_provider(
codex_home,
auth_manager,
etag: RwLock::new(None),
cache_manager,
None,
CollaborationModesConfig::default(),
provider,
}
)
}
/// Get model identifier without consulting remote state or cache.

View file

@ -1,6 +1,7 @@
use crate::AuthManager;
use crate::CodexAuth;
use crate::ModelProviderInfo;
use crate::OPENAI_PROVIDER_ID;
use crate::agent::AgentControl;
use crate::codex::Codex;
use crate::codex::CodexSpawnArgs;
@ -168,6 +169,11 @@ impl ThreadManager {
collaboration_modes_config: CollaborationModesConfig,
) -> Self {
let codex_home = config.codex_home.clone();
let openai_models_provider = config
.model_providers
.get(OPENAI_PROVIDER_ID)
.cloned()
.unwrap_or_else(|| ModelProviderInfo::create_openai_provider(/* base_url */ None));
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let plugins_manager = Arc::new(PluginsManager::new(codex_home.clone()));
let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager)));
@ -181,11 +187,12 @@ impl ThreadManager {
state: Arc::new(ThreadManagerState {
threads: Arc::new(RwLock::new(HashMap::new())),
thread_created_tx,
models_manager: Arc::new(ModelsManager::new(
models_manager: Arc::new(ModelsManager::new_with_provider(
codex_home,
auth_manager.clone(),
config.model_catalog.clone(),
collaboration_modes_config,
openai_models_provider,
)),
skills_manager,
plugins_manager,

View file

@ -1,13 +1,18 @@
use super::*;
use crate::codex::make_session_and_context;
use crate::config::test_config;
use crate::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use crate::models_manager::manager::RefreshStrategy;
use assert_matches::assert_matches;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelsResponse;
use core_test_support::responses::mount_models_once;
use pretty_assertions::assert_eq;
use std::time::Duration;
use tempfile::tempdir;
use wiremock::MockServer;
fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
@ -150,3 +155,33 @@ async fn shutdown_all_threads_bounded_submits_shutdown_to_every_thread() {
assert!(report.timed_out.is_empty());
assert!(manager.list_thread_ids().await.is_empty());
}
/// `ThreadManager::new` must perform remote model refreshes through the
/// `openai` provider configured in `Config.model_providers`, not through a
/// hard-coded default provider.
#[tokio::test]
async fn new_uses_configured_openai_provider_for_model_refresh() {
    let mock_server = MockServer::start().await;
    let models_endpoint = mount_models_once(&mock_server, ModelsResponse { models: vec![] }).await;

    // Build a config whose built-in openai provider points at the mock server.
    let home_root = tempdir().expect("tempdir");
    let mut config = test_config();
    config.codex_home = home_root.path().join("codex-home");
    config.cwd = config.codex_home.clone();
    std::fs::create_dir_all(&config.codex_home).expect("create codex home");
    config.model_catalog = None;
    config
        .model_providers
        .get_mut("openai")
        .expect("openai provider should exist")
        .base_url = Some(mock_server.uri());

    let auth_manager =
        AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
    let thread_manager = ThreadManager::new(
        &config,
        auth_manager,
        SessionSource::Exec,
        CollaborationModesConfig::default(),
    );

    // An online refresh should hit the configured provider exactly once.
    let _ = thread_manager.list_models(RefreshStrategy::Online).await;
    assert_eq!(models_endpoint.requests().len(), 1);
}

View file

@ -57,7 +57,7 @@ fn function_payload(args: serde_json::Value) -> ToolPayload {
/// Builds a `ThreadManager` for tests, backed by the stock built-in
/// `openai` provider and a dummy API-key auth.
fn thread_manager() -> ThreadManager {
    let providers = built_in_model_providers(/* openai_base_url */ None);
    let openai_provider = providers["openai"].clone();
    let auth = CodexAuth::from_api_key("dummy");
    ThreadManager::with_models_provider_for_tests(auth, openai_provider)
}
@ -162,7 +162,7 @@ async fn spawn_agent_uses_explorer_role_and_preserves_approval_policy() {
let manager = thread_manager();
session.services.agent_control = manager.agent_control();
let mut config = (*turn.config).clone();
let provider = built_in_model_providers()["ollama"].clone();
let provider = built_in_model_providers(/* openai_base_url */ None)["ollama"].clone();
config.model_provider_id = "ollama".to_string();
config.model_provider = provider.clone();
config

View file

@ -226,7 +226,7 @@ impl TestCodexBuilder {
) -> anyhow::Result<(Config, Arc<TempDir>)> {
let model_provider = ModelProviderInfo {
base_url: Some(base_url),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let cwd = Arc::new(TempDir::new()?);
let mut config = load_default_config_for_test(home).await;

View file

@ -23,7 +23,8 @@ impl TestCodexExecBuilder {
/// Returns a `codex` command wired to `server`: the mock server's `/v1`
/// endpoint is passed as an `openai_base_url` config override (`-c`),
/// replacing the deprecated `OPENAI_BASE_URL` env var.
pub fn cmd_with_server(&self, server: &MockServer) -> assert_cmd::Command {
    let base_url = format!("{}/v1", server.uri());
    let config_override = format!("openai_base_url={}", toml_string_literal(&base_url));
    let mut command = self.cmd();
    command.arg("-c").arg(config_override);
    command
}
@ -35,6 +36,10 @@ impl TestCodexExecBuilder {
}
}
/// Renders `value` as a TOML basic (double-quoted) string literal.
///
/// Used to build `-c openai_base_url=...` CLI overrides, so the output must
/// be valid TOML, not JSON: TOML 1.0 basic strings require escaping `"`, `\`,
/// and control characters (U+0000..U+0008, U+000A..U+001F, U+007F). Serializing
/// via `serde_json` is close but not identical — JSON permits an unescaped
/// U+007F (DEL), which would produce invalid TOML — so escape explicitly.
fn toml_string_literal(value: &str) -> String {
    let mut literal = String::with_capacity(value.len() + 2);
    literal.push('"');
    for ch in value.chars() {
        match ch {
            '"' => literal.push_str("\\\""),
            '\\' => literal.push_str("\\\\"),
            '\u{08}' => literal.push_str("\\b"),
            '\t' => literal.push_str("\\t"),
            '\n' => literal.push_str("\\n"),
            '\u{0C}' => literal.push_str("\\f"),
            '\r' => literal.push_str("\\r"),
            // Remaining control characters (including U+007F) get the
            // TOML `\uXXXX` escape form.
            c if c.is_control() => {
                use std::fmt::Write as _;
                let _ = write!(literal, "\\u{:04X}", c as u32);
            }
            c => literal.push(c),
        }
    }
    literal.push('"');
    literal
}
pub fn test_codex_exec() -> TestCodexExecBuilder {
TestCodexExecBuilder {
home: TempDir::new().expect("create temp home"),

View file

@ -52,8 +52,7 @@ async fn responses_mode_stream_cli() {
.arg(&repo_root)
.arg("hello?");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
.env("OPENAI_API_KEY", "dummy");
let output = cmd.output().unwrap();
println!("Status: {}", output.status);
@ -89,6 +88,75 @@ async fn responses_mode_stream_cli() {
// assert!(page.items[0].created_at.is_some(), "missing created_at");
}
/// Ensures `OPENAI_BASE_URL` still works as a deprecated fallback.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn responses_mode_stream_cli_supports_openai_base_url_env_fallback() {
    skip_if_no_network!();

    // Stand up a mock Responses endpoint that streams one minimal turn.
    let mock_server = MockServer::start().await;
    let sse_stream = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "hi"),
        responses::ev_completed("resp-1"),
    ]);
    let mounted_responses = responses::mount_sse_once(&mock_server, sse_stream).await;

    let temp_home = TempDir::new().unwrap();
    let workdir = repo_root();
    let codex_bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap();
    let mut exec_cmd = AssertCommand::new(codex_bin);
    exec_cmd.timeout(Duration::from_secs(30));
    exec_cmd
        .arg("exec")
        .arg("--skip-git-repo-check")
        .arg("-C")
        .arg(&workdir)
        .arg("hello?");
    // Point the CLI at the mock server via the deprecated env var only;
    // no `openai_base_url` config key is supplied.
    exec_cmd
        .env("CODEX_HOME", temp_home.path())
        .env("OPENAI_API_KEY", "dummy")
        .env("OPENAI_BASE_URL", format!("{}/v1", mock_server.uri()));

    let run = exec_cmd.output().unwrap();
    assert!(run.status.success());

    // The request must have landed under the mock server's /v1 prefix.
    let captured = mounted_responses.single_request();
    assert_eq!(captured.path(), "/v1/responses");
}
/// Ensures `openai_base_url` config override routes built-in openai provider requests.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn responses_mode_stream_cli_supports_openai_base_url_config_override() {
    skip_if_no_network!();

    // Stand up a mock Responses endpoint that streams one minimal turn.
    let mock_server = MockServer::start().await;
    let sse_stream = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "hi"),
        responses::ev_completed("resp-1"),
    ]);
    let mounted_responses = responses::mount_sse_once(&mock_server, sse_stream).await;

    let temp_home = TempDir::new().unwrap();
    let workdir = repo_root();
    let codex_bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap();
    let mut exec_cmd = AssertCommand::new(codex_bin);
    exec_cmd.timeout(Duration::from_secs(30));
    // Route the built-in openai provider through the mock server using the
    // new `openai_base_url` config key; no OPENAI_BASE_URL env var is set.
    exec_cmd
        .arg("exec")
        .arg("--skip-git-repo-check")
        .arg("-c")
        .arg(format!("openai_base_url=\"{}/v1\"", mock_server.uri()))
        .arg("-C")
        .arg(&workdir)
        .arg("hello?");
    exec_cmd
        .env("CODEX_HOME", temp_home.path())
        .env("OPENAI_API_KEY", "dummy");

    let run = exec_cmd.output().unwrap();
    assert!(run.status.success());

    // The request must have landed under the mock server's /v1 prefix.
    let captured = mounted_responses.single_request();
    assert_eq!(captured.path(), "/v1/responses");
}
/// Verify that passing `-c model_instructions_file=...` to the CLI
/// overrides the built-in base instructions by inspecting the request body
/// received by a mock OpenAI Responses endpoint.
@ -136,8 +204,7 @@ async fn exec_cli_applies_model_instructions_file() {
.arg(&repo_root)
.arg("hello?\n");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
.env("OPENAI_API_KEY", "dummy");
let output = cmd.output().unwrap();
println!("Status: {}", output.status);
@ -247,13 +314,14 @@ async fn responses_api_stream_cli() {
let mut cmd = AssertCommand::new(bin);
cmd.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg("hello?");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", fixture)
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", fixture);
let output = cmd.output().unwrap();
assert!(output.status.success());
@ -283,14 +351,14 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
let mut cmd = AssertCommand::new(bin);
cmd.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg(&prompt);
cmd.env("CODEX_HOME", home.path())
.env(CODEX_API_KEY_ENV_VAR, "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
// Required for CLI arg parsing even though fixture short-circuits network usage.
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", &fixture);
let output = cmd.output().unwrap();
assert!(
@ -404,6 +472,8 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
let mut cmd2 = AssertCommand::new(bin2);
cmd2.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg(&prompt2)
@ -411,8 +481,7 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
.arg("--last");
cmd2.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", &fixture);
let output2 = cmd2.output().unwrap();
assert!(output2.status.success(), "resume codex-cli run failed");

View file

@ -715,7 +715,7 @@ async fn chatgpt_auth_sends_correct_request() {
)
.await;
let mut model_provider = built_in_model_providers()["openai"].clone();
let mut model_provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
model_provider.base_url = Some(format!("{}/api/codex", server.uri()));
let mut builder = test_codex()
.with_auth(create_dummy_codex_auth())
@ -791,7 +791,7 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
// Init session
@ -1977,7 +1977,7 @@ async fn token_count_includes_rate_limits_snapshot() {
.mount(&server)
.await;
let mut provider = built_in_model_providers()["openai"].clone();
let mut provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
provider.base_url = Some(format!("{}/v1", server.uri()));
let mut builder = test_codex()

View file

@ -93,7 +93,7 @@ fn json_fragment(text: &str) -> String {
}
fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo {
let mut provider = built_in_model_providers()["openai"].clone();
let mut provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
provider.name = "OpenAI (test)".into();
provider.base_url = Some(format!("{}/v1", server.uri()));
provider

View file

@ -95,7 +95,7 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -654,7 +654,7 @@ async fn remote_models_do_not_append_removed_builtin_presets() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -709,7 +709,7 @@ async fn remote_models_merge_adds_new_high_priority_first() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -756,7 +756,7 @@ async fn remote_models_merge_replaces_overlapping_model() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -800,7 +800,7 @@ async fn remote_models_merge_preserves_bundled_models_on_empty_response() -> Res
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -841,7 +841,7 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@ -907,7 +907,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),

View file

@ -6477,7 +6477,7 @@ impl ChatWidget {
fn model_menu_warning_line(&self) -> Option<Line<'static>> {
let base_url = self.custom_openai_base_url()?;
let warning = format!(
"Warning: OPENAI_BASE_URL is set to {base_url}. Selecting models may not be supported or work properly."
"Warning: OpenAI base URL is overridden to {base_url}. Selecting models may not be supported or work properly."
);
Some(Line::from(warning.red()))
}

View file

@ -129,8 +129,8 @@ const codex = new Codex({
});
```
The SDK still injects its required variables (such as `OPENAI_BASE_URL` and `CODEX_API_KEY`) on top of the environment you
provide.
The SDK still injects its required variables (such as `CODEX_API_KEY`) on top of the environment you provide. If you set
`baseUrl`, the SDK passes it as a `--config openai_base_url=...` override.
### Passing `--config` overrides

View file

@ -78,6 +78,13 @@ export class CodexExec {
}
}
if (args.baseUrl) {
commandArgs.push(
"--config",
`openai_base_url=${toTomlValue(args.baseUrl, "openai_base_url")}`,
);
}
if (args.model) {
commandArgs.push("--model", args.model);
}
@ -150,9 +157,6 @@ export class CodexExec {
if (!env[INTERNAL_ORIGINATOR_ENV]) {
env[INTERNAL_ORIGINATOR_ENV] = TYPESCRIPT_SDK_ORIGINATOR;
}
if (args.baseUrl) {
env.OPENAI_BASE_URL = args.baseUrl;
}
if (args.apiKey) {
env.CODEX_API_KEY = args.apiKey;
}

View file

@ -502,7 +502,7 @@ describe("Codex", () => {
],
});
const { envs: spawnEnvs, restore } = codexExecSpy();
const { args: spawnArgs, envs: spawnEnvs, restore } = codexExecSpy();
process.env.CODEX_ENV_SHOULD_NOT_LEAK = "leak";
try {
@ -521,11 +521,18 @@ describe("Codex", () => {
if (!spawnEnv) {
throw new Error("Spawn env missing");
}
const commandArgs = spawnArgs[0];
expect(commandArgs).toBeDefined();
if (!commandArgs) {
throw new Error("Command args missing");
}
expect(spawnEnv.CUSTOM_ENV).toBe("custom");
expect(spawnEnv.CODEX_ENV_SHOULD_NOT_LEAK).toBeUndefined();
expect(spawnEnv.OPENAI_BASE_URL).toBe(url);
expect(spawnEnv.OPENAI_BASE_URL).toBeUndefined();
expect(spawnEnv.CODEX_API_KEY).toBe("test");
expect(spawnEnv.CODEX_INTERNAL_ORIGINATOR_OVERRIDE).toBeDefined();
expect(commandArgs).toContain("--config");
expect(commandArgs).toContain(`openai_base_url=${JSON.stringify(url)}`);
} finally {
delete process.env.CODEX_ENV_SHOULD_NOT_LEAK;
restore();