diff --git a/codex-rs/codex-api/tests/models_integration.rs b/codex-rs/codex-api/tests/models_integration.rs
index 6ef328188..20eb64d5c 100644
--- a/codex-rs/codex-api/tests/models_integration.rs
+++ b/codex-rs/codex-api/tests/models_integration.rs
@@ -77,6 +77,7 @@ async fn models_client_hits_models_endpoint() {
             supported_in_api: true,
             priority: 1,
             upgrade: None,
+            base_instructions: None,
         }],
         etag: String::new(),
     };
diff --git a/codex-rs/core/src/openai_models/model_family.rs b/codex-rs/core/src/openai_models/model_family.rs
index 6665165ee..094fb0137 100644
--- a/codex-rs/core/src/openai_models/model_family.rs
+++ b/codex-rs/core/src/openai_models/model_family.rs
@@ -102,6 +102,7 @@ impl ModelFamily {
             if model.slug == self.slug {
                 self.default_reasoning_effort = Some(model.default_reasoning_level);
                 self.shell_type = model.shell_type;
+                self.base_instructions = model.base_instructions.unwrap_or(self.base_instructions);
             }
         }
         self
@@ -357,6 +358,7 @@ mod tests {
             supported_in_api: true,
             priority: 1,
             upgrade: None,
+            base_instructions: None,
         }
     }
 
diff --git a/codex-rs/core/src/openai_models/models_manager.rs b/codex-rs/core/src/openai_models/models_manager.rs
index 9ebf0112a..09eedeeba 100644
--- a/codex-rs/core/src/openai_models/models_manager.rs
+++ b/codex-rs/core/src/openai_models/models_manager.rs
@@ -216,6 +216,7 @@ mod tests {
             "supported_in_api": true,
             "priority": priority,
             "upgrade": null,
+            "base_instructions": null,
         }))
         .expect("valid model")
     }
diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs
index b13188d5d..0f8040747 100644
--- a/codex-rs/core/tests/suite/remote_models.rs
+++ b/codex-rs/core/tests/suite/remote_models.rs
@@ -25,6 +25,7 @@ use core_test_support::responses::ev_completed;
 use core_test_support::responses::ev_function_call;
 use core_test_support::responses::ev_response_created;
 use core_test_support::responses::mount_models_once;
+use core_test_support::responses::mount_sse_once;
 use core_test_support::responses::mount_sse_sequence;
 use core_test_support::responses::sse;
 use core_test_support::skip_if_no_network;
@@ -67,6 +68,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
         supported_in_api: true,
         priority: 1,
         upgrade: None,
+        base_instructions: None,
     };
 
     let models_mock = mount_models_once(
@@ -167,6 +169,105 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
     Ok(())
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn remote_models_apply_remote_base_instructions() -> Result<()> {
+    skip_if_no_network!(Ok(()));
+    skip_if_sandbox!(Ok(()));
+
+    let server = MockServer::builder()
+        .body_print_limit(BodyPrintLimit::Limited(80_000))
+        .start()
+        .await;
+
+    let model = "test-gpt-5-remote";
+
+    let remote_base = "Use the remote base instructions only.";
+    let remote_model = ModelInfo {
+        slug: model.to_string(),
+        display_name: "Parallel Remote".to_string(),
+        description: Some("A remote model with custom instructions".to_string()),
+        default_reasoning_level: ReasoningEffort::Medium,
+        supported_reasoning_levels: vec![ReasoningEffortPreset {
+            effort: ReasoningEffort::Medium,
+            description: ReasoningEffort::Medium.to_string(),
+        }],
+        shell_type: ConfigShellToolType::ShellCommand,
+        visibility: ModelVisibility::List,
+        minimal_client_version: ClientVersion(0, 1, 0),
+        supported_in_api: true,
+        priority: 1,
+        upgrade: None,
+        base_instructions: Some(remote_base.to_string()),
+    };
+    mount_models_once(
+        &server,
+        ModelsResponse {
+            models: vec![remote_model],
+            etag: String::new(),
+        },
+    )
+    .await;
+
+    let response_mock = mount_sse_once(
+        &server,
+        sse(vec![
+            ev_response_created("resp-1"),
+            ev_assistant_message("msg-1", "done"),
+            ev_completed("resp-1"),
+        ]),
+    )
+    .await;
+
+    let mut builder = test_codex().with_config(|config| {
+        config.features.enable(Feature::RemoteModels);
+        config.model = "gpt-5.1".to_string();
+    });
+
+    let TestCodex {
+        codex,
+        cwd,
+        conversation_manager,
+        ..
+    } = builder.build(&server).await?;
+
+    let models_manager = conversation_manager.get_models_manager();
+    wait_for_model_available(&models_manager, model).await;
+
+    codex
+        .submit(Op::OverrideTurnContext {
+            cwd: None,
+            approval_policy: None,
+            sandbox_policy: None,
+            model: Some(model.to_string()),
+            effort: None,
+            summary: None,
+        })
+        .await?;
+
+    codex
+        .submit(Op::UserTurn {
+            items: vec![UserInput::Text {
+                text: "hello remote".into(),
+            }],
+            final_output_json_schema: None,
+            cwd: cwd.path().to_path_buf(),
+            approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::DangerFullAccess,
+            model: model.to_string(),
+            effort: None,
+            summary: ReasoningSummary::Auto,
+        })
+        .await?;
+
+    wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await;
+
+    let body = response_mock.single_request().body_json();
+    let instructions = body["instructions"].as_str().unwrap();
+    assert_eq!(instructions, remote_base);
+
+    Ok(())
+}
+
 async fn wait_for_model_available(manager: &Arc<ModelsManager>, slug: &str) -> ModelPreset {
     let deadline = Instant::now() + Duration::from_secs(2);
     loop {
diff --git a/codex-rs/protocol/src/openai_models.rs b/codex-rs/protocol/src/openai_models.rs
index 942303a90..c5500f1cc 100644
--- a/codex-rs/protocol/src/openai_models.rs
+++ b/codex-rs/protocol/src/openai_models.rs
@@ -135,6 +135,8 @@ pub struct ModelInfo {
     pub priority: i32,
     #[serde(default)]
     pub upgrade: Option<ModelUpgrade>,
+    #[serde(default)]
+    pub base_instructions: Option<String>,
 }
 
 /// Response wrapper for `/models`.