From d45ffd5830027978b322c9b88754ded78d1bf339 Mon Sep 17 00:00:00 2001 From: sayan-oai Date: Wed, 25 Feb 2026 13:01:40 -0800 Subject: [PATCH] make 5.3-codex visible in cli for api users (#12808) 5.3-codex released in api, mark it visible for API users via bundled `models.json`. --- .../app-server/tests/suite/v2/model_list.rs | 120 +++++------------- codex-rs/core/models.json | 2 +- codex-rs/core/tests/suite/remote_models.rs | 14 +- ...twidget__tests__model_selection_popup.snap | 9 +- 4 files changed, 50 insertions(+), 95 deletions(-) diff --git a/codex-rs/app-server/tests/suite/v2/model_list.rs b/codex-rs/app-server/tests/suite/v2/model_list.rs index 759721249..a71a8a337 100644 --- a/codex-rs/app-server/tests/suite/v2/model_list.rs +++ b/codex-rs/app-server/tests/suite/v2/model_list.rs @@ -1,7 +1,6 @@ use std::time::Duration; use anyhow::Result; -use anyhow::anyhow; use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::write_models_cache; @@ -136,100 +135,45 @@ async fn list_models_pagination_works() -> Result<()> { timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; - let first_request = mcp - .send_list_models_request(ModelListParams { - limit: Some(1), - cursor: None, - include_hidden: None, - }) - .await?; - - let first_response: JSONRPCResponse = timeout( - DEFAULT_TIMEOUT, - mcp.read_stream_until_response_message(RequestId::Integer(first_request)), - ) - .await??; - - let ModelListResponse { - data: first_items, - next_cursor: first_cursor, - } = to_response::<ModelListResponse>(first_response)?; + let expected_models = expected_visible_models(); + let mut cursor = None; + let mut items = Vec::new(); - assert_eq!(first_items.len(), 1); - assert_eq!(first_items[0].id, expected_models[0].id); - let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?; + for _ in 0..expected_models.len() { + let request_id = mcp + .send_list_models_request(ModelListParams { + limit: Some(1), + cursor: cursor.clone(), + include_hidden: 
None, + }) + .await?; - let second_request = mcp - .send_list_models_request(ModelListParams { - limit: Some(1), - cursor: Some(next_cursor.clone()), - include_hidden: None, - }) - .await?; + let response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; - let second_response: JSONRPCResponse = timeout( - DEFAULT_TIMEOUT, - mcp.read_stream_until_response_message(RequestId::Integer(second_request)), - ) - .await??; + let ModelListResponse { + data: page_items, + next_cursor, + } = to_response::<ModelListResponse>(response)?; - let ModelListResponse { - data: second_items, - next_cursor: second_cursor, - } = to_response::<ModelListResponse>(second_response)?; + assert_eq!(page_items.len(), 1); + items.extend(page_items); - assert_eq!(second_items.len(), 1); - assert_eq!(second_items[0].id, expected_models[1].id); - let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?; + if let Some(next_cursor) = next_cursor { + cursor = Some(next_cursor); + } else { + assert_eq!(items, expected_models); + return Ok(()); + } + } - let third_request = mcp - .send_list_models_request(ModelListParams { - limit: Some(1), - cursor: Some(third_cursor.clone()), - include_hidden: None, - }) - .await?; - - let third_response: JSONRPCResponse = timeout( - DEFAULT_TIMEOUT, - mcp.read_stream_until_response_message(RequestId::Integer(third_request)), - ) - .await??; - - let ModelListResponse { - data: third_items, - next_cursor: third_cursor, - } = to_response::<ModelListResponse>(third_response)?; - - assert_eq!(third_items.len(), 1); - assert_eq!(third_items[0].id, expected_models[2].id); - let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?; - - let fourth_request = mcp - .send_list_models_request(ModelListParams { - limit: Some(1), - cursor: Some(fourth_cursor.clone()), - include_hidden: None, - }) - .await?; - - let fourth_response: JSONRPCResponse = timeout( - DEFAULT_TIMEOUT, 
mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)), - ) - .await??; - - let ModelListResponse { - data: fourth_items, - next_cursor: fourth_cursor, - } = to_response::<ModelListResponse>(fourth_response)?; - - assert_eq!(fourth_items.len(), 1); - assert_eq!(fourth_items[0].id, expected_models[3].id); - assert!(fourth_cursor.is_none()); - Ok(()) + panic!( + "model pagination did not terminate after {} pages", + expected_models.len() + ); } #[tokio::test] diff --git a/codex-rs/core/models.json b/codex-rs/core/models.json index 3f7d08b43..2fd55b986 100644 --- a/codex-rs/core/models.json +++ b/codex-rs/core/models.json @@ -40,7 +40,7 @@ } ], "shell_type": "shell_command", - "visibility": "hide", + "visibility": "list", "minimal_client_version": "0.98.0", "supported_in_api": true, "upgrade": null, diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs index 74a5b6d90..166f0ceac 100644 --- a/codex-rs/core/tests/suite/remote_models.rs +++ b/codex-rs/core/tests/suite/remote_models.rs @@ -830,8 +830,9 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> { let elapsed = start.elapsed(); // get_model should return a default model even when refresh times out let default_model = model.expect("get_model should finish and return default model"); + let expected_default = bundled_default_model_slug(); assert!( - default_model == "gpt-5.2-codex", + default_model == expected_default, "get_model should return default model when refresh times out, got: {default_model}" ); let _ = server @@ -889,7 +890,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> { let selected = manager .get_default_model(&None, RefreshStrategy::OnlineIfUncached) .await; - assert_eq!(selected, "gpt-5.2-codex"); + assert_eq!(selected, bundled_default_model_slug()); let available = manager.list_models(RefreshStrategy::OnlineIfUncached).await; let hidden = available @@ -935,6 +936,15 @@ fn bundled_model_slug() -> String { .clone() } 
+fn bundled_default_model_slug() -> String { + codex_core::test_support::all_model_presets() + .iter() + .find(|preset| preset.is_default) + .expect("bundled models should include a default") + .model + .clone() +} + fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> ModelInfo { test_remote_model_with_policy( slug, diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap index 2147714e2..9b5bb54b9 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap @@ -5,12 +5,13 @@ expression: popup Select Model and Effort Access legacy models by running codex -m or in your config.toml -› 1. gpt-5.2-codex (default) Frontier agentic coding model. - 2. gpt-5.1-codex-max Codex-optimized flagship for deep and fast +› 1. gpt-5.3-codex (default) Latest frontier agentic coding model. + 2. gpt-5.2-codex Frontier agentic coding model. + 3. gpt-5.1-codex-max Codex-optimized flagship for deep and fast reasoning. - 3. gpt-5.2 Latest frontier model with improvements across + 4. gpt-5.2 Latest frontier model with improvements across knowledge, reasoning and coding - 4. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less + 5. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less capable. Press enter to select reasoning effort, or esc to dismiss.