make 5.3-codex visible in cli for api users (#12808)

5.3-codex has been released in the API; mark it as visible for API users via the bundled
`models.json`.
This commit is contained in:
sayan-oai 2026-02-25 13:01:40 -08:00 committed by GitHub
parent be5bca6f8d
commit d45ffd5830
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 50 additions and 95 deletions

View file

@@ -1,7 +1,6 @@
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_models_cache;
@@ -136,100 +135,45 @@ async fn list_models_pagination_works() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: None,
include_hidden: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let ModelListResponse {
data: first_items,
next_cursor: first_cursor,
} = to_response::<ModelListResponse>(first_response)?;
let expected_models = expected_visible_models();
let mut cursor = None;
let mut items = Vec::new();
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, expected_models[0].id);
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
for _ in 0..expected_models.len() {
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: cursor.clone(),
include_hidden: None,
})
.await?;
let second_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(next_cursor.clone()),
include_hidden: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let ModelListResponse {
data: page_items,
next_cursor,
} = to_response::<ModelListResponse>(response)?;
let ModelListResponse {
data: second_items,
next_cursor: second_cursor,
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(page_items.len(), 1);
items.extend(page_items);
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, expected_models[1].id);
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
if let Some(next_cursor) = next_cursor {
cursor = Some(next_cursor);
} else {
assert_eq!(items, expected_models);
return Ok(());
}
}
let third_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(third_cursor.clone()),
include_hidden: None,
})
.await?;
let third_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(third_request)),
)
.await??;
let ModelListResponse {
data: third_items,
next_cursor: third_cursor,
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, expected_models[2].id);
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
let fourth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fourth_cursor.clone()),
include_hidden: None,
})
.await?;
let fourth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)),
)
.await??;
let ModelListResponse {
data: fourth_items,
next_cursor: fourth_cursor,
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, expected_models[3].id);
assert!(fourth_cursor.is_none());
Ok(())
panic!(
"model pagination did not terminate after {} pages",
expected_models.len()
);
}
#[tokio::test]

View file

@@ -40,7 +40,7 @@
}
],
"shell_type": "shell_command",
"visibility": "hide",
"visibility": "list",
"minimal_client_version": "0.98.0",
"supported_in_api": true,
"upgrade": null,

View file

@@ -830,8 +830,9 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> {
let elapsed = start.elapsed();
// get_model should return a default model even when refresh times out
let default_model = model.expect("get_model should finish and return default model");
let expected_default = bundled_default_model_slug();
assert!(
default_model == "gpt-5.2-codex",
default_model == expected_default,
"get_model should return default model when refresh times out, got: {default_model}"
);
let _ = server
@@ -889,7 +890,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
let selected = manager
.get_default_model(&None, RefreshStrategy::OnlineIfUncached)
.await;
assert_eq!(selected, "gpt-5.2-codex");
assert_eq!(selected, bundled_default_model_slug());
let available = manager.list_models(RefreshStrategy::OnlineIfUncached).await;
let hidden = available
@@ -935,6 +936,15 @@ fn bundled_model_slug() -> String {
.clone()
}
fn bundled_default_model_slug() -> String {
codex_core::test_support::all_model_presets()
.iter()
.find(|preset| preset.is_default)
.expect("bundled models should include a default")
.model
.clone()
}
fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> ModelInfo {
test_remote_model_with_policy(
slug,

View file

@@ -5,12 +5,13 @@ expression: popup
Select Model and Effort
Access legacy models by running codex -m <model_name> or in your config.toml
1. gpt-5.2-codex (default) Frontier agentic coding model.
2. gpt-5.1-codex-max Codex-optimized flagship for deep and fast
1. gpt-5.3-codex (default) Latest frontier agentic coding model.
2. gpt-5.2-codex Frontier agentic coding model.
3. gpt-5.1-codex-max Codex-optimized flagship for deep and fast
reasoning.
3. gpt-5.2 Latest frontier model with improvements across
4. gpt-5.2 Latest frontier model with improvements across
knowledge, reasoning and coding
4. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
5. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
capable.
Press enter to select reasoning effort, or esc to dismiss.