chores: clean picker (#8232)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed and high-quality description of your changes.

Include a link to a bug report or enhancement request.
Ahmed Ibrahim 2025-12-18 08:41:34 -08:00 committed by GitHub
parent 9bf41e9262
commit 374d591311
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 76 additions and 240 deletions


@@ -47,33 +47,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
} = to_response::<ModelListResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5.2".to_string(),
model: "gpt-5.2".to_string(),
@@ -105,7 +78,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
is_default: true,
},
Model {
id: "gpt-5.1-codex-mini".to_string(),
@@ -126,29 +99,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
@@ -237,7 +187,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5.1");
assert_eq!(first_items[0].id, "gpt-5.2");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
@@ -259,7 +209,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5.2");
assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
let third_request = mcp
@@ -281,7 +231,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
assert_eq!(third_items[0].id, "gpt-5.1-codex-max");
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
let fourth_request = mcp
@@ -303,52 +253,8 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1-codex");
let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;
let fifth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fifth_cursor.clone()),
})
.await?;
let fifth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
)
.await??;
let ModelListResponse {
data: fifth_items,
next_cursor: fifth_cursor,
} = to_response::<ModelListResponse>(fifth_response)?;
assert_eq!(fifth_items.len(), 1);
assert_eq!(fifth_items[0].id, "gpt-5.1-codex-max");
let sixth_cursor = fifth_cursor.ok_or_else(|| anyhow!("cursor for sixth page"))?;
let sixth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(sixth_cursor.clone()),
})
.await?;
let sixth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(sixth_request)),
)
.await??;
let ModelListResponse {
data: sixth_items,
next_cursor: sixth_cursor,
} = to_response::<ModelListResponse>(sixth_response)?;
assert_eq!(sixth_items.len(), 1);
assert_eq!(sixth_items[0].id, "caribou");
assert!(sixth_cursor.is_none());
assert_eq!(fourth_items[0].id, "caribou");
assert!(fourth_cursor.is_none());
Ok(())
}


@@ -74,37 +74,6 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
model_link: Some("https://www.codex.com/models/caribou".to_string()),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
model: "gpt-5.1-codex-mini".to_string(),
@@ -166,36 +135,6 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
model_link: Some("https://www.codex.com/models/caribou".to_string()),
}),
show_in_picker: true,
supported_in_api: true,
},
// Deprecated models.
ModelPreset {
id: "gpt-5-codex".to_string(),
@@ -253,6 +192,37 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
model_link: Some("https://www.codex.com/models/caribou".to_string()),
}),
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
@@ -287,6 +257,36 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
model_link: Some("https://www.codex.com/models/caribou".to_string()),
}),
show_in_picker: false,
supported_in_api: true,
},
]
});


@@ -42,13 +42,7 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
}
fn expected_models_for_api_key() -> Vec<ModelPreset> {
vec![
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_2(),
gpt_5_1(),
]
vec![gpt_5_1_codex_max(), gpt_5_1_codex_mini(), gpt_5_2()]
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
@@ -57,10 +51,8 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
vec![
caribou(),
gpt_5_1_codex_max,
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_2(),
gpt_5_1(),
]
}
@@ -128,34 +120,6 @@ fn gpt_5_1_codex_max() -> ModelPreset {
}
}
fn gpt_5_1_codex() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fastest responses with limited reasoning",
),
effort(
ReasoningEffort::Medium,
"Dynamically adjusts reasoning based on the task",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
is_default: false,
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
fn gpt_5_1_codex_mini() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
@@ -214,34 +178,6 @@ fn gpt_5_2() -> ModelPreset {
}
}
fn gpt_5_1() -> ModelPreset {
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
),
effort(
ReasoningEffort::Medium,
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
is_default: false,
upgrade: Some(caribou_upgrade()),
show_in_picker: true,
supported_in_api: true,
}
}
fn caribou_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
codex_protocol::openai_models::ModelUpgrade {
id: "caribou".to_string(),


@@ -7,12 +7,9 @@ expression: popup
1. gpt-5.1-codex-max (default) Latest Codex-optimized flagship for deep and
fast reasoning.
2. gpt-5.1-codex Optimized for codex.
3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but
2. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but
less capable.
4. gpt-5.2 Latest frontier model with improvements
3. gpt-5.2 Latest frontier model with improvements
across knowledge, reasoning and coding
5. gpt-5.1 Broad world knowledge with strong general
reasoning.
Press enter to select reasoning effort, or esc to dismiss.


@@ -2061,10 +2061,10 @@ fn feedback_upload_consent_popup_snapshot() {
#[test]
fn reasoning_popup_escape_returns_to_model_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1"));
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max"));
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex");
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
let before_escape = render_bottom_popup(&chat, 80);


@@ -7,12 +7,9 @@ expression: popup
1. gpt-5.1-codex-max (default) Latest Codex-optimized flagship for deep and
fast reasoning.
2. gpt-5.1-codex Optimized for codex.
3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but
2. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but
less capable.
4. gpt-5.2 Latest frontier model with improvements
3. gpt-5.2 Latest frontier model with improvements
across knowledge, reasoning and coding
5. gpt-5.1 Broad world knowledge with strong general
reasoning.
Press enter to select reasoning effort, or esc to dismiss.


@@ -1953,10 +1953,10 @@ fn feedback_upload_consent_popup_snapshot() {
#[test]
fn reasoning_popup_escape_returns_to_model_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1"));
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max"));
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex");
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
let before_escape = render_bottom_popup(&chat, 80);