Revert "Revert "Update models.json"" (#11256)

Reverts openai/codex#11255
This commit is contained in:
Ahmed Ibrahim 2026-02-09 19:22:41 -08:00 committed by GitHub
parent 34c88d10ea
commit d1df3bd63b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 374 additions and 621 deletions

File diff suppressed because one or more lines are too long

View file

@@ -5350,8 +5350,9 @@ mod tests {
let (session, turn_context) = make_session_and_context().await;
let (rollout_items, expected) = sample_rollout(&session, &turn_context).await;
let reconstruction_turn = session.new_default_turn().await;
let reconstructed = session
.reconstruct_history_from_rollout(&turn_context, &rollout_items)
.reconstruct_history_from_rollout(reconstruction_turn.as_ref(), &rollout_items)
.await;
assert_eq!(expected, reconstructed);
@@ -5562,7 +5563,12 @@ mod tests {
.record_initial_history(InitialHistory::Forked(rollout_items))
.await;
expected.extend(session.build_initial_context(&turn_context).await);
let reconstruction_turn = session.new_default_turn().await;
expected.extend(
session
.build_initial_context(reconstruction_turn.as_ref())
.await,
);
let history = session.state.lock().await.clone_history();
assert_eq!(expected, history.raw_items());
}
@@ -6089,6 +6095,7 @@ mod tests {
}
}
// todo: use online model info
pub(crate) async fn make_session_and_context() -> (Session, TurnContext) {
let (tx_event, _rx_event) = async_channel::unbounded();
let codex_home = tempfile::tempdir().expect("create temp dir");
@@ -6776,16 +6783,50 @@ mod tests {
async fn sample_rollout(
session: &Session,
turn_context: &TurnContext,
_turn_context: &TurnContext,
) -> (Vec<RolloutItem>, Vec<ResponseItem>) {
let mut rollout_items = Vec::new();
let mut live_history = ContextManager::new();
let initial_context = session.build_initial_context(turn_context).await;
// Use the same turn_context source as record_initial_history so model_info (and thus
// personality_spec) matches reconstruction.
let reconstruction_turn = session.new_default_turn().await;
let mut initial_context = session
.build_initial_context(reconstruction_turn.as_ref())
.await;
// Ensure personality_spec is present when Personality is enabled, so expected matches
// what reconstruction produces (build_initial_context may omit it when baked into model).
if !initial_context.iter().any(|m| {
matches!(m, ResponseItem::Message { role, content, .. }
if role == "developer"
&& content.iter().any(|c| {
matches!(c, ContentItem::InputText { text } if text.contains("<personality_spec>"))
}))
})
&& let Some(p) = reconstruction_turn.personality
&& session.features.enabled(Feature::Personality)
&& let Some(personality_message) = reconstruction_turn
.model_info
.model_messages
.as_ref()
.and_then(|m| m.get_personality_message(Some(p)).filter(|s| !s.is_empty()))
{
let msg =
DeveloperInstructions::personality_spec_message(personality_message).into();
let insert_at = initial_context
.iter()
.position(|m| matches!(m, ResponseItem::Message { role, .. } if role == "developer"))
.map(|i| i + 1)
.unwrap_or(0);
initial_context.insert(insert_at, msg);
}
for item in &initial_context {
rollout_items.push(RolloutItem::ResponseItem(item.clone()));
}
live_history.record_items(initial_context.iter(), turn_context.truncation_policy);
live_history.record_items(
initial_context.iter(),
reconstruction_turn.truncation_policy,
);
let user1 = ResponseItem::Message {
id: None,
@@ -6796,7 +6837,10 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&user1), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&user1),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(user1.clone()));
let assistant1 = ResponseItem::Message {
@@ -6808,17 +6852,17 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&assistant1), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&assistant1),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(assistant1.clone()));
let summary1 = "summary one";
let snapshot1 = live_history.clone().for_prompt();
let user_messages1 = collect_user_messages(&snapshot1);
let rebuilt1 = compact::build_compacted_history(
session.build_initial_context(turn_context).await,
&user_messages1,
summary1,
);
let rebuilt1 =
compact::build_compacted_history(initial_context.clone(), &user_messages1, summary1);
live_history.replace(rebuilt1);
rollout_items.push(RolloutItem::Compacted(CompactedItem {
message: summary1.to_string(),
@@ -6834,7 +6878,10 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&user2), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&user2),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(user2.clone()));
let assistant2 = ResponseItem::Message {
@@ -6846,17 +6893,17 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&assistant2), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&assistant2),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(assistant2.clone()));
let summary2 = "summary two";
let snapshot2 = live_history.clone().for_prompt();
let user_messages2 = collect_user_messages(&snapshot2);
let rebuilt2 = compact::build_compacted_history(
session.build_initial_context(turn_context).await,
&user_messages2,
summary2,
);
let rebuilt2 =
compact::build_compacted_history(initial_context.clone(), &user_messages2, summary2);
live_history.replace(rebuilt2);
rollout_items.push(RolloutItem::Compacted(CompactedItem {
message: summary2.to_string(),
@@ -6872,7 +6919,10 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&user3), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&user3),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(user3));
let assistant3 = ResponseItem::Message {
@@ -6884,7 +6934,10 @@ mod tests {
end_turn: None,
phase: None,
};
live_history.record_items(std::iter::once(&assistant3), turn_context.truncation_policy);
live_history.record_items(
std::iter::once(&assistant3),
reconstruction_turn.truncation_policy,
);
rollout_items.push(RolloutItem::ResponseItem(assistant3));
(rollout_items, live_history.for_prompt())

View file

@@ -3,15 +3,8 @@ use codex_core::CodexAuth;
use codex_core::ThreadManager;
use codex_core::built_in_model_providers;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::default_input_modalities;
use core_test_support::load_default_config_for_test;
use indoc::indoc;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use tempfile::tempdir;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -26,8 +19,8 @@ async fn list_models_returns_api_key_models() -> Result<()> {
.list_models(&config, RefreshStrategy::OnlineIfUncached)
.await;
let expected_models = expected_models_for_api_key();
assert_eq!(expected_models, models);
let slugs: Vec<String> = models.into_iter().map(|m| m.id).collect();
assert_eq!(expected_slugs(), slugs);
Ok(())
}
@@ -44,486 +37,24 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
.list_models(&config, RefreshStrategy::OnlineIfUncached)
.await;
let expected_models = expected_models_for_chatgpt();
assert_eq!(expected_models, models);
let slugs: Vec<String> = models.into_iter().map(|m| m.id).collect();
assert_eq!(expected_slugs(), slugs);
Ok(())
}
fn expected_models_for_api_key() -> Vec<ModelPreset> {
fn expected_slugs() -> Vec<String> {
vec![
gpt_52_codex(),
gpt_5_2(),
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_1(),
gpt_5_codex(),
gpt_5(),
gpt_5_codex_mini(),
bengalfox(),
boomslang(),
"gpt-5.2-codex".into(),
"gpt-5.1-codex-max".into(),
"gpt-5.1-codex".into(),
"gpt-5.2".into(),
"gpt-5.1".into(),
"gpt-5-codex".into(),
"gpt-5".into(),
"gpt-5.1-codex-mini".into(),
"gpt-5-codex-mini".into(),
"bengalfox".into(),
"boomslang".into(),
]
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
expected_models_for_api_key()
}
fn gpt_52_codex() -> ModelPreset {
ModelPreset {
id: "gpt-5.2-codex".to_string(),
model: "gpt-5.2-codex".to_string(),
display_name: "gpt-5.2-codex".to_string(),
description: "Latest frontier agentic coding model.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fast responses with lighter reasoning",
),
effort(
ReasoningEffort::Medium,
"Balances speed and reasoning depth for everyday tasks",
),
effort(
ReasoningEffort::High,
"Greater reasoning depth for complex problems",
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: true,
upgrade: None,
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_1_codex_max() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fast responses with lighter reasoning",
),
effort(
ReasoningEffort::Medium,
"Balances speed and reasoning depth for everyday tasks",
),
effort(
ReasoningEffort::High,
"Greater reasoning depth for complex problems",
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex-max",
HashMap::from([
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::XHigh, ReasoningEffort::XHigh),
]),
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_1_codex_mini() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
model: "gpt-5.1-codex-mini".to_string(),
display_name: "gpt-5.1-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Medium,
"Dynamically adjusts reasoning based on the task",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex-mini",
HashMap::from([
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Medium),
(ReasoningEffort::None, ReasoningEffort::Medium),
(ReasoningEffort::Low, ReasoningEffort::Medium),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
]),
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_2() -> ModelPreset {
ModelPreset {
id: "gpt-5.2".to_string(),
model: "gpt-5.2".to_string(),
display_name: "gpt-5.2".to_string(),
description:
"Latest frontier model with improvements across knowledge, reasoning and coding"
.to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
),
effort(
ReasoningEffort::Medium,
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.2",
HashMap::from([
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::XHigh),
]),
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn bengalfox() -> ModelPreset {
ModelPreset {
id: "bengalfox".to_string(),
model: "bengalfox".to_string(),
display_name: "bengalfox".to_string(),
description: "bengalfox".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fast responses with lighter reasoning",
),
effort(
ReasoningEffort::Medium,
"Balances speed and reasoning depth for everyday tasks",
),
effort(
ReasoningEffort::High,
"Greater reasoning depth for complex problems",
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning depth for complex problems",
),
],
supports_personality: true,
is_default: false,
upgrade: None,
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn boomslang() -> ModelPreset {
ModelPreset {
id: "boomslang".to_string(),
model: "boomslang".to_string(),
display_name: "boomslang".to_string(),
description: "boomslang".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
),
effort(
ReasoningEffort::Medium,
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning depth for complex problems",
),
],
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_codex() -> ModelPreset {
ModelPreset {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fastest responses with limited reasoning",
),
effort(
ReasoningEffort::Medium,
"Dynamically adjusts reasoning based on the task",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5-codex",
HashMap::from([
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
]),
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_codex_mini() -> ModelPreset {
ModelPreset {
id: "gpt-5-codex-mini".to_string(),
model: "gpt-5-codex-mini".to_string(),
display_name: "gpt-5-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Medium,
"Dynamically adjusts reasoning based on the task",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5-codex-mini",
HashMap::from([
(ReasoningEffort::None, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Low, ReasoningEffort::Medium),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::Minimal, ReasoningEffort::Medium),
]),
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_1_codex() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Fastest responses with limited reasoning",
),
effort(
ReasoningEffort::Medium,
"Dynamically adjusts reasoning based on the task",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1-codex",
HashMap::from([
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::XHigh, ReasoningEffort::High),
]),
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5() -> ModelPreset {
ModelPreset {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
display_name: "gpt-5".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Minimal,
"Fastest responses with little reasoning",
),
effort(
ReasoningEffort::Low,
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
),
effort(
ReasoningEffort::Medium,
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5",
HashMap::from([
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Minimal),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::None, ReasoningEffort::Minimal),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
]),
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt_5_1() -> ModelPreset {
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
effort(
ReasoningEffort::Low,
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
),
effort(
ReasoningEffort::Medium,
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
),
effort(
ReasoningEffort::High,
"Maximizes reasoning depth for complex or ambiguous problems",
),
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt52_codex_upgrade(
"gpt-5.1",
HashMap::from([
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::Minimal, ReasoningEffort::Low),
]),
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
fn gpt52_codex_upgrade(
migration_config_key: &str,
reasoning_effort_mapping: HashMap<ReasoningEffort, ReasoningEffort>,
) -> ModelUpgrade {
ModelUpgrade {
id: "gpt-5.2-codex".to_string(),
reasoning_effort_mapping: Some(reasoning_effort_mapping),
migration_config_key: migration_config_key.to_string(),
model_link: None,
upgrade_copy: None,
migration_markdown: Some(
indoc! {r#"
**Codex just got an upgrade. Introducing {model_to}.**
Codex is now powered by {model_to}, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex
You can continue using {model_from} if you prefer.
"#}
.to_string(),
),
}
}
fn effort(reasoning_effort: ReasoningEffort, description: &str) -> ReasoningEffortPreset {
ReasoningEffortPreset {
effort: reasoning_effort,
description: description.to_string(),
}
}

View file

@@ -5,11 +5,11 @@ expression: popup
Select Model and Effort
Access legacy models by running codex -m <model_name> or in your config.toml
1. gpt-5.2-codex (default) Latest frontier agentic coding model.
2. gpt-5.2 Latest frontier model with improvements across
knowledge, reasoning and coding
3. gpt-5.1-codex-max Codex-optimized flagship for deep and fast
1. gpt-5.2-codex (default) Frontier agentic coding model.
2. gpt-5.1-codex-max Codex-optimized flagship for deep and fast
reasoning.
3. gpt-5.2 Latest frontier model with improvements across
knowledge, reasoning and coding
4. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
capable.

View file

@@ -2361,7 +2361,8 @@ async fn steer_enter_submits_when_plan_stream_is_not_active() {
assert!(chat.queued_user_messages.is_empty());
match next_submit_op(&mut op_rx) {
Op::UserTurn {
personality: None, ..
personality: Some(Personality::Pragmatic),
..
} => {}
other => panic!("expected Op::UserTurn, got {other:?}"),
}
@@ -2912,7 +2913,7 @@ async fn collab_slash_command_opens_picker_and_updates_mode() {
mode: ModeKind::Default,
..
}),
personality: None,
personality: Some(Personality::Pragmatic),
..
} => {}
other => {
@@ -2930,7 +2931,7 @@ async fn collab_slash_command_opens_picker_and_updates_mode() {
mode: ModeKind::Default,
..
}),
personality: None,
personality: Some(Personality::Pragmatic),
..
} => {}
other => {
@@ -3128,7 +3129,7 @@ async fn collab_mode_is_sent_after_enabling() {
mode: ModeKind::Default,
..
}),
personality: None,
personality: Some(Personality::Pragmatic),
..
} => {}
other => {
@@ -3148,7 +3149,7 @@ async fn collab_mode_toggle_on_applies_default_preset() {
match next_submit_op(&mut op_rx) {
Op::UserTurn {
collaboration_mode: None,
personality: None,
personality: Some(Personality::Pragmatic),
..
} => {}
other => panic!("expected Op::UserTurn without collaboration_mode, got {other:?}"),
@@ -3166,7 +3167,7 @@ async fn collab_mode_toggle_on_applies_default_preset() {
mode: ModeKind::Default,
..
}),
personality: None,
personality: Some(Personality::Pragmatic),
..
} => {}
other => {