Improve Plan mode reasoning selection flow (#12303)

Addresses https://github.com/openai/codex/issues/11013

## Summary
- add a Plan implementation path in the TUI that lets users choose
reasoning before switching to Default mode and implementing
- add Plan-mode reasoning scope handling (Plan-only override vs
all-modes default), including config/schema/docs plumbing for
`plan_mode_reasoning_effort`
- remove the hardcoded Plan preset medium default and make the reasoning
popup reflect the active Plan override as `(current)`
- split the collaboration-mode switch notification UI hint into #12307
to keep this diff focused

If I have `plan_mode_reasoning_effort = "medium"` set in my
`config.toml`:
<img width="699" height="127" alt="Screenshot 2026-02-20 at 6 59 37 PM"
src="https://github.com/user-attachments/assets/b33abf04-6b7a-49ed-b2e9-d24b99795369"
/>

If I don't have `plan_mode_reasoning_effort` set in my `config.toml`:
<img width="704" height="129" alt="Screenshot 2026-02-20 at 7 01 51 PM"
src="https://github.com/user-attachments/assets/88a086d4-d2f1-49c7-8be4-f6f0c0fa1b8d"
/>

## Codex author
`codex resume 019c78a2-726b-7fe3-adac-3fa4523dcc2a`
This commit is contained in:
Charley Cunningham 2026-02-20 20:08:56 -08:00 committed by GitHub
parent 7ae5d88016
commit 4c1744afb2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 558 additions and 55 deletions

View file

@ -467,6 +467,9 @@
"personality": {
"$ref": "#/definitions/Personality"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},
"sandbox_mode": {
"$ref": "#/definitions/SandboxMode"
},
@ -1901,6 +1904,9 @@
],
"description": "Optionally specify a personality for the model"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},
"profile": {
"description": "Profile to use from the `profiles` map.",
"type": "string"

View file

@ -376,6 +376,13 @@ pub struct Config {
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
pub model_reasoning_effort: Option<ReasoningEffort>,
/// Optional Plan-mode-specific reasoning effort override used by the TUI.
///
/// When unset, Plan mode uses the built-in Plan preset default (currently
/// `medium`). When explicitly set (including `none`), this overrides the
/// Plan preset. The `none` value means "no reasoning" (not "inherit the
/// global default").
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
/// If not "none", the value to use for `reasoning.summary` when making a
/// request using the Responses API.
@ -1107,6 +1114,7 @@ pub struct ConfigToml {
pub show_raw_agent_reasoning: Option<bool>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,
@ -2042,6 +2050,9 @@ impl Config {
model_reasoning_effort: config_profile
.model_reasoning_effort
.or(cfg.model_reasoning_effort),
plan_mode_reasoning_effort: config_profile
.plan_mode_reasoning_effort
.or(cfg.plan_mode_reasoning_effort),
model_reasoning_summary: config_profile
.model_reasoning_summary
.or(cfg.model_reasoning_summary)
@ -4588,6 +4599,7 @@ model_verbosity = "high"
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_catalog: None,
@ -4708,6 +4720,7 @@ model_verbosity = "high"
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_catalog: None,
@ -4826,6 +4839,7 @@ model_verbosity = "high"
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_catalog: None,
@ -4930,6 +4944,7 @@ model_verbosity = "high"
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_catalog: None,

View file

@ -24,6 +24,7 @@ pub struct ConfigProfile {
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub personality: Option<Personality>,

View file

@ -75,6 +75,10 @@ mod tests {
fn preset_names_use_mode_display_names() {
assert_eq!(plan_preset().name, ModeKind::Plan.display_name());
assert_eq!(default_preset().name, ModeKind::Default.display_name());
assert_eq!(
plan_preset().reasoning_effort,
Some(Some(ReasoningEffort::Medium))
);
}
#[test]

View file

@ -1764,6 +1764,10 @@ impl App {
AppEvent::OpenReasoningPopup { model } => {
self.chat_widget.open_reasoning_popup(model);
}
AppEvent::OpenPlanReasoningScopePrompt { model, effort } => {
self.chat_widget
.open_plan_reasoning_scope_prompt(model, effort);
}
AppEvent::OpenAllModelsPopup { models } => {
self.chat_widget.open_all_models_popup(models);
}
@ -2115,6 +2119,10 @@ impl App {
.await
{
Ok(()) => {
let effort_label = effort
.map(|selected_effort| selected_effort.to_string())
.unwrap_or_else(|| "default".to_string());
tracing::info!("Selected model: {model}, Selected effort: {effort_label}");
let mut message = format!("Model changed to {model}");
if let Some(label) = Self::reasoning_label_for(&model, effort) {
message.push(' ');
@ -2315,6 +2323,11 @@ impl App {
AppEvent::UpdateRateLimitSwitchPromptHidden(hidden) => {
self.chat_widget.set_rate_limit_switch_prompt_hidden(hidden);
}
AppEvent::UpdatePlanModeReasoningEffort(effort) => {
self.config.plan_mode_reasoning_effort = effort;
self.chat_widget.set_plan_mode_reasoning_effort(effort);
self.refresh_status_line();
}
AppEvent::PersistFullAccessWarningAcknowledged => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.set_hide_full_access_warning(true)
@ -2360,6 +2373,45 @@ impl App {
));
}
}
AppEvent::PersistPlanModeReasoningEffort(effort) => {
let profile = self.active_profile.as_deref();
let segments = if let Some(profile) = profile {
vec![
"profiles".to_string(),
profile.to_string(),
"plan_mode_reasoning_effort".to_string(),
]
} else {
vec!["plan_mode_reasoning_effort".to_string()]
};
let edit = if let Some(effort) = effort {
ConfigEdit::SetPath {
segments,
value: effort.to_string().into(),
}
} else {
ConfigEdit::ClearPath { segments }
};
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.with_edits([edit])
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist plan mode reasoning effort"
);
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode reasoning effort for profile `{profile}`: {err}"
));
} else {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode reasoning effort: {err}"
));
}
}
}
AppEvent::PersistModelMigrationPromptAcknowledged {
from_model,
to_model,

View file

@ -164,6 +164,12 @@ pub(crate) enum AppEvent {
model: ModelPreset,
},
/// Open the Plan-mode reasoning scope prompt for the selected model/effort.
OpenPlanReasoningScopePrompt {
model: String,
effort: Option<ReasoningEffort>,
},
/// Open the full model picker (non-auto models).
OpenAllModelsPopup {
models: Vec<ModelPreset>,
@ -258,6 +264,9 @@ pub(crate) enum AppEvent {
/// Update whether the rate limit switch prompt has been acknowledged for the session.
UpdateRateLimitSwitchPromptHidden(bool),
/// Update the Plan-mode-specific reasoning effort in memory.
UpdatePlanModeReasoningEffort(Option<ReasoningEffort>),
/// Persist the acknowledgement flag for the full access warning prompt.
PersistFullAccessWarningAcknowledged,
@ -268,6 +277,9 @@ pub(crate) enum AppEvent {
/// Persist the acknowledgement flag for the rate limit switch prompt.
PersistRateLimitSwitchPromptHidden,
/// Persist the Plan-mode-specific reasoning effort.
PersistPlanModeReasoningEffort(Option<ReasoningEffort>),
/// Persist the acknowledgement flag for the model migration prompt.
PersistModelMigrationPromptAcknowledged {
from_model: String,

View file

@ -158,6 +158,9 @@ const PLAN_IMPLEMENTATION_TITLE: &str = "Implement this plan?";
const PLAN_IMPLEMENTATION_YES: &str = "Yes, implement this plan";
const PLAN_IMPLEMENTATION_NO: &str = "No, stay in Plan mode";
const PLAN_IMPLEMENTATION_CODING_MESSAGE: &str = "Implement the plan.";
const PLAN_MODE_REASONING_SCOPE_TITLE: &str = "Apply reasoning change";
const PLAN_MODE_REASONING_SCOPE_PLAN_ONLY: &str = "Apply to Plan mode override";
const PLAN_MODE_REASONING_SCOPE_ALL_MODES: &str = "Apply to global default and Plan mode override";
const CONNECTORS_SELECTION_VIEW_ID: &str = "connectors-selection";
/// Choose the keybinding used to edit the most-recently queued message.
@ -1452,7 +1455,6 @@ impl ChatWidget {
}
None => (Vec::new(), Some("Default mode unavailable".to_string())),
};
let items = vec![
SelectionItem {
name: PLAN_IMPLEMENTATION_YES.to_string(),
@ -5031,16 +5033,20 @@ impl ChatWidget {
}
auto_presets.sort_by_key(|preset| Self::auto_model_order(&preset.model));
let mut items: Vec<SelectionItem> = auto_presets
.into_iter()
.map(|preset| {
let description =
(!preset.description.is_empty()).then_some(preset.description.clone());
let model = preset.model.clone();
let should_prompt_plan_mode_scope = self.should_prompt_plan_mode_reasoning_scope(
model.as_str(),
Some(preset.default_reasoning_effort),
);
let actions = Self::model_selection_actions(
model.clone(),
Some(preset.default_reasoning_effort),
should_prompt_plan_mode_scope,
);
SelectionItem {
name: model.clone(),
@ -5195,40 +5201,135 @@ impl ChatWidget {
fn model_selection_actions(
model_for_action: String,
effort_for_action: Option<ReasoningEffortConfig>,
should_prompt_plan_mode_scope: bool,
) -> Vec<SelectionAction> {
vec![Box::new(move |tx| {
let effort_label = effort_for_action
.map(|effort| effort.to_string())
.unwrap_or_else(|| "default".to_string());
tx.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
windows_sandbox_level: None,
model: Some(model_for_action.clone()),
effort: Some(effort_for_action),
summary: None,
collaboration_mode: None,
personality: None,
}));
if should_prompt_plan_mode_scope {
tx.send(AppEvent::OpenPlanReasoningScopePrompt {
model: model_for_action.clone(),
effort: effort_for_action,
});
return;
}
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort_for_action));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: effort_for_action,
});
tracing::info!(
"Selected model: {}, Selected effort: {}",
model_for_action,
effort_label
);
})]
}
/// Decides whether picking `selected_model`/`selected_effort` should open the
/// Plan-mode scope prompt ("apply to Plan only" vs "apply to all modes")
/// instead of applying the selection directly.
///
/// Never prompts unless collaboration modes are enabled, the active mode is
/// Plan, and the selection targets the currently active model; for a model
/// switch the normal (global) selection path is used unchanged.
fn should_prompt_plan_mode_reasoning_scope(
&self,
selected_model: &str,
selected_effort: Option<ReasoningEffortConfig>,
) -> bool {
if !self.collaboration_modes_enabled()
|| self.active_mode_kind() != ModeKind::Plan
|| selected_model != self.current_model()
{
return false;
}
// Prompt whenever the selection is not a true no-op for both:
// 1) the active Plan-mode effective reasoning, and
// 2) the stored global defaults that would be updated by the fallback path.
selected_effort != self.effective_reasoning_effort()
|| selected_model != self.current_collaboration_mode.model()
|| selected_effort != self.current_collaboration_mode.reasoning_effort()
}
/// Shows the two-item selection popup asking where a reasoning-effort change
/// made while in Plan mode should apply:
/// - Plan only: updates/persists the Plan-mode override.
/// - All modes: additionally updates/persists the global default selection.
///
/// `effort == None` means "the selected reasoning" (no concrete level label);
/// `Some(ReasoningEffortConfig::None)` is rendered as "no reasoning".
pub(crate) fn open_plan_reasoning_scope_prompt(
&mut self,
model: String,
effort: Option<ReasoningEffortConfig>,
) {
// Human-readable phrase for the chosen level, reused in the subtitle and
// the per-item descriptions.
let reasoning_phrase = match effort {
Some(ReasoningEffortConfig::None) => "no reasoning".to_string(),
Some(selected_effort) => {
format!(
"{} reasoning",
Self::reasoning_effort_label(selected_effort).to_lowercase()
)
}
None => "the selected reasoning".to_string(),
};
let plan_only_description = format!("Always use {reasoning_phrase} in Plan mode.");
// Describe what the "all modes" choice would replace: an explicit user
// override if one is configured, otherwise the built-in Plan preset
// default (resolved from the plan mask when available).
let plan_reasoning_source = if let Some(plan_override) =
self.config.plan_mode_reasoning_effort
{
format!(
"user-chosen Plan override ({})",
Self::reasoning_effort_label(plan_override).to_lowercase()
)
} else if let Some(plan_mask) = collaboration_modes::plan_mask(self.models_manager.as_ref())
{
match plan_mask.reasoning_effort.flatten() {
Some(plan_effort) => format!(
"built-in Plan default ({})",
Self::reasoning_effort_label(plan_effort).to_lowercase()
),
None => "built-in Plan default (no reasoning)".to_string(),
}
} else {
"built-in Plan default".to_string()
};
let all_modes_description = format!(
"Set the global default reasoning level and the Plan mode override. This replaces the current {plan_reasoning_source}."
);
let subtitle = format!("Choose where to apply {reasoning_phrase}.");
// Plan-only: update the model plus the Plan override, both in memory and
// on disk. Deliberately does NOT touch the global reasoning default.
let plan_only_actions: Vec<SelectionAction> = vec![Box::new({
let model = model.clone();
move |tx| {
tx.send(AppEvent::UpdateModel(model.clone()));
tx.send(AppEvent::UpdatePlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeReasoningEffort(effort));
}
})];
// All modes: update both the global default and the Plan override, and
// persist both, so Plan mode and other modes stay in agreement.
let all_modes_actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::UpdateModel(model.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort));
tx.send(AppEvent::UpdatePlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistModelSelection {
model: model.clone(),
effort,
});
})];
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some(PLAN_MODE_REASONING_SCOPE_TITLE.to_string()),
subtitle: Some(subtitle),
footer_hint: Some(standard_popup_hint_line()),
items: vec![
SelectionItem {
name: PLAN_MODE_REASONING_SCOPE_PLAN_ONLY.to_string(),
description: Some(plan_only_description),
actions: plan_only_actions,
dismiss_on_select: true,
..Default::default()
},
SelectionItem {
name: PLAN_MODE_REASONING_SCOPE_ALL_MODES.to_string(),
description: Some(all_modes_description),
actions: all_modes_actions,
dismiss_on_select: true,
..Default::default()
},
],
..Default::default()
});
}
/// Open a popup to choose the reasoning effort (stage 2) for the given model.
pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) {
let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
let supported = preset.supported_reasoning_efforts;
let in_plan_mode =
self.collaboration_modes_enabled() && self.active_mode_kind() == ModeKind::Plan;
let warn_effort = if supported
.iter()
@ -5272,10 +5373,16 @@ impl ChatWidget {
}
if choices.len() == 1 {
if let Some(effort) = choices.first().and_then(|c| c.stored) {
self.apply_model_and_effort(preset.model, Some(effort));
let selected_effort = choices.first().and_then(|c| c.stored);
let selected_model = preset.model;
if self.should_prompt_plan_mode_reasoning_scope(&selected_model, selected_effort) {
self.app_event_tx
.send(AppEvent::OpenPlanReasoningScopePrompt {
model: selected_model,
effort: selected_effort,
});
} else {
self.apply_model_and_effort(preset.model, None);
self.apply_model_and_effort(selected_model, selected_effort);
}
return;
}
@ -5291,7 +5398,13 @@ impl ChatWidget {
let model_slug = preset.model.to_string();
let is_current_model = self.current_model() == preset.model.as_str();
let highlight_choice = if is_current_model {
self.effective_reasoning_effort()
if in_plan_mode {
self.config
.plan_mode_reasoning_effort
.or(self.effective_reasoning_effort())
} else {
self.effective_reasoning_effort()
}
} else {
default_choice
};
@ -5334,7 +5447,24 @@ impl ChatWidget {
};
let model_for_action = model_slug.clone();
let actions = Self::model_selection_actions(model_for_action, choice.stored);
let choice_effort = choice.stored;
let should_prompt_plan_mode_scope =
self.should_prompt_plan_mode_reasoning_scope(model_slug.as_str(), choice_effort);
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
if should_prompt_plan_mode_scope {
tx.send(AppEvent::OpenPlanReasoningScopePrompt {
model: model_for_action.clone(),
effort: choice_effort,
});
} else {
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(choice_effort));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: choice_effort,
});
}
})];
items.push(SelectionItem {
name: effort_label,
@ -5372,33 +5502,20 @@ impl ChatWidget {
}
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
windows_sandbox_level: None,
model: Some(model.clone()),
effort: Some(effort),
summary: None,
collaboration_mode: None,
personality: None,
}));
self.app_event_tx.send(AppEvent::UpdateModel(model.clone()));
fn apply_model_and_effort_without_persist(
&self,
model: String,
effort: Option<ReasoningEffortConfig>,
) {
self.app_event_tx.send(AppEvent::UpdateModel(model));
self.app_event_tx
.send(AppEvent::UpdateReasoningEffort(effort));
self.app_event_tx.send(AppEvent::PersistModelSelection {
model: model.clone(),
effort,
});
tracing::info!(
"Selected model: {}, Selected effort: {}",
model,
effort
.map(|e| e.to_string())
.unwrap_or_else(|| "default".to_string())
);
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.apply_model_and_effort_without_persist(model.clone(), effort);
self.app_event_tx
.send(AppEvent::PersistModelSelection { model, effort });
}
/// Open the permissions popup (alias for /permissions).
@ -6163,6 +6280,22 @@ impl ChatWidget {
.unwrap_or(false)
}
/// Updates the in-memory Plan-mode reasoning override and, when Plan mode is
/// currently active, reflects the change into the live collaboration mask.
///
/// `None` clears the override, in which case the active mask falls back to
/// the built-in Plan preset's reasoning effort (when the plan mask resolves).
pub(crate) fn set_plan_mode_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
self.config.plan_mode_reasoning_effort = effort;
if self.collaboration_modes_enabled()
&& let Some(mask) = self.active_collaboration_mask.as_mut()
&& mask.mode == Some(ModeKind::Plan)
{
if let Some(effort) = effort {
// Explicit override: pin the mask to Some(Some(effort)).
mask.reasoning_effort = Some(Some(effort));
} else if let Some(plan_mask) =
collaboration_modes::plan_mask(self.models_manager.as_ref())
{
// Override cleared: restore the preset's reasoning effort.
mask.reasoning_effort = plan_mask.reasoning_effort;
}
}
}
/// Set the reasoning effort in the stored collaboration mode.
pub(crate) fn set_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
self.current_collaboration_mode =
@ -6170,7 +6303,10 @@ impl ChatWidget {
.with_updates(None, Some(effort), None);
if self.collaboration_modes_enabled()
&& let Some(mask) = self.active_collaboration_mask.as_mut()
&& mask.mode != Some(ModeKind::Plan)
{
// Generic "global default" updates should not mutate the active Plan mask.
// Plan reasoning is controlled by the Plan preset and Plan-only override updates.
mask.reasoning_effort = Some(effort);
}
}
@ -6394,13 +6530,18 @@ impl ChatWidget {
///
/// When collaboration modes are enabled and a preset is selected,
/// the current mode is attached to submissions as `Op::UserTurn { collaboration_mode: Some(...) }`.
pub(crate) fn set_collaboration_mask(&mut self, mask: CollaborationModeMask) {
pub(crate) fn set_collaboration_mask(&mut self, mut mask: CollaborationModeMask) {
if !self.collaboration_modes_enabled() {
return;
}
let previous_mode = self.active_mode_kind();
let previous_model = self.current_model().to_string();
let previous_effort = self.effective_reasoning_effort();
if mask.mode == Some(ModeKind::Plan)
&& let Some(effort) = self.config.plan_mode_reasoning_effort
{
mask.reasoning_effort = Some(Some(effort));
}
self.active_collaboration_mask = Some(mask);
self.update_collaboration_mode_indicator();
self.refresh_model_display();
@ -6817,8 +6958,13 @@ impl ChatWidget {
pub(crate) fn submit_user_message_with_mode(
&mut self,
text: String,
collaboration_mode: CollaborationModeMask,
mut collaboration_mode: CollaborationModeMask,
) {
if collaboration_mode.mode == Some(ModeKind::Plan)
&& let Some(effort) = self.config.plan_mode_reasoning_effort
{
collaboration_mode.reasoning_effort = Some(Some(effort));
}
if self.agent_turn_running
&& self.active_collaboration_mask.as_ref() != Some(&collaboration_mode)
{

View file

@ -1,6 +1,5 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 1548
expression: popup
---
Select Reasoning Level for gpt-5.1-codex-max

View file

@ -2177,6 +2177,243 @@ async fn submit_user_message_with_mode_sets_coding_collaboration_mode() {
}
}
// Selecting a different reasoning level while in Plan mode must emit the
// scope-prompt event for the current model instead of applying directly.
#[tokio::test]
async fn reasoning_selection_in_plan_mode_opens_scope_prompt_event() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, true);
let plan_mask = collaboration_modes::plan_mask(chat.models_manager.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let _ = drain_insert_history(&mut rx);
set_chatgpt_auth(&mut chat);
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
// Move off the highlighted choice so the selection differs from current.
chat.handle_key_event(KeyEvent::from(KeyCode::Down));
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let event = rx.try_recv().expect("expected AppEvent");
assert_matches!(
event,
AppEvent::OpenPlanReasoningScopePrompt {
model,
effort: Some(_)
} if model == "gpt-5.1-codex-max"
);
}
// Re-selecting the already-active effort in Plan mode is a no-op selection:
// it should apply normally (UpdateModel + UpdateReasoningEffort) with no
// scope prompt.
#[tokio::test]
async fn reasoning_selection_in_plan_mode_without_effort_change_does_not_open_scope_prompt_event() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, true);
let plan_mask = collaboration_modes::plan_mask(chat.models_manager.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let _ = drain_insert_history(&mut rx);
set_chatgpt_auth(&mut chat);
// Align the global default with the preset default so Enter is a true no-op.
let current_preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.set_reasoning_effort(Some(current_preset.default_reasoning_effort));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdateModel(model) if model == "gpt-5.1-codex-max"
)),
"expected model update event; events: {events:?}"
);
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::UpdateReasoningEffort(Some(_)))),
"expected reasoning update event; events: {events:?}"
);
}
// Regression case: the selection matches Plan's effective effort (medium)
// but differs from the stored global default (high) — the scope prompt must
// still open so the global default is not rewritten silently.
#[tokio::test]
async fn reasoning_selection_in_plan_mode_matching_plan_effort_but_different_global_opens_scope_prompt()
{
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, true);
let plan_mask = collaboration_modes::plan_mask(chat.models_manager.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let _ = drain_insert_history(&mut rx);
set_chatgpt_auth(&mut chat);
// Reproduce: Plan effective reasoning remains the preset (medium), but the
// global default differs (high). Pressing Enter on the current Plan choice
// should open the scope prompt rather than silently rewriting the global default.
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let event = rx.try_recv().expect("expected AppEvent");
assert_matches!(
event,
AppEvent::OpenPlanReasoningScopePrompt {
model,
effort: Some(ReasoningEffortConfig::Medium)
} if model == "gpt-5.1-codex-max"
);
}
// With a Plan override of Low and a global default of High, the reasoning
// popup's "(current)" marker must follow the Plan override, not the global.
#[tokio::test]
async fn plan_mode_reasoning_override_is_marked_current_in_reasoning_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.set_feature_enabled(Feature::CollaborationModes, true);
set_chatgpt_auth(&mut chat);
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
chat.set_plan_mode_reasoning_effort(Some(ReasoningEffortConfig::Low));
let plan_mask = collaboration_modes::plan_mask(chat.models_manager.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 100);
assert!(popup.contains("Low (current)"));
assert!(
!popup.contains("High (current)"),
"expected Plan override to drive current reasoning label, got: {popup}"
);
}
// Switching to a different model ("gpt-5") while in Plan mode bypasses the
// scope prompt: the normal UpdateModel/UpdateReasoningEffort path applies.
#[tokio::test]
async fn reasoning_selection_in_plan_mode_model_switch_does_not_open_scope_prompt_event() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, true);
let plan_mask = collaboration_modes::plan_mask(chat.models_manager.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let _ = drain_insert_history(&mut rx);
set_chatgpt_auth(&mut chat);
let preset = get_available_model(&chat, "gpt-5");
chat.open_reasoning_popup(preset);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdateModel(model) if model == "gpt-5"
)),
"expected model update event; events: {events:?}"
);
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::UpdateReasoningEffort(Some(_)))),
"expected reasoning update event; events: {events:?}"
);
}
// Choosing the second ("all modes") item in the scope popup must update and
// persist the Plan override AND persist the global model/effort selection.
#[tokio::test]
async fn plan_reasoning_scope_popup_all_modes_persists_global_and_plan_override() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.open_plan_reasoning_scope_prompt(
"gpt-5.1-codex-max".to_string(),
Some(ReasoningEffortConfig::High),
);
// Down selects the "all modes" item (second in the list).
chat.handle_key_event(KeyEvent::from(KeyCode::Down));
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdatePlanModeReasoningEffort(Some(ReasoningEffortConfig::High))
)),
"expected plan override to be updated; events: {events:?}"
);
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::PersistPlanModeReasoningEffort(Some(ReasoningEffortConfig::High))
)),
"expected updated plan override to be persisted; events: {events:?}"
);
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::PersistModelSelection { model, effort: Some(ReasoningEffortConfig::High) }
if model == "gpt-5.1-codex-max"
)),
"expected global model reasoning selection persistence; events: {events:?}"
);
}
// The scope popup copy must name the selected level ("medium") and describe
// the existing user-chosen Plan override ("low") it would replace.
#[tokio::test]
async fn plan_reasoning_scope_popup_mentions_selected_reasoning() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.set_plan_mode_reasoning_effort(Some(ReasoningEffortConfig::Low));
chat.open_plan_reasoning_scope_prompt(
"gpt-5.1-codex-max".to_string(),
Some(ReasoningEffortConfig::Medium),
);
let popup = render_bottom_popup(&chat, 100);
assert!(popup.contains("Choose where to apply medium reasoning."));
assert!(popup.contains("Always use medium reasoning in Plan mode."));
assert!(popup.contains("Apply to Plan mode override"));
assert!(popup.contains("Apply to global default and Plan mode override"));
assert!(popup.contains("user-chosen Plan override (low)"));
}
// Without a configured override, the scope popup describes the built-in Plan
// preset default instead of a user-chosen override.
#[tokio::test]
async fn plan_reasoning_scope_popup_mentions_built_in_plan_default_when_no_override() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.open_plan_reasoning_scope_prompt(
"gpt-5.1-codex-max".to_string(),
Some(ReasoningEffortConfig::Medium),
);
let popup = render_bottom_popup(&chat, 100);
assert!(popup.contains("built-in Plan default (medium)"));
}
// Choosing the first ("Plan only") item must update the Plan override without
// emitting any global UpdateReasoningEffort event.
#[tokio::test]
async fn plan_reasoning_scope_popup_plan_only_does_not_update_all_modes_reasoning() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.open_plan_reasoning_scope_prompt(
"gpt-5.1-codex-max".to_string(),
Some(ReasoningEffortConfig::High),
);
// Enter on the initially highlighted first item ("Plan only").
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdatePlanModeReasoningEffort(Some(ReasoningEffortConfig::High))
)),
"expected plan-only reasoning update; events: {events:?}"
);
assert!(
events
.iter()
.all(|event| !matches!(event, AppEvent::UpdateReasoningEffort(_))),
"did not expect all-modes reasoning update; events: {events:?}"
);
}
#[tokio::test]
async fn submit_user_message_with_mode_errors_when_mode_changes_during_running_turn() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
@ -3958,7 +4195,29 @@ async fn set_reasoning_effort_updates_active_collaboration_mask() {
chat.set_reasoning_effort(None);
assert_eq!(chat.current_reasoning_effort(), None);
assert_eq!(
chat.current_reasoning_effort(),
Some(ReasoningEffortConfig::Medium)
);
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Plan);
}
// A generic global-default reasoning update (Low) must not clobber an active
// Plan-mode override (High) while Plan mode is the active mask.
#[tokio::test]
async fn set_reasoning_effort_does_not_override_active_plan_override() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1")).await;
chat.set_feature_enabled(Feature::CollaborationModes, true);
chat.set_plan_mode_reasoning_effort(Some(ReasoningEffortConfig::High));
let plan_mask =
collaboration_modes::mask_for_kind(chat.models_manager.as_ref(), ModeKind::Plan)
.expect("expected plan collaboration mask");
chat.set_collaboration_mask(plan_mask);
chat.set_reasoning_effort(Some(ReasoningEffortConfig::Low));
assert_eq!(
chat.current_reasoning_effort(),
Some(ReasoningEffortConfig::High)
);
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Plan);
}

View file

@ -32,4 +32,13 @@ The generated JSON Schema for `config.toml` lives at `codex-rs/core/config.schem
Codex stores "do not show again" flags for some UI prompts under the `[notice]` table.
## Plan mode defaults
`plan_mode_reasoning_effort` sets a reasoning-effort override that applies only
to Plan mode. When unset, Plan mode uses the built-in Plan preset default
(currently `medium`). When explicitly set (including `none`), it overrides the
Plan preset. The string value `none` means "no reasoning" (an explicit Plan
override), not "inherit the global default". There is currently no separate
config value for "follow the global default in Plan mode".
Ctrl+C/Ctrl+D quitting uses a ~1 second double-press hint (`ctrl + c again to quit`).