add fast mode toggle (#13212)

- add a local Fast mode setting in codex-core (similar to how the model id
  is currently stored on disk locally)
- send `service_tier=priority` on requests when Fast is enabled
- add `/fast` in the TUI and persist the setting locally
- gate the behavior behind a `fast_mode` feature flag (off by default)
pash-openai 2026-03-02 20:29:33 -08:00 committed by GitHub
parent 56cc2c71f4
commit 2f5b01abd6
69 changed files with 929 additions and 127 deletions
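
For reference, a minimal `config.toml` sketch of the opt-in, using only the keys this commit introduces (see the JSON schema and config tests below); anything beyond these two keys is illustrative:

```toml
# Hypothetical opt-in: `service_tier` is honored only while the
# `fast_mode` feature flag (off by default) is enabled.
service_tier = "fast"

[features]
fast_mode = true
```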

View file

@ -5323,6 +5323,7 @@ impl CodexMessageProcessor {
model,
effort,
summary: Some(summary),
service_tier: None,
final_output_json_schema: output_schema,
collaboration_mode: None,
personality: None,
@ -5880,6 +5881,7 @@ impl CodexMessageProcessor {
model: params.model,
effort: params.effort.map(Some),
summary: params.summary,
service_tier: None,
collaboration_mode,
personality: params.personality,
})

View file

@ -155,6 +155,8 @@ pub struct ResponsesApiRequest {
pub stream: bool,
pub include: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub service_tier: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prompt_cache_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub text: Option<TextControls>,
@ -174,6 +176,7 @@ impl From<&ResponsesApiRequest> for ResponseCreateWsRequest {
store: request.store,
stream: request.stream,
include: request.include.clone(),
service_tier: request.service_tier.clone(),
prompt_cache_key: request.prompt_cache_key.clone(),
text: request.text.clone(),
generate: None,
@ -197,6 +200,8 @@ pub struct ResponseCreateWsRequest {
pub stream: bool,
pub include: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub service_tier: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prompt_cache_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub text: Option<TextControls>,
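
The `skip_serializing_if` attribute means non-Fast turns produce request bodies identical to before this change; a standalone sketch of that behavior (a reduced stand-in, not the real `ResponsesApiRequest`, assuming `serde` and `serde_json` as dependencies):

```rust
use serde::Serialize;

// Reduced stand-in for ResponsesApiRequest; only the omission matters here.
#[derive(Serialize)]
struct Request {
    model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    service_tier: Option<String>,
}

fn main() {
    let fast = Request { model: "m".into(), service_tier: Some("priority".into()) };
    let standard = Request { model: "m".into(), service_tier: None };
    // {"model":"m","service_tier":"priority"}
    println!("{}", serde_json::to_string(&fast).unwrap());
    // {"model":"m"} -- the key is omitted entirely when None
    println!("{}", serde_json::to_string(&standard).unwrap());
}
```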

View file

@ -265,6 +265,7 @@ async fn streaming_client_retries_on_transport_error() -> Result<()> {
store: false,
stream: true,
include: Vec::new(),
service_tier: None,
prompt_cache_key: None,
text: None,
};
@ -306,6 +307,7 @@ async fn azure_default_store_attaches_ids_and_headers() -> Result<()> {
store: true,
stream: true,
include: Vec::new(),
service_tier: None,
prompt_cache_key: None,
text: None,
};

View file

@ -349,6 +349,9 @@
"experimental_windows_sandbox": {
"type": "boolean"
},
"fast_mode": {
"type": "boolean"
},
"include_apply_patch_tool": {
"type": "boolean"
},
@ -505,6 +508,9 @@
"sandbox_mode": {
"$ref": "#/definitions/SandboxMode"
},
"service_tier": {
"$ref": "#/definitions/ServiceTier"
},
"tools_view_image": {
"type": "boolean"
},
@ -1321,6 +1327,23 @@
},
"type": "object"
},
"ServiceTier": {
"oneOf": [
{
"enum": [
"fast"
],
"type": "string"
},
{
"description": "Legacy compatibility value for older local config files.",
"enum": [
"standard"
],
"type": "string"
}
]
},
"ShellEnvironmentPolicyInherit": {
"oneOf": [
{
@ -1724,6 +1747,9 @@
"experimental_windows_sandbox": {
"type": "boolean"
},
"fast_mode": {
"type": "boolean"
},
"include_apply_patch_tool": {
"type": "boolean"
},
@ -2112,6 +2138,14 @@
],
"description": "Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`."
},
"service_tier": {
"allOf": [
{
"$ref": "#/definitions/ServiceTier"
}
],
"description": "Optional explicit service tier preference for new turns."
},
"shell_environment_policy": {
"allOf": [
{
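
The `ServiceTier` enum this schema describes would look roughly like the following in codex-protocol (a sketch; the real definition likely carries additional derives such as `schemars::JsonSchema`):

```rust
use serde::{Deserialize, Serialize};

// Sketch of the enum behind the schema above; `Standard` is accepted
// only so older local config files keep deserializing, and it never
// reaches the wire (see the request-building match further down).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    Fast,
    /// Legacy compatibility value for older local config files.
    Standard,
}
```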

View file

@ -63,6 +63,7 @@ use codex_otel::OtelManager;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelInfo;
@ -520,6 +521,7 @@ impl ModelClientSession {
model_info: &ModelInfo,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
service_tier: Option<ServiceTier>,
) -> Result<ResponsesApiRequest> {
let instructions = &prompt.base_instructions.text;
let input = prompt.get_formatted_input();
@ -569,6 +571,10 @@ impl ModelClientSession {
store: provider.is_azure_responses_endpoint(),
stream: true,
include,
service_tier: match service_tier {
Some(ServiceTier::Fast) => Some("priority".to_string()),
_ => None,
},
prompt_cache_key,
text,
};
@ -793,6 +799,7 @@ impl ModelClientSession {
otel_manager: &OtelManager,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
) -> Result<ResponseStream> {
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
@ -823,6 +830,7 @@ impl ModelClientSession {
model_info,
effort,
summary,
service_tier,
)?;
let client = ApiResponsesClient::new(
transport,
@ -858,6 +866,7 @@ impl ModelClientSession {
otel_manager: &OtelManager,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
warmup: bool,
) -> Result<WebsocketStreamOutcome> {
@ -877,6 +886,7 @@ impl ModelClientSession {
model_info,
effort,
summary,
service_tier,
)?;
let mut ws_payload = ResponseCreateWsRequest {
client_metadata: build_ws_client_metadata(turn_metadata_header),
@ -958,6 +968,7 @@ impl ModelClientSession {
otel_manager: &OtelManager,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
) -> Result<()> {
let Some(ws_version) = self.client.active_ws_version(model_info) else {
@ -982,6 +993,7 @@ impl ModelClientSession {
otel_manager,
effort,
summary,
service_tier,
turn_metadata_header,
true,
)
@ -1020,6 +1032,7 @@ impl ModelClientSession {
otel_manager: &OtelManager,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
) -> Result<ResponseStream> {
let wire_api = self.client.state.provider.wire_api;
@ -1034,6 +1047,7 @@ impl ModelClientSession {
otel_manager,
effort,
summary,
service_tier,
turn_metadata_header,
false,
)
@ -1052,6 +1066,7 @@ impl ModelClientSession {
otel_manager,
effort,
summary,
service_tier,
turn_metadata_header,
)
.await
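
The match in `build_request` above is the only place the `priority` wire value is produced; restated as a self-contained sketch:

```rust
#[derive(Clone, Copy)]
enum ServiceTier { Fast, Standard }

// Only an explicit Fast preference maps to "priority"; everything else
// (None, or the legacy Standard) omits the field from the request.
fn wire_service_tier(tier: Option<ServiceTier>) -> Option<String> {
    match tier {
        Some(ServiceTier::Fast) => Some("priority".to_string()),
        _ => None,
    }
}

fn main() {
    assert_eq!(wire_service_tier(Some(ServiceTier::Fast)).as_deref(), Some("priority"));
    assert_eq!(wire_service_tier(Some(ServiceTier::Standard)), None);
    assert_eq!(wire_service_tier(None), None);
}
```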

View file

@ -255,6 +255,7 @@ mod tests {
stream: true,
include: vec![],
prompt_cache_key: None,
service_tier: None,
text: Some(TextControls {
verbosity: Some(OpenAiVerbosity::Low),
format: None,
@ -296,6 +297,7 @@ mod tests {
stream: true,
include: vec![],
prompt_cache_key: None,
service_tier: None,
text: Some(text_controls),
};
@ -332,6 +334,7 @@ mod tests {
stream: true,
include: vec![],
prompt_cache_key: None,
service_tier: None,
text: None,
};

View file

@ -290,6 +290,7 @@ use codex_otel::TelemetryAuthMode;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
@ -459,6 +460,7 @@ impl Codex {
provider: config.model_provider.clone(),
collaboration_mode,
model_reasoning_summary: config.model_reasoning_summary,
service_tier: config.service_tier,
developer_instructions: config.developer_instructions.clone(),
user_instructions,
personality: config.personality,
@ -823,6 +825,7 @@ pub(crate) struct SessionConfiguration {
collaboration_mode: CollaborationMode,
model_reasoning_summary: Option<ReasoningSummaryConfig>,
service_tier: Option<ServiceTier>,
/// Developer instructions that supplement the base instructions.
developer_instructions: Option<String>,
@ -897,6 +900,9 @@ impl SessionConfiguration {
if let Some(summary) = updates.reasoning_summary {
next_configuration.model_reasoning_summary = Some(summary);
}
if let Some(service_tier) = updates.service_tier {
next_configuration.service_tier = service_tier;
}
if let Some(personality) = updates.personality {
next_configuration.personality = Some(personality);
}
@ -927,6 +933,7 @@ pub(crate) struct SessionSettingsUpdate {
pub(crate) windows_sandbox_level: Option<WindowsSandboxLevel>,
pub(crate) collaboration_mode: Option<CollaborationMode>,
pub(crate) reasoning_summary: Option<ReasoningSummaryConfig>,
pub(crate) service_tier: Option<Option<ServiceTier>>,
pub(crate) final_output_json_schema: Option<Option<Value>>,
pub(crate) personality: Option<Personality>,
pub(crate) app_server_client_name: Option<String>,
@ -997,6 +1004,7 @@ impl Session {
per_turn_config.model_reasoning_effort =
session_configuration.collaboration_mode.reasoning_effort();
per_turn_config.model_reasoning_summary = session_configuration.model_reasoning_summary;
per_turn_config.service_tier = session_configuration.service_tier;
per_turn_config.personality = session_configuration.personality;
let resolved_web_search_mode = resolve_web_search_mode_for_turn(
&per_turn_config.web_search_mode,
@ -3690,6 +3698,7 @@ async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiv
model,
effort,
summary,
service_tier,
collaboration_mode,
personality,
} => {
@ -3713,6 +3722,7 @@ async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiv
windows_sandbox_level,
collaboration_mode: Some(collaboration_mode),
reasoning_summary: summary,
service_tier,
personality,
..Default::default()
},
@ -3909,6 +3919,7 @@ mod handlers {
model,
effort,
summary,
service_tier,
final_output_json_schema,
items,
collaboration_mode,
@ -3933,6 +3944,7 @@ mod handlers {
windows_sandbox_level: None,
collaboration_mode,
reasoning_summary: summary,
service_tier,
final_output_json_schema: Some(final_output_json_schema),
personality,
app_server_client_name: None,
@ -6200,6 +6212,7 @@ async fn try_run_sampling_request(
&turn_context.otel_manager,
turn_context.reasoning_effort,
turn_context.reasoning_summary,
turn_context.config.service_tier,
turn_metadata_header,
)
.instrument(trace_span!("stream_request"))
@ -7697,6 +7710,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions
@ -7791,6 +7805,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions
@ -8104,6 +8119,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions
@ -8159,6 +8175,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions
@ -8250,6 +8267,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions
@ -8418,6 +8436,7 @@ mod tests {
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
service_tier: None,
personality: config.personality,
base_instructions: config
.base_instructions

View file

@ -402,6 +402,7 @@ async fn drain_to_completed(
&turn_context.otel_manager,
turn_context.reasoning_effort,
turn_context.reasoning_summary,
turn_context.config.service_tier,
turn_metadata_header,
)
.await?;

View file

@ -5,6 +5,7 @@ use crate::path_utils::write_atomically;
use anyhow::Context;
use codex_config::CONFIG_TOML_FILE;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::collections::BTreeMap;
@ -26,6 +27,8 @@ pub enum ConfigEdit {
model: Option<String>,
effort: Option<ReasoningEffort>,
},
/// Update the service tier preference for future turns.
SetServiceTier { service_tier: Option<ServiceTier> },
/// Update the active (or default) model personality.
SetModelPersonality { personality: Option<Personality> },
/// Toggle the acknowledgement flag under `[notice]`.
@ -327,6 +330,10 @@ impl ConfigDocument {
);
mutated
}),
ConfigEdit::SetServiceTier { service_tier } => Ok(self.write_profile_value(
&["service_tier"],
service_tier.map(|service_tier| value(service_tier.to_string())),
)),
ConfigEdit::SetModelPersonality { personality } => Ok(self.write_profile_value(
&["personality"],
personality.map(|personality| value(personality.to_string())),
@ -774,6 +781,11 @@ impl ConfigEditsBuilder {
self
}
pub fn set_service_tier(mut self, service_tier: Option<ServiceTier>) -> Self {
self.edits.push(ConfigEdit::SetServiceTier { service_tier });
self
}
pub fn set_personality(mut self, personality: Option<Personality>) -> Self {
self.edits
.push(ConfigEdit::SetModelPersonality { personality });

View file

@ -62,6 +62,7 @@ use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
@ -185,6 +186,9 @@ pub struct Config {
/// Optional override of model selection.
pub model: Option<String>,
/// Effective service tier preference for new turns.
pub service_tier: Option<ServiceTier>,
/// Model used specifically for review sessions.
pub review_model: Option<String>,
@ -1184,6 +1188,9 @@ pub struct ConfigToml {
/// Optionally specify a personality for the model
pub personality: Option<Personality>,
/// Optional explicit service tier preference for new turns.
pub service_tier: Option<ServiceTier>,
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,
@ -1948,6 +1955,14 @@ impl Config {
let forced_login_method = cfg.forced_login_method;
let model = model.or(config_profile.model).or(cfg.model);
let service_tier = if features.enabled(Feature::FastMode) {
config_profile
.service_tier
.or(cfg.service_tier)
.filter(|tier| matches!(tier, ServiceTier::Fast))
} else {
None
};
let compact_prompt = compact_prompt.or(cfg.compact_prompt).and_then(|value| {
let trimmed = value.trim();
@ -2094,6 +2109,7 @@ impl Config {
let config = Self {
model,
service_tier,
review_model,
model_context_window: cfg.model_context_window,
model_auto_compact_token_limit: cfg.model_auto_compact_token_limit,
@ -4878,6 +4894,7 @@ model_verbosity = "high"
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
service_tier: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
permissions: Permissions {
@ -5007,6 +5024,7 @@ model_verbosity = "high"
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
service_tier: None,
model_provider_id: "openai-custom".to_string(),
model_provider: fixture.openai_custom_provider.clone(),
permissions: Permissions {
@ -5134,6 +5152,7 @@ model_verbosity = "high"
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
service_tier: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
permissions: Permissions {
@ -5247,6 +5266,7 @@ model_verbosity = "high"
review_model: None,
model_context_window: None,
model_auto_compact_token_limit: None,
service_tier: None,
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
permissions: Permissions {
@ -5635,6 +5655,33 @@ trust_level = "untrusted"
Ok(())
}
#[test]
fn legacy_standard_service_tier_loads_as_default_none() -> anyhow::Result<()> {
let codex_home = TempDir::new()?;
let cfg = toml::from_str::<ConfigToml>(
r#"
service_tier = "standard"
[features]
fast_mode = true
"#,
)
.expect("TOML deserialization should succeed");
let config = Config::load_from_base_config_with_overrides(
cfg,
ConfigOverrides {
cwd: Some(codex_home.path().to_path_buf()),
..Default::default()
},
codex_home.path().to_path_buf(),
)?;
assert_eq!(config.service_tier, None);
Ok(())
}
#[test]
fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults()
-> anyhow::Result<()> {
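
Isolated from `Config::load`, the resolution above amounts to the following (names are illustrative, not the real codex-core signatures):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum ServiceTier { Fast, Standard }

// The feature flag gates everything; the profile value beats the
// top-level config value; any non-Fast value, including the legacy
// "standard", resolves to None.
fn resolve_service_tier(
    fast_mode_enabled: bool,
    profile: Option<ServiceTier>,
    top_level: Option<ServiceTier>,
) -> Option<ServiceTier> {
    if !fast_mode_enabled {
        return None;
    }
    profile
        .or(top_level)
        .filter(|tier| matches!(tier, ServiceTier::Fast))
}

fn main() {
    // Mirrors the legacy_standard_service_tier_loads_as_default_none test
    // below: "standard" deserializes but resolves to None.
    assert_eq!(resolve_service_tier(true, None, Some(ServiceTier::Standard)), None);
    assert_eq!(resolve_service_tier(true, None, Some(ServiceTier::Fast)), Some(ServiceTier::Fast));
    assert_eq!(resolve_service_tier(false, None, Some(ServiceTier::Fast)), None);
}
```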

View file

@ -8,6 +8,7 @@ use crate::config::types::WindowsToml;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::openai_models::ReasoningEffort;
@ -18,6 +19,7 @@ use codex_protocol::openai_models::ReasoningEffort;
#[schemars(deny_unknown_fields)]
pub struct ConfigProfile {
pub model: Option<String>,
pub service_tier: Option<ServiceTier>,
/// The key in the `model_providers` map identifying the
/// [`ModelProviderInfo`] to use.
pub model_provider: Option<String>,

View file

@ -7,6 +7,7 @@ use crate::config_loader::RequirementSource;
pub use codex_protocol::config_types::AltScreenMode;
pub use codex_protocol::config_types::ModeKind;
pub use codex_protocol::config_types::Personality;
pub use codex_protocol::config_types::ServiceTier;
pub use codex_protocol::config_types::WebSearchMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::collections::BTreeMap;

View file

@ -145,6 +145,8 @@ pub enum Feature {
CollaborationModes,
/// Enable personality selection in the TUI.
Personality,
/// Enable Fast mode selection in the TUI and request layer.
FastMode,
/// Enable voice transcription in the TUI composer.
VoiceTranscription,
/// Enable experimental realtime voice conversation mode in the TUI.
@ -660,6 +662,12 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::FastMode,
key: "fast_mode",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::VoiceTranscription,
key: "voice_transcription",

View file

@ -13,6 +13,7 @@ use crate::rollout::policy::should_persist_response_item_for_memories;
use codex_api::ResponseEvent;
use codex_otel::OtelManager;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::models::BaseInstructions;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
@ -36,6 +37,7 @@ pub(in crate::memories) struct RequestContext {
pub(in crate::memories) otel_manager: OtelManager,
pub(in crate::memories) reasoning_effort: Option<ReasoningEffortConfig>,
pub(in crate::memories) reasoning_summary: ReasoningSummaryConfig,
pub(in crate::memories) service_tier: Option<ServiceTier>,
pub(in crate::memories) turn_metadata_header: Option<String>,
}
@ -144,6 +146,7 @@ impl RequestContext {
otel_manager: turn_context.otel_manager.clone(),
reasoning_effort: Some(phase_one::REASONING_EFFORT),
reasoning_summary: turn_context.reasoning_summary,
service_tier: turn_context.config.service_tier,
}
}
}
@ -322,6 +325,7 @@ mod job {
&stage_one_context.otel_manager,
stage_one_context.reasoning_effort,
stage_one_context.reasoning_summary,
stage_one_context.service_tier,
stage_one_context.turn_metadata_header.as_deref(),
)
.await?;

View file

@ -44,6 +44,7 @@ impl RegularTask {
&turn_context.otel_manager,
turn_context.reasoning_effort,
turn_context.reasoning_summary,
turn_context.config.service_tier,
turn_metadata_header.as_deref(),
)
.await?;

View file

@ -11,6 +11,7 @@ use codex_core::ThreadManager;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::features::Feature;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
@ -282,11 +283,36 @@ impl TestCodex {
.await
}
pub async fn submit_turn_with_service_tier(
&self,
prompt: &str,
service_tier: Option<ServiceTier>,
) -> Result<()> {
self.submit_turn_with_context(
prompt,
AskForApproval::Never,
SandboxPolicy::DangerFullAccess,
Some(service_tier),
)
.await
}
pub async fn submit_turn_with_policies(
&self,
prompt: &str,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
) -> Result<()> {
self.submit_turn_with_context(prompt, approval_policy, sandbox_policy, None)
.await
}
async fn submit_turn_with_context(
&self,
prompt: &str,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
service_tier: Option<Option<ServiceTier>>,
) -> Result<()> {
let session_model = self.session_configured.model.clone();
self.codex
@ -302,6 +328,7 @@ impl TestCodex {
model: session_model,
effort: None,
summary: None,
service_tier,
collaboration_mode: None,
personality: None,
})

View file

@ -118,6 +118,7 @@ async fn responses_stream_includes_subagent_header_on_review() {
effort,
summary.unwrap_or(model_info.default_reasoning_summary),
None,
None,
)
.await
.expect("stream failed");
@ -230,6 +231,7 @@ async fn responses_stream_includes_subagent_header_on_other() {
effort,
summary.unwrap_or(model_info.default_reasoning_summary),
None,
None,
)
.await
.expect("stream failed");
@ -341,6 +343,7 @@ async fn responses_respects_model_info_overrides_from_config() {
effort,
summary.unwrap_or(model_info.default_reasoning_summary),
None,
None,
)
.await
.expect("stream failed");

View file

@ -1,5 +1,6 @@
use anyhow::Result;
use codex_core::features::Feature;
use codex_protocol::config_types::ServiceTier;
use core_test_support::responses::WebSocketConnectionConfig;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
@ -244,3 +245,171 @@ async fn websocket_v2_test_codex_shell_chain() -> Result<()> {
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_first_turn_uses_updated_fast_tier_after_startup_prewarm() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "fast"),
ev_completed("resp-1"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup.get("service_tier"), None);
test.submit_turn_with_service_tier("hello", Some(ServiceTier::Fast))
.await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn["service_tier"].as_str(), Some("priority"));
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_first_turn_drops_fast_tier_after_startup_prewarm() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "standard"),
ev_completed("resp-1"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
config.service_tier = Some(ServiceTier::Fast);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup["service_tier"].as_str(), Some("priority"));
test.submit_turn_with_service_tier("hello", None).await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn.get("service_tier"), None);
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_v2_next_turn_uses_updated_service_tier() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_websocket_server(vec![vec![
vec![ev_response_created("warm-1"), ev_done_with_id("warm-1")],
vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "fast"),
ev_completed("resp-1"),
],
vec![
ev_response_created("resp-2"),
ev_assistant_message("msg-2", "standard"),
ev_completed("resp-2"),
],
]])
.await;
let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::ResponsesWebsocketsV2);
});
let test = builder.build_with_websocket_server(&server).await?;
let warmup = server.wait_for_request(0, 0).await.body_json();
assert_eq!(warmup["type"].as_str(), Some("response.create"));
assert_eq!(warmup["generate"].as_bool(), Some(false));
assert_eq!(warmup.get("service_tier"), None);
test.submit_turn_with_service_tier("first", Some(ServiceTier::Fast))
.await?;
test.submit_turn_with_service_tier("second", None).await?;
assert_eq!(server.handshakes().len(), 1);
let connection = server.single_connection();
assert_eq!(connection.len(), 3);
let first_turn = connection
.get(1)
.expect("missing first turn request")
.body_json();
let second_turn = connection
.get(2)
.expect("missing second turn request")
.body_json();
assert_eq!(first_turn["type"].as_str(), Some("response.create"));
assert_eq!(first_turn["service_tier"].as_str(), Some("priority"));
assert_eq!(first_turn.get("previous_response_id"), None);
assert!(
first_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
assert_eq!(second_turn["type"].as_str(), Some("response.create"));
assert_eq!(second_turn.get("service_tier"), None);
assert_eq!(second_turn.get("previous_response_id"), None);
assert!(
second_turn
.get("input")
.and_then(Value::as_array)
.is_some_and(|items| !items.is_empty())
);
server.shutdown().await;
Ok(())
}

View file

@ -312,6 +312,7 @@ async fn apply_patch_cli_move_without_content_change_has_no_turn_diff(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -907,6 +908,7 @@ async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -987,6 +989,7 @@ async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() ->
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1137,6 +1140,7 @@ async fn apply_patch_emits_turn_diff_event_with_unified_diff(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1200,6 +1204,7 @@ async fn apply_patch_turn_diff_for_rename_with_content_change(
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1271,6 +1276,7 @@ async fn apply_patch_aggregates_diff_across_multiple_tool_calls() -> Result<()>
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1342,6 +1348,7 @@ async fn apply_patch_aggregates_diff_preserves_success_after_failure() -> Result
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -554,6 +554,7 @@ async fn submit_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -1128,6 +1128,7 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
.model_reasoning_summary
.unwrap_or(ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
@ -1240,6 +1241,7 @@ async fn user_turn_explicit_reasoning_summary_overrides_model_catalog_default()
model: session_configured.model,
effort: None,
summary: Some(ReasoningSummary::Concise),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@ -1718,6 +1720,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
effort,
summary.unwrap_or(ReasoningSummary::Auto),
None,
None,
)
.await
.expect("responses stream to start");

View file

@ -16,6 +16,7 @@ use codex_otel::metrics::MetricsConfig;
use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::models::BaseInstructions;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
@ -140,6 +141,7 @@ async fn responses_websocket_request_prewarm_reuses_connection() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@ -220,6 +222,7 @@ async fn responses_websocket_preconnect_is_reused_even_with_header_changes() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@ -257,6 +260,7 @@ async fn responses_websocket_request_prewarm_is_reused_even_with_header_changes(
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@ -268,6 +272,7 @@ async fn responses_websocket_request_prewarm_is_reused_even_with_header_changes(
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@ -320,6 +325,7 @@ async fn responses_websocket_prewarm_uses_v2_when_model_prefers_websockets_and_f
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket prewarm failed");
@ -693,6 +699,7 @@ async fn responses_websocket_emits_reasoning_included_event() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@ -764,6 +771,7 @@ async fn responses_websocket_emits_rate_limit_events() {
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@ -1054,6 +1062,7 @@ async fn responses_websocket_forwards_turn_metadata_on_create_and_append() {
&mut client_session,
&harness,
&prompt_one,
None,
Some(first_turn_metadata),
)
.await;
@ -1061,6 +1070,7 @@ async fn responses_websocket_forwards_turn_metadata_on_create_and_append() {
&mut client_session,
&harness,
&prompt_two,
None,
Some(enriched_turn_metadata),
)
.await;
@ -1324,6 +1334,7 @@ async fn responses_websocket_v2_after_error_uses_full_create_without_previous_re
harness.effort,
harness.summary,
None,
None,
)
.await
.expect("websocket stream failed");
@ -1555,13 +1566,24 @@ async fn stream_until_complete(
harness: &WebsocketTestHarness,
prompt: &Prompt,
) {
stream_until_complete_with_turn_metadata(client_session, harness, prompt, None).await;
stream_until_complete_with_service_tier(client_session, harness, prompt, None).await;
}
async fn stream_until_complete_with_service_tier(
client_session: &mut ModelClientSession,
harness: &WebsocketTestHarness,
prompt: &Prompt,
service_tier: Option<ServiceTier>,
) {
stream_until_complete_with_turn_metadata(client_session, harness, prompt, service_tier, None)
.await;
}
async fn stream_until_complete_with_turn_metadata(
client_session: &mut ModelClientSession,
harness: &WebsocketTestHarness,
prompt: &Prompt,
service_tier: Option<ServiceTier>,
turn_metadata_header: Option<&str>,
) {
let mut stream = client_session
@ -1571,6 +1593,7 @@ async fn stream_until_complete_with_turn_metadata(
&harness.otel_manager,
harness.effort,
harness.summary,
service_tier,
turn_metadata_header,
)
.await

View file

@ -119,6 +119,7 @@ async fn user_input_includes_collaboration_instructions_after_override() -> Resu
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -174,6 +175,7 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
.model_reasoning_summary
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(collaboration_mode),
final_output_json_schema: None,
personality: None,
@ -213,6 +215,7 @@ async fn override_then_next_turn_uses_updated_collaboration_instructions() -> Re
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -263,6 +266,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(base_mode),
personality: None,
})
@ -284,6 +288,7 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
.model_reasoning_summary
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: Some(turn_mode),
final_output_json_schema: None,
personality: None,
@ -330,6 +335,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(first_text))),
personality: None,
})
@ -355,6 +361,7 @@ async fn collaboration_mode_update_emits_new_instruction_message() -> Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(second_text))),
personality: None,
})
@ -409,6 +416,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@ -434,6 +442,7 @@ async fn collaboration_mode_update_noop_does_not_append() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@ -487,6 +496,7 @@ async fn collaboration_mode_update_emits_new_instruction_message_when_mode_chang
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(default_text),
@ -515,6 +525,7 @@ async fn collaboration_mode_update_emits_new_instruction_message_when_mode_chang
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Plan,
Some(plan_text),
@ -572,6 +583,7 @@ async fn collaboration_mode_update_noop_does_not_append_when_mode_is_unchanged()
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(collab_text),
@ -600,6 +612,7 @@ async fn collaboration_mode_update_noop_does_not_append_when_mode_is_unchanged()
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_mode_and_instructions(
ModeKind::Default,
Some(collab_text),
@ -663,6 +676,7 @@ async fn resume_replays_collaboration_instructions() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collab_mode_with_instructions(Some(collab_text))),
personality: None,
})
@ -724,6 +738,7 @@ async fn empty_collaboration_instructions_are_ignored() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(CollaborationMode {
mode: ModeKind::Default,
settings: Settings {

View file

@ -1659,6 +1659,7 @@ async fn auto_compact_runs_after_resume_when_token_usage_is_over_limit() {
model: resumed.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1748,6 +1749,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1771,6 +1773,7 @@ async fn pre_sampling_compact_runs_on_switch_to_smaller_context_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1880,6 +1883,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1927,6 +1931,7 @@ async fn pre_sampling_compact_runs_after_resume_and_switch_to_smaller_model() {
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -3012,6 +3017,7 @@ async fn snapshot_request_shape_pre_turn_compaction_including_incoming_user_mess
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -3128,6 +3134,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: previous_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -3151,6 +3158,7 @@ async fn snapshot_request_shape_pre_turn_compaction_strips_incoming_model_switch
model: next_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -1926,6 +1926,7 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_including_incoming_us
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2035,6 +2036,7 @@ async fn snapshot_request_shape_remote_pre_turn_compaction_strips_incoming_model
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -55,6 +55,7 @@ async fn submit_user_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode,
personality: None,
})
@ -134,6 +135,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -126,6 +126,7 @@ async fn copy_paste_local_image_persists_rollout_request_shape() -> anyhow::Resu
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -208,6 +209,7 @@ async fn drag_drop_image_persists_rollout_request_shape() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -378,6 +378,7 @@ async fn plan_mode_emits_plan_item_from_proposed_plan_block() -> anyhow::Result<
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -453,6 +454,7 @@ async fn plan_mode_strips_plan_from_agent_messages() -> anyhow::Result<()> {
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -560,6 +562,7 @@ async fn plan_mode_streaming_citations_are_stripped_across_added_deltas_and_done
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -745,6 +748,7 @@ async fn plan_mode_streaming_proposed_plan_tag_split_across_added_and_delta_is_p
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -857,6 +861,7 @@ async fn plan_mode_handles_missing_plan_close_tag() -> anyhow::Result<()> {
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View file

@ -84,6 +84,7 @@ async fn codex_returns_json_result(model: String) -> anyhow::Result<()> {
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -65,6 +65,7 @@ async fn submit_skill_turn(test: &TestCodex, skill_path: PathBuf, prompt: &str)
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -33,6 +33,7 @@ async fn override_turn_context_does_not_persist_when_config_exists() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -69,6 +70,7 @@ async fn override_turn_context_does_not_create_config_file() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::Medium)),
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -4,6 +4,7 @@ use codex_core::config::types::Personality;
use codex_core::features::Feature;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::InputModality;
use codex_protocol::openai_models::ModelInfo;
@ -59,6 +60,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -74,6 +76,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -92,6 +95,7 @@ async fn model_change_appends_model_instructions_developer_message() -> Result<(
model: next_model.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -147,6 +151,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -162,6 +167,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: Some(next_model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@ -180,6 +186,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
model: next_model.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -207,6 +214,36 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn service_tier_change_is_applied_on_next_http_turn() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let test = test_codex().build(&server).await?;
test.submit_turn_with_service_tier("fast turn", Some(ServiceTier::Fast))
.await?;
test.submit_turn_with_service_tier("standard turn", None)
.await?;
let requests = resp_mock.requests();
assert_eq!(requests.len(), 2, "expected two model requests");
let first_body = requests[0].body_json();
let second_body = requests[1].body_json();
assert_eq!(first_body["service_tier"].as_str(), Some("priority"));
assert_eq!(second_body.get("service_tier"), None);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<()> {
skip_if_no_network!(Ok(()));
@ -296,6 +333,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
model: image_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -315,6 +353,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
model: text_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -474,6 +513,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: large_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -511,6 +551,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: Some(smaller_model_slug.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -529,6 +570,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
model: smaller_model_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -122,6 +122,7 @@ async fn snapshot_model_visible_layout_turn_overrides() -> Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -144,6 +145,7 @@ async fn snapshot_model_visible_layout_turn_overrides() -> Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@ -221,6 +223,7 @@ async fn snapshot_model_visible_layout_cwd_change_does_not_refresh_agents() -> R
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -243,6 +246,7 @@ async fn snapshot_model_visible_layout_cwd_change_does_not_refresh_agents() -> R
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -348,6 +352,7 @@ async fn snapshot_model_visible_layout_resume_with_personality_change() -> Resul
model: resumed.session_configured.model.clone(),
effort: resumed.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@ -436,6 +441,7 @@ async fn snapshot_model_visible_layout_resume_override_matches_rollout_model() -
model: Some("gpt-5.2".to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -100,6 +100,7 @@ async fn renews_cache_ttl_on_matching_models_etag() -> Result<()> {
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -106,6 +106,7 @@ async fn refresh_models_on_models_etag_mismatch_and_avoid_duplicate_models_fetch
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -121,6 +121,7 @@ async fn override_turn_context_without_user_turn_does_not_record_permissions_upd
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -161,6 +162,7 @@ async fn override_turn_context_without_user_turn_does_not_record_environment_upd
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -198,6 +200,7 @@ async fn override_turn_context_without_user_turn_does_not_record_collaboration_u
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View file

@ -120,6 +120,7 @@ async fn permissions_message_added_on_override_change() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -262,6 +263,7 @@ async fn resume_replays_permissions_messages() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -361,6 +363,7 @@ async fn resume_and_fork_append_permissions_messages() -> Result<()> {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -98,6 +98,7 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -144,6 +145,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -197,6 +199,7 @@ async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -256,6 +259,7 @@ async fn default_personality_is_pragmatic_without_config_toml() -> anyhow::Resul
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -303,6 +307,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -319,6 +324,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@ -337,6 +343,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -399,6 +406,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -415,6 +423,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@ -433,6 +442,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -505,6 +515,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -521,6 +532,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
@ -539,6 +551,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -649,6 +662,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@ -761,6 +775,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -777,6 +792,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: Some(Personality::Friendly),
})
@ -795,6 +811,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
model: remote_slug.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -413,6 +413,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
model: None,
effort: Some(Some(ReasoningEffort::High)),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -494,6 +495,7 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul
model: Some("gpt-5.1-codex".to_string()),
effort: Some(Some(ReasoningEffort::Low)),
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})
@ -680,6 +682,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
model: "o3".to_string(),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@ -788,6 +791,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
model: default_model.clone(),
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@ -807,6 +811,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
model: default_model.clone(),
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@ -907,6 +912,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
model: default_model,
effort: default_effort,
summary: Some(default_summary.unwrap_or(ReasoningSummary::Auto)),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,
@ -926,6 +932,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
model: "o3".to_string(),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
service_tier: None,
collaboration_mode: None,
final_output_json_schema: None,
personality: None,

View file

@ -177,6 +177,7 @@ async fn remote_models_long_model_slug_is_sent_with_high_reasoning() -> Result<(
model: requested_model.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -238,6 +239,7 @@ async fn namespaced_model_slug_uses_catalog_metadata_without_fallback_warning()
.model_reasoning_summary
.unwrap_or(ReasoningSummary::Auto),
),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -355,6 +357,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
model: Some(REMOTE_MODEL_SLUG.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -392,6 +395,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
model: REMOTE_MODEL_SLUG.to_string(),
effort: None,
summary: Some(ReasoningSummary::Auto),
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -587,6 +591,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
model: Some(model.to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -605,6 +610,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
model: model.to_string(),
effort: None,
summary: Some(ReasoningSummary::Auto),
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -135,6 +135,7 @@ async fn submit_turn(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -138,6 +138,7 @@ async fn request_user_input_round_trip_for_mode(mode: ModeKind) -> anyhow::Resul
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(CollaborationMode {
mode,
settings: Settings {
@ -254,6 +255,7 @@ where
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: Some(collaboration_mode),
personality: None,
})

View file

@ -340,6 +340,7 @@ async fn resume_model_switch_is_not_duplicated_after_pre_turn_override() -> Resu
model: Some("gpt-5.1-codex-max".to_string()),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -830,6 +830,7 @@ async fn review_uses_overridden_cwd_for_base_branch_merge_base() {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -130,6 +130,7 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -298,6 +299,7 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -498,6 +500,7 @@ async fn stdio_image_responses_are_sanitized_for_text_only_model() -> anyhow::Re
model: text_only_model_slug.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -611,6 +614,7 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -771,6 +775,7 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -991,6 +996,7 @@ async fn streamable_http_with_oauth_round_trip_impl() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -49,6 +49,7 @@ async fn openai_model_header_mismatch_emits_warning_event_and_warning_item() ->
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -146,6 +147,7 @@ async fn response_model_field_mismatch_emits_warning_when_header_matches_request
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -230,6 +232,7 @@ async fn openai_model_header_mismatch_only_emits_one_warning_per_turn() -> Resul
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -278,6 +281,7 @@ async fn openai_model_header_casing_only_mismatch_does_not_warn() -> Result<()>
model: REQUESTED_MODEL.to_string(),
effort: test.config.model_reasoning_effort,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -162,6 +162,7 @@ async fn run_snapshot_command_with_options(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -248,6 +249,7 @@ async fn run_shell_command_snapshot_with_options(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -317,6 +319,7 @@ async fn run_tool_turn_on_harness(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -535,6 +538,7 @@ async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> {
model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -67,6 +67,7 @@ async fn submit_turn_with_policies(
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -78,6 +78,7 @@ async fn user_turn_includes_skill_instructions() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -381,6 +381,7 @@ async fn mcp_call_marks_thread_memory_mode_polluted_when_configured() -> Result<
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -89,6 +89,7 @@ async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -158,6 +159,7 @@ async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -237,6 +239,7 @@ async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -328,6 +331,7 @@ async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -427,6 +431,7 @@ async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -46,6 +46,7 @@ async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -361,6 +362,7 @@ async fn shell_tools_start_before_response_completed_when_stream_delayed() -> an
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -490,6 +490,7 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -209,6 +209,7 @@ async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -338,6 +339,7 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -416,6 +418,7 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -497,6 +500,7 @@ async fn unified_exec_respects_workdir_override() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -590,6 +594,7 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -665,6 +670,7 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -741,6 +747,7 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -871,6 +878,7 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1008,6 +1016,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<(
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1168,6 +1177,7 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1266,6 +1276,7 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1384,6 +1395,7 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1474,6 +1486,7 @@ async fn unified_exec_can_enable_tty() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1555,6 +1568,7 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1686,6 +1700,7 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1854,6 +1869,7 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<()
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -1931,6 +1947,7 @@ async fn unified_exec_keeps_long_running_session_after_turn_end() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2019,6 +2036,7 @@ async fn unified_exec_interrupt_terminates_long_running_session() -> Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2116,6 +2134,7 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2251,6 +2270,7 @@ PY
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2365,6 +2385,7 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2461,6 +2482,7 @@ PY
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2543,6 +2565,7 @@ async fn unified_exec_runs_under_sandbox() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2647,6 +2670,7 @@ async fn unified_exec_python_prompt_under_seatbelt() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2742,6 +2766,7 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -2877,6 +2902,7 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -178,6 +178,7 @@ async fn user_shell_command_does_not_replace_active_turn() -> anyhow::Result<()>
model: fixture.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -113,6 +113,7 @@ async fn user_turn_with_local_image_attaches_image() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -215,6 +216,7 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -345,6 +347,7 @@ console.log(out.output?.body?.text ?? "");
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -434,6 +437,7 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -509,6 +513,7 @@ async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -601,6 +606,7 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -725,6 +731,7 @@ async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> an
model: model_slug.to_string(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
@ -801,6 +808,7 @@ async fn replaces_invalid_local_image_after_bad_request() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -161,6 +161,7 @@ async fn websocket_fallback_hides_first_websocket_retry_stream_error() -> Result
model: session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})

View file

@ -567,6 +567,7 @@ pub async fn run_main(cli: Cli, arg0_paths: Arg0DispatchPaths) -> anyhow::Result
model: default_model,
effort: default_effort,
summary: None,
service_tier: None,
final_output_json_schema: output_schema,
collaboration_mode: None,
personality: None,

View file

@ -113,6 +113,15 @@ pub enum WebSearchMode {
Live,
}
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ServiceTier {
/// Legacy compatibility value for older local config files.
Standard,
Fast,
}
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
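Since the new `ServiceTier` enum derives `Serialize`/`Deserialize` with `rename_all = "lowercase"`, tiers round-trip as lowercase string tokens in local config files, and the legacy `standard` value still parses. A minimal standalone sketch of that encoding (the enum is reproduced here for illustration; only `serde` with the derive feature and `serde_json` are assumed):

use serde::Deserialize;
use serde::Serialize;

// Mirrors the enum added above, trimmed to the serde-relevant derives.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
enum ServiceTier {
    /// Legacy compatibility value for older local config files.
    Standard,
    Fast,
}

fn main() {
    // New configs persist the fast tier as the lowercase token "fast".
    assert_eq!(serde_json::to_string(&ServiceTier::Fast).unwrap(), "\"fast\"");
    // Older config files that wrote "standard" still deserialize cleanly.
    let legacy: ServiceTier = serde_json::from_str("\"standard\"").unwrap();
    assert_eq!(legacy, ServiceTier::Standard);
}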

View file

@ -18,6 +18,7 @@ use crate::config_types::CollaborationMode;
use crate::config_types::ModeKind;
use crate::config_types::Personality;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::config_types::ServiceTier;
use crate::config_types::WindowsSandboxLevel;
use crate::custom_prompts::CustomPrompt;
use crate::dynamic_tools::DynamicToolCallOutputContentItem;
@ -222,6 +223,15 @@ pub enum Op {
/// fall back to the selected model's default on new sessions).
#[serde(default, skip_serializing_if = "Option::is_none")]
summary: Option<ReasoningSummaryConfig>,
/// Optional service tier override for this turn.
///
/// Use `Some(Some(_))` to set a specific tier for this turn, `Some(None)` to
/// explicitly clear the tier for this turn, or `None` to keep the existing
/// session preference.
#[serde(default, skip_serializing_if = "Option::is_none")]
service_tier: Option<Option<ServiceTier>>,
// The JSON schema to use for the final assistant message
final_output_json_schema: Option<Value>,
@ -274,6 +284,13 @@ pub enum Op {
#[serde(skip_serializing_if = "Option::is_none")]
summary: Option<ReasoningSummaryConfig>,
/// Updated service tier preference for future turns.
///
/// Use `Some(Some(_))` to set a specific tier, `Some(None)` to clear the
/// preference, or `None` to leave the existing value unchanged.
#[serde(skip_serializing_if = "Option::is_none")]
service_tier: Option<Option<ServiceTier>>,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, effort, and developer instructions if set.
#[serde(skip_serializing_if = "Option::is_none")]
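The doubled `Option<Option<ServiceTier>>` encodes three distinct states, and the distinction matters at every call site that builds one of these ops. A minimal sketch of the decode logic these doc comments describe, using a hypothetical `apply_override` helper (not part of this change) and a locally stubbed `ServiceTier`:

#[derive(Clone, Copy, Debug, PartialEq)]
enum ServiceTier {
    Standard,
    Fast,
}

// Hypothetical helper: resolves a tri-state override against the session value.
fn apply_override(
    current: Option<ServiceTier>,
    requested: Option<Option<ServiceTier>>,
) -> Option<ServiceTier> {
    match requested {
        None => current,                // field omitted: keep the session preference
        Some(None) => None,             // explicit null: clear the tier
        Some(Some(tier)) => Some(tier), // explicit value: set the tier
    }
}

fn main() {
    let session = Some(ServiceTier::Standard);
    assert_eq!(apply_override(session, None), Some(ServiceTier::Standard));
    assert_eq!(apply_override(session, Some(None)), None);
    assert_eq!(
        apply_override(session, Some(Some(ServiceTier::Fast))),
        Some(ServiceTier::Fast)
    );
}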

View file

@ -1375,6 +1375,7 @@ impl App {
// Start a fresh in-memory session while preserving resumability via persisted rollout
// history.
let model = self.chat_widget.current_model().to_string();
let config = self.fresh_session_config();
let summary = session_summary(
self.chat_widget.token_usage(),
self.chat_widget.thread_id(),
@ -1385,7 +1386,7 @@ impl App {
tracing::warn!(error = %err, "failed to close all threads");
}
let init = crate::chatwidget::ChatWidgetInit {
config: self.config.clone(),
config,
frame_requester: tui.frame_requester(),
app_event_tx: self.app_event_tx.clone(),
// New sessions start without prefilled message content.
@ -1414,6 +1415,12 @@ impl App {
tui.frame_requester().schedule_frame();
}
fn fresh_session_config(&self) -> Config {
let mut config = self.config.clone();
config.service_tier = self.chat_widget.current_service_tier();
config
}
async fn drain_active_thread_events(&mut self, tui: &mut tui::Tui) -> Result<()> {
let Some(mut rx) = self.active_thread_rx.take() else {
return Ok(());
@ -2532,6 +2539,7 @@ impl App {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
},
@ -2554,6 +2562,7 @@ impl App {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
},
@ -2665,6 +2674,39 @@ impl App {
}
}
}
AppEvent::PersistServiceTierSelection { service_tier } => {
self.refresh_status_line();
let profile = self.active_profile.as_deref();
match ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(profile)
.set_service_tier(service_tier)
.apply()
.await
{
Ok(()) => {
let status = if service_tier.is_some() { "on" } else { "off" };
let mut message = format!("Fast mode set to {status}");
if let Some(profile) = profile {
message.push_str(" for ");
message.push_str(profile);
message.push_str(" profile");
}
self.chat_widget.add_info_message(message, None);
}
Err(err) => {
tracing::error!(error = %err, "failed to persist fast mode selection");
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save Fast mode for profile `{profile}`: {err}"
));
} else {
self.chat_widget.add_error_message(format!(
"Failed to save default Fast mode: {err}"
));
}
}
}
}
AppEvent::PersistRealtimeAudioDeviceSelection { kind, name } => {
let builder = match kind {
RealtimeAudioDeviceKind::Microphone => {
@ -2827,6 +2869,7 @@ impl App {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
}));
@ -4916,6 +4959,20 @@ mod tests {
);
}
#[tokio::test]
async fn fresh_session_config_uses_current_service_tier() {
let mut app = make_test_app().await;
app.chat_widget
.set_service_tier(Some(codex_protocol::config_types::ServiceTier::Fast));
let config = app.fresh_session_config();
assert_eq!(
config.service_tier,
Some(codex_protocol::config_types::ServiceTier::Fast)
);
}
#[tokio::test]
async fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
let (mut app, _app_event_rx, mut op_rx) = make_test_app_with_channels().await;

View file

@ -25,6 +25,7 @@ use crate::history_cell::HistoryCell;
use codex_core::features::Feature;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
@ -197,6 +198,11 @@ pub(crate) enum AppEvent {
personality: Personality,
},
/// Persist the selected service tier to the appropriate config.
PersistServiceTierSelection {
service_tier: Option<ServiceTier>,
},
/// Open the device picker for a realtime microphone or speaker.
OpenRealtimeAudioDeviceSelection {
kind: RealtimeAudioDeviceKind,

View file

@ -178,6 +178,7 @@ use super::paste_burst::PasteBurst;
use super::skill_popup::MentionItem;
use super::skill_popup::SkillPopup;
use super::slash_commands;
use super::slash_commands::BuiltinCommandFlags;
use crate::bottom_pane::paste_burst::FlushResult;
use crate::bottom_pane::prompt_args::expand_custom_prompt;
use crate::bottom_pane::prompt_args::expand_if_numeric_with_positional_args;
@ -398,6 +399,7 @@ pub(crate) struct ChatComposer {
config: ChatComposerConfig,
collaboration_mode_indicator: Option<CollaborationModeIndicator>,
connectors_enabled: bool,
fast_command_enabled: bool,
personality_command_enabled: bool,
realtime_conversation_enabled: bool,
audio_device_selection_enabled: bool,
@ -429,6 +431,18 @@ enum ActivePopup {
const FOOTER_SPACING_HEIGHT: u16 = 0;
impl ChatComposer {
fn builtin_command_flags(&self) -> BuiltinCommandFlags {
BuiltinCommandFlags {
collaboration_modes_enabled: self.collaboration_modes_enabled,
connectors_enabled: self.connectors_enabled,
fast_command_enabled: self.fast_command_enabled,
personality_command_enabled: self.personality_command_enabled,
realtime_conversation_enabled: self.realtime_conversation_enabled,
audio_device_selection_enabled: self.audio_device_selection_enabled,
allow_elevate_sandbox: self.windows_degraded_sandbox_active,
}
}
pub fn new(
has_input_focus: bool,
app_event_tx: AppEventSender,
@ -504,6 +518,7 @@ impl ChatComposer {
config,
collaboration_mode_indicator: None,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: false,
realtime_conversation_enabled: false,
audio_device_selection_enabled: false,
@ -569,6 +584,10 @@ impl ChatComposer {
self.connectors_enabled = enabled;
}
pub fn set_fast_command_enabled(&mut self, enabled: bool) {
self.fast_command_enabled = enabled;
}
pub fn set_collaboration_mode_indicator(
&mut self,
indicator: Option<CollaborationModeIndicator>,
@ -2262,16 +2281,9 @@ impl ChatComposer {
{
let treat_as_plain_text = input_starts_with_space || name.contains('/');
if !treat_as_plain_text {
let is_builtin = slash_commands::find_builtin_command(
name,
self.collaboration_modes_enabled,
self.connectors_enabled,
self.personality_command_enabled,
self.realtime_conversation_enabled,
self.audio_device_selection_enabled,
self.windows_degraded_sandbox_active,
)
.is_some();
let is_builtin =
slash_commands::find_builtin_command(name, self.builtin_command_flags())
.is_some();
let prompt_prefix = format!("{PROMPTS_CMD_PREFIX}:");
let is_known_prompt = name
.strip_prefix(&prompt_prefix)
@ -2479,15 +2491,8 @@ impl ChatComposer {
let first_line = self.textarea.text().lines().next().unwrap_or("");
if let Some((name, rest, _rest_offset)) = parse_slash_name(first_line)
&& rest.is_empty()
&& let Some(cmd) = slash_commands::find_builtin_command(
name,
self.collaboration_modes_enabled,
self.connectors_enabled,
self.personality_command_enabled,
self.realtime_conversation_enabled,
self.audio_device_selection_enabled,
self.windows_degraded_sandbox_active,
)
&& let Some(cmd) =
slash_commands::find_builtin_command(name, self.builtin_command_flags())
{
if self.reject_slash_command_if_unavailable(cmd) {
return Some(InputResult::None);
@ -2515,15 +2520,7 @@ impl ChatComposer {
return None;
}
let cmd = slash_commands::find_builtin_command(
name,
self.collaboration_modes_enabled,
self.connectors_enabled,
self.personality_command_enabled,
self.realtime_conversation_enabled,
self.audio_device_selection_enabled,
self.windows_degraded_sandbox_active,
)?;
let cmd = slash_commands::find_builtin_command(name, self.builtin_command_flags())?;
if !cmd.supports_inline_args() {
return None;
@ -3335,16 +3332,8 @@ impl ChatComposer {
}
fn is_known_slash_name(&self, name: &str) -> bool {
let is_builtin = slash_commands::find_builtin_command(
name,
self.collaboration_modes_enabled,
self.connectors_enabled,
self.personality_command_enabled,
self.realtime_conversation_enabled,
self.audio_device_selection_enabled,
self.windows_degraded_sandbox_active,
)
.is_some();
let is_builtin =
slash_commands::find_builtin_command(name, self.builtin_command_flags()).is_some();
if is_builtin {
return true;
}
@ -3398,15 +3387,7 @@ impl ChatComposer {
return rest_after_name.is_empty();
}
if slash_commands::has_builtin_prefix(
name,
self.collaboration_modes_enabled,
self.connectors_enabled,
self.personality_command_enabled,
self.realtime_conversation_enabled,
self.audio_device_selection_enabled,
self.windows_degraded_sandbox_active,
) {
if slash_commands::has_builtin_prefix(name, self.builtin_command_flags()) {
return true;
}
@ -3457,6 +3438,7 @@ impl ChatComposer {
if is_editing_slash_command_name {
let collaboration_modes_enabled = self.collaboration_modes_enabled;
let connectors_enabled = self.connectors_enabled;
let fast_command_enabled = self.fast_command_enabled;
let personality_command_enabled = self.personality_command_enabled;
let realtime_conversation_enabled = self.realtime_conversation_enabled;
let audio_device_selection_enabled = self.audio_device_selection_enabled;
@ -3465,6 +3447,7 @@ impl ChatComposer {
CommandPopupFlags {
collaboration_modes_enabled,
connectors_enabled,
fast_command_enabled,
personality_command_enabled,
realtime_conversation_enabled,
audio_device_selection_enabled,

View file

@ -38,26 +38,35 @@ pub(crate) struct CommandPopup {
pub(crate) struct CommandPopupFlags {
pub(crate) collaboration_modes_enabled: bool,
pub(crate) connectors_enabled: bool,
pub(crate) fast_command_enabled: bool,
pub(crate) personality_command_enabled: bool,
pub(crate) realtime_conversation_enabled: bool,
pub(crate) audio_device_selection_enabled: bool,
pub(crate) windows_degraded_sandbox_active: bool,
}
impl From<CommandPopupFlags> for slash_commands::BuiltinCommandFlags {
fn from(value: CommandPopupFlags) -> Self {
Self {
collaboration_modes_enabled: value.collaboration_modes_enabled,
connectors_enabled: value.connectors_enabled,
fast_command_enabled: value.fast_command_enabled,
personality_command_enabled: value.personality_command_enabled,
realtime_conversation_enabled: value.realtime_conversation_enabled,
audio_device_selection_enabled: value.audio_device_selection_enabled,
allow_elevate_sandbox: value.windows_degraded_sandbox_active,
}
}
}
impl CommandPopup {
pub(crate) fn new(mut prompts: Vec<CustomPrompt>, flags: CommandPopupFlags) -> Self {
// Keep built-in availability in sync with the composer.
let builtins: Vec<(&'static str, SlashCommand)> = slash_commands::builtins_for_input(
flags.collaboration_modes_enabled,
flags.connectors_enabled,
flags.personality_command_enabled,
flags.realtime_conversation_enabled,
flags.audio_device_selection_enabled,
flags.windows_degraded_sandbox_active,
)
.into_iter()
.filter(|(name, _)| !name.starts_with("debug"))
.collect();
let builtins: Vec<(&'static str, SlashCommand)> =
slash_commands::builtins_for_input(flags.into())
.into_iter()
.filter(|(name, _)| !name.starts_with("debug"))
.collect();
// Exclude prompts that collide with builtin command names and sort by name.
let exclude: HashSet<String> = builtins.iter().map(|(n, _)| (*n).to_string()).collect();
prompts.retain(|p| !exclude.contains(&p.name));
@ -498,6 +507,7 @@ mod tests {
CommandPopupFlags {
collaboration_modes_enabled: true,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: true,
realtime_conversation_enabled: false,
audio_device_selection_enabled: false,
@ -519,6 +529,7 @@ mod tests {
CommandPopupFlags {
collaboration_modes_enabled: true,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: true,
realtime_conversation_enabled: false,
audio_device_selection_enabled: false,
@ -540,6 +551,7 @@ mod tests {
CommandPopupFlags {
collaboration_modes_enabled: true,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: false,
realtime_conversation_enabled: false,
audio_device_selection_enabled: false,
@ -569,6 +581,7 @@ mod tests {
CommandPopupFlags {
collaboration_modes_enabled: true,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: true,
realtime_conversation_enabled: false,
audio_device_selection_enabled: false,
@ -590,6 +603,7 @@ mod tests {
CommandPopupFlags {
collaboration_modes_enabled: false,
connectors_enabled: false,
fast_command_enabled: false,
personality_command_enabled: true,
realtime_conversation_enabled: true,
audio_device_selection_enabled: false,

View file

@ -294,6 +294,11 @@ impl BottomPane {
self.request_redraw();
}
pub fn set_fast_command_enabled(&mut self, enabled: bool) {
self.composer.set_fast_command_enabled(enabled);
self.request_redraw();
}
pub fn set_realtime_conversation_enabled(&mut self, enabled: bool) {
self.composer.set_realtime_conversation_enabled(enabled);
self.request_redraw();

View file

@ -8,72 +8,47 @@ use codex_utils_fuzzy_match::fuzzy_match;
use crate::slash_command::SlashCommand;
use crate::slash_command::built_in_slash_commands;
#[derive(Clone, Copy, Debug, Default)]
pub(crate) struct BuiltinCommandFlags {
pub(crate) collaboration_modes_enabled: bool,
pub(crate) connectors_enabled: bool,
pub(crate) fast_command_enabled: bool,
pub(crate) personality_command_enabled: bool,
pub(crate) realtime_conversation_enabled: bool,
pub(crate) audio_device_selection_enabled: bool,
pub(crate) allow_elevate_sandbox: bool,
}
/// Return the built-ins that should be visible/usable for the current input.
pub(crate) fn builtins_for_input(
collaboration_modes_enabled: bool,
connectors_enabled: bool,
personality_command_enabled: bool,
realtime_conversation_enabled: bool,
audio_device_selection_enabled: bool,
allow_elevate_sandbox: bool,
) -> Vec<(&'static str, SlashCommand)> {
pub(crate) fn builtins_for_input(flags: BuiltinCommandFlags) -> Vec<(&'static str, SlashCommand)> {
built_in_slash_commands()
.into_iter()
.filter(|(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox)
.filter(|(_, cmd)| flags.allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox)
.filter(|(_, cmd)| {
collaboration_modes_enabled
flags.collaboration_modes_enabled
|| !matches!(*cmd, SlashCommand::Collab | SlashCommand::Plan)
})
.filter(|(_, cmd)| connectors_enabled || *cmd != SlashCommand::Apps)
.filter(|(_, cmd)| personality_command_enabled || *cmd != SlashCommand::Personality)
.filter(|(_, cmd)| realtime_conversation_enabled || *cmd != SlashCommand::Realtime)
.filter(|(_, cmd)| audio_device_selection_enabled || *cmd != SlashCommand::Settings)
.filter(|(_, cmd)| flags.connectors_enabled || *cmd != SlashCommand::Apps)
.filter(|(_, cmd)| flags.fast_command_enabled || *cmd != SlashCommand::Fast)
.filter(|(_, cmd)| flags.personality_command_enabled || *cmd != SlashCommand::Personality)
.filter(|(_, cmd)| flags.realtime_conversation_enabled || *cmd != SlashCommand::Realtime)
.filter(|(_, cmd)| flags.audio_device_selection_enabled || *cmd != SlashCommand::Settings)
.collect()
}
/// Find a single built-in command by exact name, after applying the gating rules.
pub(crate) fn find_builtin_command(
name: &str,
collaboration_modes_enabled: bool,
connectors_enabled: bool,
personality_command_enabled: bool,
realtime_conversation_enabled: bool,
audio_device_selection_enabled: bool,
allow_elevate_sandbox: bool,
) -> Option<SlashCommand> {
builtins_for_input(
collaboration_modes_enabled,
connectors_enabled,
personality_command_enabled,
realtime_conversation_enabled,
audio_device_selection_enabled,
allow_elevate_sandbox,
)
.into_iter()
.find(|(command_name, _)| *command_name == name)
.map(|(_, cmd)| cmd)
pub(crate) fn find_builtin_command(name: &str, flags: BuiltinCommandFlags) -> Option<SlashCommand> {
builtins_for_input(flags)
.into_iter()
.find(|(command_name, _)| *command_name == name)
.map(|(_, cmd)| cmd)
}
/// Whether any visible built-in fuzzily matches the provided prefix.
pub(crate) fn has_builtin_prefix(
name: &str,
collaboration_modes_enabled: bool,
connectors_enabled: bool,
personality_command_enabled: bool,
realtime_conversation_enabled: bool,
audio_device_selection_enabled: bool,
allow_elevate_sandbox: bool,
) -> bool {
builtins_for_input(
collaboration_modes_enabled,
connectors_enabled,
personality_command_enabled,
realtime_conversation_enabled,
audio_device_selection_enabled,
allow_elevate_sandbox,
)
.into_iter()
.any(|(command_name, _)| fuzzy_match(command_name, name).is_some())
pub(crate) fn has_builtin_prefix(name: &str, flags: BuiltinCommandFlags) -> bool {
builtins_for_input(flags)
.into_iter()
.any(|(command_name, _)| fuzzy_match(command_name, name).is_some())
}
#[cfg(test)]
@ -81,41 +56,58 @@ mod tests {
use super::*;
use pretty_assertions::assert_eq;
fn all_enabled_flags() -> BuiltinCommandFlags {
BuiltinCommandFlags {
collaboration_modes_enabled: true,
connectors_enabled: true,
fast_command_enabled: true,
personality_command_enabled: true,
realtime_conversation_enabled: true,
audio_device_selection_enabled: true,
allow_elevate_sandbox: true,
}
}
#[test]
fn debug_command_still_resolves_for_dispatch() {
let cmd = find_builtin_command("debug-config", true, true, true, false, false, false);
let cmd = find_builtin_command("debug-config", all_enabled_flags());
assert_eq!(cmd, Some(SlashCommand::DebugConfig));
}
#[test]
fn clear_command_resolves_for_dispatch() {
assert_eq!(
find_builtin_command("clear", true, true, true, false, false, false),
find_builtin_command("clear", all_enabled_flags()),
Some(SlashCommand::Clear)
);
}
#[test]
fn fast_command_is_hidden_when_disabled() {
let mut flags = all_enabled_flags();
flags.fast_command_enabled = false;
assert_eq!(find_builtin_command("fast", flags), None);
}
#[test]
fn realtime_command_is_hidden_when_realtime_is_disabled() {
assert_eq!(
find_builtin_command("realtime", true, true, true, false, true, false),
None
);
let mut flags = all_enabled_flags();
flags.realtime_conversation_enabled = false;
assert_eq!(find_builtin_command("realtime", flags), None);
}
#[test]
fn settings_command_is_hidden_when_realtime_is_disabled() {
assert_eq!(
find_builtin_command("settings", true, true, true, false, false, false),
None
);
let mut flags = all_enabled_flags();
flags.realtime_conversation_enabled = false;
flags.audio_device_selection_enabled = false;
assert_eq!(find_builtin_command("settings", flags), None);
}
#[test]
fn settings_command_is_hidden_when_audio_device_selection_is_disabled() {
assert_eq!(
find_builtin_command("settings", true, true, true, true, false, false),
None
);
let mut flags = all_enabled_flags();
flags.audio_device_selection_enabled = false;
assert_eq!(find_builtin_command("settings", flags), None);
}
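    // Because `BuiltinCommandFlags` derives `Default` (every gate off), tests can
    // opt into a single gate with struct-update syntax instead of threading seven
    // positional booleans. Illustrative sketch, not part of this change:
    #[test]
    fn fast_gate_can_be_enabled_in_isolation() {
        let flags = BuiltinCommandFlags {
            fast_command_enabled: true,
            ..BuiltinCommandFlags::default()
        };
        // `/fast` resolves once its gate is on; other gated commands stay hidden.
        assert_eq!(find_builtin_command("fast", flags), Some(SlashCommand::Fast));
        assert_eq!(find_builtin_command("realtime", flags), None);
    }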
}

View file

@ -81,6 +81,7 @@ use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::Settings;
#[cfg(target_os = "windows")]
use codex_protocol::config_types::WindowsSandboxLevel;
@ -1156,6 +1157,7 @@ impl ChatWidget {
mask.reasoning_effort = Some(event.reasoning_effort);
}
self.refresh_model_display();
self.sync_fast_command_enabled();
self.sync_personality_command_enabled();
let startup_tooltip_override = self.startup_tooltip_override.take();
let session_info_cell = history_cell::new_session_info(
@ -2960,6 +2962,7 @@ impl ChatWidget {
.bottom_pane
.set_status_line_enabled(!widget.configured_status_line_items().is_empty());
widget.bottom_pane.set_collaboration_modes_enabled(true);
widget.sync_fast_command_enabled();
widget.sync_personality_command_enabled();
widget
.bottom_pane
@ -3139,6 +3142,7 @@ impl ChatWidget {
.bottom_pane
.set_status_line_enabled(!widget.configured_status_line_items().is_empty());
widget.bottom_pane.set_collaboration_modes_enabled(true);
widget.sync_fast_command_enabled();
widget.sync_personality_command_enabled();
widget
.bottom_pane
@ -3307,6 +3311,7 @@ impl ChatWidget {
.bottom_pane
.set_status_line_enabled(!widget.configured_status_line_items().is_empty());
widget.bottom_pane.set_collaboration_modes_enabled(true);
widget.sync_fast_command_enabled();
widget.sync_personality_command_enabled();
widget
.bottom_pane
@ -3605,6 +3610,14 @@ impl ChatWidget {
SlashCommand::Model => {
self.open_model_popup();
}
SlashCommand::Fast => {
let next_tier = if self.config.service_tier.is_some() {
None
} else {
Some(ServiceTier::Fast)
};
self.set_service_tier_selection(next_tier);
}
SlashCommand::Realtime => {
if !self.realtime_conversation_enabled() {
return;
@ -3884,6 +3897,27 @@ impl ChatWidget {
let trimmed = args.trim();
match cmd {
SlashCommand::Fast => {
if trimmed.is_empty() {
self.dispatch_command(cmd);
return;
}
match trimmed.to_ascii_lowercase().as_str() {
"on" => self.set_service_tier_selection(Some(ServiceTier::Fast)),
"off" => self.set_service_tier_selection(None),
"status" => {
let status = if self.config.service_tier.is_some() {
"on"
} else {
"off"
};
self.add_info_message(format!("Fast mode is {status}."), None);
}
_ => {
self.add_error_message("Usage: /fast [on|off|status]".to_string());
}
}
}
SlashCommand::Rename if !trimmed.is_empty() => {
self.otel_manager.counter("codex.thread.rename", 1, &[]);
let Some((prepared_args, _prepared_elements)) =
@ -4222,6 +4256,7 @@ impl ChatWidget {
.personality
.filter(|_| self.config.features.enabled(Feature::Personality))
.filter(|_| self.current_model_supports_personality());
let service_tier = self.fast_mode_enabled().then_some(self.config.service_tier);
let op = Op::UserTurn {
items,
cwd: self.config.cwd.clone(),
@ -4230,6 +4265,7 @@ impl ChatWidget {
model: effective_mode.model().to_string(),
effort: effective_mode.reasoning_effort(),
summary: None,
service_tier,
final_output_json_schema: None,
collaboration_mode,
personality,
@ -5210,6 +5246,7 @@ impl ChatWidget {
model: Some(switch_model_for_events.clone()),
effort: Some(Some(default_effort)),
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
}));
@ -5329,6 +5366,7 @@ impl ChatWidget {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
windows_sandbox_level: None,
personality: Some(personality),
@ -6242,6 +6280,7 @@ impl ChatWidget {
model: None,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
}));
@ -6777,6 +6816,9 @@ impl ChatWidget {
self.reset_realtime_conversation_state();
}
}
if feature == Feature::FastMode {
self.sync_fast_command_enabled();
}
if feature == Feature::Personality {
self.sync_personality_command_enabled();
}
@ -6859,6 +6901,19 @@ impl ChatWidget {
self.config.personality = Some(personality);
}
/// Set Fast mode in the widget's config copy.
pub(crate) fn set_service_tier(&mut self, service_tier: Option<ServiceTier>) {
self.config.service_tier = service_tier;
}
pub(crate) fn current_service_tier(&self) -> Option<ServiceTier> {
self.config.service_tier
}
fn fast_mode_enabled(&self) -> bool {
self.config.features.enabled(Feature::FastMode)
}
pub(crate) fn set_realtime_audio_device(
&mut self,
kind: RealtimeAudioDeviceKind,
@ -6888,6 +6943,25 @@ impl ChatWidget {
self.refresh_model_display();
}
fn set_service_tier_selection(&mut self, service_tier: Option<ServiceTier>) {
self.set_service_tier(service_tier);
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
windows_sandbox_level: None,
model: None,
effort: None,
summary: None,
service_tier: Some(service_tier),
collaboration_mode: None,
personality: None,
}));
self.app_event_tx
.send(AppEvent::PersistServiceTierSelection { service_tier });
}
pub(crate) fn current_model(&self) -> &str {
if !self.collaboration_modes_enabled() {
return self.current_collaboration_mode.model();
@ -6914,6 +6988,11 @@ impl ChatWidget {
.unwrap_or_else(|| "System default".to_string())
}
fn sync_fast_command_enabled(&mut self) {
self.bottom_pane
.set_fast_command_enabled(self.fast_mode_enabled());
}
fn sync_personality_command_enabled(&mut self) {
self.bottom_pane
.set_personality_command_enabled(self.config.features.enabled(Feature::Personality));
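One subtlety worth calling out: when composing `Op::UserTurn`, the widget computes `self.fast_mode_enabled().then_some(self.config.service_tier)`, so the outer `Option` is present only when the `FastMode` feature is on. A gated-off client omits the field entirely (keeping existing behavior), while a gated-on client always sends an explicit value, even `Some(None)` when Fast is toggled off. A minimal sketch of that mapping, with `ServiceTier` stubbed locally:

#[derive(Clone, Copy, Debug, PartialEq)]
enum ServiceTier {
    Fast,
}

// Mirrors `self.fast_mode_enabled().then_some(self.config.service_tier)`.
fn turn_service_tier(
    feature_on: bool,
    configured: Option<ServiceTier>,
) -> Option<Option<ServiceTier>> {
    feature_on.then_some(configured)
}

fn main() {
    // Feature off: omit the field, leaving the session preference untouched.
    assert_eq!(turn_service_tier(false, Some(ServiceTier::Fast)), None);
    // Feature on with /fast enabled: request the fast tier for this turn.
    assert_eq!(
        turn_service_tier(true, Some(ServiceTier::Fast)),
        Some(Some(ServiceTier::Fast))
    );
    // Feature on with /fast disabled: explicitly send "no tier".
    assert_eq!(turn_service_tier(true, None), Some(None));
}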

View file

@ -38,6 +38,7 @@ use codex_protocol::account::PlanType;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::Settings;
use codex_protocol::items::AgentMessageContent;
use codex_protocol::items::AgentMessageItem;
@ -6519,6 +6520,61 @@ async fn disabled_slash_command_while_task_running_snapshot() {
assert_snapshot!(blob);
}
#[tokio::test]
async fn fast_slash_command_updates_and_persists_local_service_tier() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(Some("gpt-5.3-codex")).await;
chat.set_feature_enabled(Feature::FastMode, true);
chat.dispatch_command(SlashCommand::Fast);
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::CodexOp(Op::OverrideTurnContext {
service_tier: Some(Some(ServiceTier::Fast)),
..
})
)),
"expected fast-mode override app event; events: {events:?}"
);
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::PersistServiceTierSelection {
service_tier: Some(ServiceTier::Fast),
}
)),
"expected fast-mode persistence app event; events: {events:?}"
);
assert_matches!(op_rx.try_recv(), Err(TryRecvError::Empty));
}
#[tokio::test]
async fn user_turn_carries_service_tier_after_fast_toggle() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(Some("gpt-5.3-codex")).await;
chat.thread_id = Some(ThreadId::new());
set_chatgpt_auth(&mut chat);
chat.set_feature_enabled(Feature::FastMode, true);
chat.dispatch_command(SlashCommand::Fast);
let _events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
chat.bottom_pane
.set_composer_text("hello".to_string(), Vec::new(), Vec::new());
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
match next_submit_op(&mut op_rx) {
Op::UserTurn {
service_tier: Some(Some(ServiceTier::Fast)),
..
} => {}
other => panic!("expected Op::UserTurn with fast service tier, got {other:?}"),
}
}
#[tokio::test]
async fn approvals_popup_shows_disabled_presets() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;

View file

@ -13,6 +13,7 @@ pub enum SlashCommand {
// DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so
// more frequently used commands should be listed first.
Model,
Fast,
Approvals,
Permissions,
#[strum(serialize = "setup-default-sandbox")]
@ -89,6 +90,7 @@ impl SlashCommand {
SlashCommand::MemoryDrop => "DO NOT USE",
SlashCommand::MemoryUpdate => "DO NOT USE",
SlashCommand::Model => "choose what model and reasoning effort to use",
SlashCommand::Fast => "toggle Fast mode for supported models",
SlashCommand::Personality => "choose a communication style for Codex",
SlashCommand::Realtime => "toggle realtime voice mode (experimental)",
SlashCommand::Settings => "configure realtime microphone/speaker",
@ -123,6 +125,7 @@ impl SlashCommand {
SlashCommand::Review
| SlashCommand::Rename
| SlashCommand::Plan
| SlashCommand::Fast
| SlashCommand::SandboxReadRoot
)
}
@ -137,6 +140,7 @@ impl SlashCommand {
| SlashCommand::Compact
// | SlashCommand::Undo
| SlashCommand::Model
| SlashCommand::Fast
| SlashCommand::Personality
| SlashCommand::Approvals
| SlashCommand::Permissions