Add experimental realtime websocket backend prompt override (#12418)

- add top-level `experimental_realtime_ws_backend_prompt` config key
(experimental / do not use) and include it in config schema
- apply the override only to `Op::RealtimeConversation` websocket
`backend_prompt`, with config + realtime tests
This commit is contained in:
Ahmed Ibrahim 2026-02-20 20:10:51 -08:00 committed by GitHub
parent 4c1744afb2
commit b237f7cbb1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 96 additions and 10 deletions

View file

@@ -1544,6 +1544,10 @@
"experimental_compact_prompt_file": {
"$ref": "#/definitions/AbsolutePathBuf"
},
"experimental_realtime_ws_backend_prompt": {
"description": "Experimental / do not use. Overrides only the realtime conversation websocket transport backend prompt (the `Op::RealtimeConversation` `/ws` session.create backend_prompt) without changing normal prompts.",
"type": "string"
},
"experimental_realtime_ws_base_url": {
"description": "Experimental / do not use. Overrides only the realtime conversation websocket transport base URL (the `Op::RealtimeConversation` `/ws` connection) without changing normal provider HTTP requests.",
"type": "string"

View file

@@ -405,7 +405,10 @@ pub struct Config {
/// websocket transport base URL (the `Op::RealtimeConversation` `/ws`
/// connection) without changing normal provider HTTP requests.
pub experimental_realtime_ws_base_url: Option<String>,
/// Experimental / do not use. Overrides only the realtime conversation
/// websocket transport backend prompt (the `Op::RealtimeConversation`
/// `/ws` session.create backend_prompt) without changing normal prompts.
pub experimental_realtime_ws_backend_prompt: Option<String>,
/// When set, restricts ChatGPT login to a specific workspace identifier.
pub forced_chatgpt_workspace_id: Option<String>,
@@ -1136,7 +1139,10 @@ pub struct ConfigToml {
/// websocket transport base URL (the `Op::RealtimeConversation` `/ws`
/// connection) without changing normal provider HTTP requests.
pub experimental_realtime_ws_base_url: Option<String>,
/// Experimental / do not use. Overrides only the realtime conversation
/// websocket transport backend prompt (the `Op::RealtimeConversation`
/// `/ws` session.create backend_prompt) without changing normal prompts.
pub experimental_realtime_ws_backend_prompt: Option<String>,
pub projects: Option<HashMap<String, ProjectConfig>>,
/// Controls the web search tool mode: disabled, cached, or live.
@@ -2065,6 +2071,7 @@ impl Config {
.or(cfg.chatgpt_base_url)
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
experimental_realtime_ws_base_url: cfg.experimental_realtime_ws_base_url,
experimental_realtime_ws_backend_prompt: cfg.experimental_realtime_ws_backend_prompt,
forced_chatgpt_workspace_id,
forced_login_method,
include_apply_patch_tool: include_apply_patch_tool_flag,
@@ -4607,6 +4614,7 @@ model_verbosity = "high"
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_realtime_ws_base_url: None,
experimental_realtime_ws_backend_prompt: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -4728,6 +4736,7 @@ model_verbosity = "high"
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_realtime_ws_base_url: None,
experimental_realtime_ws_backend_prompt: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -4847,6 +4856,7 @@ model_verbosity = "high"
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_realtime_ws_base_url: None,
experimental_realtime_ws_backend_prompt: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -4952,6 +4962,7 @@ model_verbosity = "high"
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_realtime_ws_base_url: None,
experimental_realtime_ws_backend_prompt: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
@@ -5738,7 +5749,6 @@ trust_level = "untrusted"
);
Ok(())
}
#[test]
fn experimental_realtime_ws_base_url_loads_from_config_toml() -> std::io::Result<()> {
let cfg: ConfigToml = toml::from_str(
@@ -5766,6 +5776,34 @@ experimental_realtime_ws_base_url = "http://127.0.0.1:8011"
);
Ok(())
}
#[test]
fn experimental_realtime_ws_backend_prompt_loads_from_config_toml() -> std::io::Result<()> {
    // A minimal config file that sets only the experimental realtime
    // websocket backend-prompt override.
    let raw = r#"
experimental_realtime_ws_backend_prompt = "prompt from config"
"#;
    let cfg: ConfigToml = toml::from_str(raw).expect("TOML deserialization should succeed");

    // The raw deserialized ConfigToml must carry the value verbatim.
    assert_eq!(
        cfg.experimental_realtime_ws_backend_prompt.as_deref(),
        Some("prompt from config")
    );

    // The value must also survive the merge into the effective `Config`.
    let codex_home = TempDir::new()?;
    let config = Config::load_from_base_config_with_overrides(
        cfg,
        ConfigOverrides::default(),
        codex_home.path().to_path_buf(),
    )?;
    assert_eq!(
        config.experimental_realtime_ws_backend_prompt.as_deref(),
        Some("prompt from config")
    );
    Ok(())
}
}
#[cfg(test)]

View file

@@ -174,18 +174,17 @@ pub(crate) async fn handle_start(
if let Some(realtime_ws_base_url) = &config.experimental_realtime_ws_base_url {
api_provider.base_url = realtime_ws_base_url.clone();
}
let prompt = config
.experimental_realtime_ws_backend_prompt
.clone()
.unwrap_or(params.prompt);
let requested_session_id = params
.session_id
.or_else(|| Some(sess.conversation_id.to_string()));
let events_rx = match sess
.conversation
.start(
api_provider,
None,
params.prompt,
requested_session_id.clone(),
)
.start(api_provider, None, prompt, requested_session_id.clone())
.await
{
Ok(events_rx) => events_rx,

View file

@@ -358,7 +358,6 @@ async fn conversation_second_start_replaces_runtime() -> Result<()> {
server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_uses_experimental_realtime_ws_base_url_override() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -413,3 +412,49 @@ async fn conversation_uses_experimental_realtime_ws_base_url_override() -> Resul
realtime_server.shutdown().await;
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_uses_experimental_realtime_ws_backend_prompt_override() -> Result<()> {
    skip_if_no_network!(Ok(()));

    // Connection 1: handshake only (no scripted replies). Connection 2: the
    // realtime conversation, which answers session.create with session.created.
    let session_created_reply = json!({
        "type": "session.created",
        "session": { "id": "sess_override" }
    });
    let server = start_websocket_server(vec![vec![], vec![vec![session_created_reply]]]).await;

    // Configure the experimental backend-prompt override; the prompt supplied
    // via the Op below should be ignored in favor of this value.
    let mut builder = test_codex().with_config(|config| {
        config.experimental_realtime_ws_backend_prompt = Some("prompt from config".to_string());
    });
    let test = builder.build_with_websocket_server(&server).await?;
    assert!(server.wait_for_handshakes(1, Duration::from_secs(2)).await);

    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: "prompt from op".to_string(),
            session_id: None,
        }))
        .await?;

    // Wait until the conversation surfaces the session created by the server.
    let created_id = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
            payload: RealtimeEvent::SessionCreated { session_id },
        }) => Some(session_id.clone()),
        _ => None,
    })
    .await;
    assert_eq!(created_id, "sess_override");

    // The session.create sent over the wire (second connection, first message)
    // must carry the config-level prompt, not the Op-level one.
    let connections = server.connections();
    assert_eq!(connections.len(), 2);
    assert_eq!(
        connections[1][0].body_json()["session"]["backend_prompt"].as_str(),
        Some("prompt from config")
    );

    server.shutdown().await;
    Ok(())
}