diff --git a/codex-rs/exec-server/tests/suite/accept_elicitation.rs b/codex-rs/exec-server/tests/suite/accept_elicitation.rs index 1858deeec..a9c6978a0 100644 --- a/codex-rs/exec-server/tests/suite/accept_elicitation.rs +++ b/codex-rs/exec-server/tests/suite/accept_elicitation.rs @@ -3,6 +3,7 @@ use std::borrow::Cow; use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; +use std::time::Duration; use anyhow::Context; use anyhow::Result; @@ -82,6 +83,18 @@ prefix_rule( }; notify_readable_sandbox(&project_root_path, codex_linux_sandbox_exe, &service).await?; + // TODO(mbolin): Remove this flakiness-mitigation hack when possible. + // As noted in the commentary on https://github.com/openai/codex/pull/7832, + // an rmcp server does not process messages serially: it takes messages off + // the queue and immediately dispatches them to handlers, which may complete + // out of order. The proper fix is to replace our custom notification with a + // custom request where we wait for the response before proceeding. However, + // rmcp does not currently support custom requests, so as a temporary + // workaround we just wait a bit to increase the probability the server has + // processed the notification. Assuming we can upstream rmcp support for + // custom requests, we will remove this once the functionality is available. + tokio::time::sleep(Duration::from_secs(4)).await; + // Call the shell tool and verify that an elicitation was created and + // auto-approved. let CallToolResult {