Revert tui code so it does not rely on in-process app server (#14899)

PR https://github.com/openai/codex/pull/14512 added an in-process app
server and started to wire up the tui to use it. We were originally
planning to modify the `tui` code in place, converting it to use the app
server a bit at a time using a hybrid adapter. We've since decided to
create an entirely new parallel `tui_app_server` implementation and do
the conversion all at once but retain the existing `tui` while we work
the bugs out of the new implementation.

This PR undoes the changes to the `tui` made in PR #14512 and
restores the old initialization to its previous state. This allows us to
modify the `tui_app_server` without the risk of regressing the old `tui`
code. For example, we can start to remove support for all legacy core
events, like the ones that PR https://github.com/openai/codex/pull/14892
needed to ignore.

Testing:
* I manually verified that the old `tui` starts and shuts down without a
problem.
This commit is contained in:
Eric Traut 2026-03-17 00:56:32 -06:00 committed by GitHub
parent 57f865c069
commit d37dcca7e0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 18 additions and 253 deletions

1
codex-rs/Cargo.lock generated
View file

@ -2500,7 +2500,6 @@ dependencies = [
"chrono",
"clap",
"codex-ansi-escape",
"codex-app-server-client",
"codex-app-server-protocol",
"codex-arg0",
"codex-backend-client",

View file

@ -29,7 +29,6 @@ base64 = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
clap = { workspace = true, features = ["derive"] }
codex-ansi-escape = { workspace = true }
codex-app-server-client = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-arg0 = { workspace = true }
codex-backend-client = { workspace = true }

View file

@ -39,7 +39,6 @@ use crate::tui::TuiEvent;
use crate::update_action::UpdateAction;
use crate::version::CODEX_CLI_VERSION;
use codex_ansi_escape::ansi_escape_line;
use codex_app_server_client::InProcessAppServerClient;
use codex_app_server_protocol::ConfigLayerSource;
use codex_core::AuthManager;
use codex_core::CodexAuth;
@ -53,6 +52,7 @@ use codex_core::config::types::ApprovalsReviewer;
use codex_core::config::types::ModelAvailabilityNuxConfig;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::features::Feature;
use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
@ -113,7 +113,6 @@ use tokio::task::JoinHandle;
use toml::Value as TomlValue;
mod agent_navigation;
mod app_server_adapter;
mod pending_interactive_replay;
use self::agent_navigation::AgentNavigationDirection;
@ -1948,7 +1947,7 @@ impl App {
#[allow(clippy::too_many_arguments)]
pub async fn run(
tui: &mut tui::Tui,
mut app_server: InProcessAppServerClient,
auth_manager: Arc<AuthManager>,
mut config: Config,
cli_kv_overrides: Vec<(String, TomlValue)>,
harness_overrides: ConfigOverrides,
@ -1968,8 +1967,20 @@ impl App {
let harness_overrides =
normalize_harness_overrides_for_cwd(harness_overrides, &config.cwd)?;
let auth_manager = app_server.auth_manager();
let thread_manager = app_server.thread_manager();
let thread_manager = Arc::new(ThreadManager::new(
&config,
auth_manager.clone(),
SessionSource::Cli,
CollaborationModesConfig {
default_mode_request_user_input: config
.features
.enabled(Feature::DefaultModeRequestUserInput),
},
));
// TODO(xl): Move into PluginManager once this no longer depends on config feature gating.
thread_manager
.plugins_manager()
.maybe_start_curated_repo_sync_for_config(&config);
let mut model = thread_manager
.get_models_manager()
.get_default_model(&config.model, RefreshStrategy::Offline)
@ -1987,13 +1998,6 @@ impl App {
)
.await;
if let Some(exit_info) = exit_info {
app_server
.shutdown()
.await
.inspect_err(|err| {
tracing::warn!("app-server shutdown failed: {err}");
})
.ok();
return Ok(exit_info);
}
if let Some(updated_model) = config.model.clone() {
@ -2225,7 +2229,6 @@ impl App {
let mut thread_created_rx = thread_manager.subscribe_thread_created();
let mut listen_for_threads = true;
let mut listen_for_app_server_events = true;
let mut waiting_for_initial_session_configured = wait_for_initial_session_configured;
#[cfg(not(debug_assertions))]
@ -2285,16 +2288,6 @@ impl App {
Err(err) => break Err(err),
}
}
app_server_event = app_server.next_event(), if listen_for_app_server_events => {
match app_server_event {
Some(event) => app.handle_app_server_event(&app_server, event).await,
None => {
listen_for_app_server_events = false;
tracing::warn!("app-server event stream closed");
}
}
AppRunControl::Continue
}
// Listen on new thread creation due to collab tools.
created = thread_created_rx.recv(), if listen_for_threads => {
match created {
@ -2325,9 +2318,6 @@ impl App {
}
}
};
if let Err(err) = app_server.shutdown().await {
tracing::warn!(error = %err, "failed to shut down embedded app server");
}
let clear_result = tui.terminal.clear();
let exit_reason = match exit_reason_result {
Ok(exit_reason) => {

View file

@ -1,72 +0,0 @@
/*
This module holds the temporary adapter layer between the TUI and the app
server during the hybrid migration period.
For now, the TUI still owns its existing direct-core behavior, but startup
allocates a local in-process app server and drains its event stream. Keeping
the app-server-specific wiring here keeps that transitional logic out of the
main `app.rs` orchestration path.
As more TUI flows move onto the app-server surface directly, this adapter
should shrink and eventually disappear.
*/
use super::App;
use codex_app_server_client::InProcessAppServerClient;
use codex_app_server_client::InProcessServerEvent;
use codex_app_server_protocol::JSONRPCErrorError;
impl App {
    /// Handles a single event drained from the embedded in-process app server.
    ///
    /// During the hybrid migration the TUI does not yet act on app-server
    /// traffic: notifications are dropped on the floor, and server-initiated
    /// requests are rejected via [`Self::reject_app_server_request`].
    pub(super) async fn handle_app_server_event(
        &mut self,
        app_server_client: &InProcessAppServerClient,
        event: InProcessServerEvent,
    ) {
        match event {
            // The event consumer fell behind and `skipped` events were
            // dropped (per the log message below). Since notifications are
            // ignored anyway, this is log-and-continue.
            InProcessServerEvent::Lagged { skipped } => {
                tracing::warn!(
                    skipped,
                    "app-server event consumer lagged; dropping ignored events"
                );
            }
            // Intentionally ignored while the TUI still drives core directly.
            InProcessServerEvent::ServerNotification(_) => {}
            InProcessServerEvent::LegacyNotification(_) => {}
            // Server-initiated requests cannot be honored yet; reject each
            // one explicitly so the server side is not left waiting.
            InProcessServerEvent::ServerRequest(request) => {
                let request_id = request.id().clone();
                tracing::warn!(
                    ?request_id,
                    "rejecting app-server request while TUI still uses direct core APIs"
                );
                if let Err(err) = self
                    .reject_app_server_request(
                        app_server_client,
                        request_id,
                        "TUI client does not yet handle this app-server server request".to_string(),
                    )
                    .await
                {
                    // Rejection itself failed; nothing further to do but log.
                    tracing::warn!("{err}");
                }
            }
        }
    }

    /// Sends a JSON-RPC error response rejecting a server-initiated request.
    ///
    /// Returns `Err` with a human-readable message if delivering the
    /// rejection fails.
    async fn reject_app_server_request(
        &self,
        app_server_client: &InProcessAppServerClient,
        request_id: codex_app_server_protocol::RequestId,
        reason: String,
    ) -> std::result::Result<(), String> {
        app_server_client
            .reject_server_request(
                request_id,
                JSONRPCErrorError {
                    // -32000 falls in the JSON-RPC 2.0 implementation-defined
                    // server-error range (-32000..=-32099).
                    code: -32000,
                    message: reason,
                    data: None,
                },
            )
            .await
            .map_err(|err| format!("failed to reject app-server request: {err}"))
    }
}

View file

@ -7,10 +7,6 @@ use additional_dirs::add_dir_warning_message;
use app::App;
pub use app::AppExitInfo;
pub use app::ExitReason;
use codex_app_server_client::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY;
use codex_app_server_client::InProcessAppServerClient;
use codex_app_server_client::InProcessClientStartArgs;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_cloud_requirements::cloud_requirements_loader;
use codex_core::AuthManager;
use codex_core::CodexAuth;
@ -50,15 +46,12 @@ use codex_state::log_db;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_oss::ensure_oss_provider_ready;
use codex_utils_oss::get_default_model_for_oss_provider;
use color_eyre::eyre::WrapErr;
use cwd_prompt::CwdPromptAction;
use cwd_prompt::CwdPromptOutcome;
use cwd_prompt::CwdSelection;
use std::fs::OpenOptions;
use std::future::Future;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::error;
use tracing_appender::non_blocking;
use tracing_subscriber::EnvFilter;
@ -246,74 +239,10 @@ pub use public_widgets::composer_input::ComposerAction;
pub use public_widgets::composer_input::ComposerInput;
// (tests access modules directly within the crate)
/// Starts the embedded in-process app server using the production
/// `InProcessAppServerClient::start` entry point.
///
/// Thin wrapper over `start_embedded_app_server_with`, which assembles the
/// start arguments; tests use the `_with` variant directly to inject a stub
/// starter.
async fn start_embedded_app_server(
    arg0_paths: Arg0DispatchPaths,
    config: Config,
    cli_kv_overrides: Vec<(String, toml::Value)>,
    loader_overrides: LoaderOverrides,
    cloud_requirements: CloudRequirementsLoader,
    feedback: codex_feedback::CodexFeedback,
) -> color_eyre::Result<InProcessAppServerClient> {
    start_embedded_app_server_with(
        arg0_paths,
        config,
        cli_kv_overrides,
        loader_overrides,
        cloud_requirements,
        feedback,
        // Production start function; tests substitute their own closure.
        InProcessAppServerClient::start,
    )
    .await
}
/// Builds `InProcessClientStartArgs` from the TUI's configuration and starts
/// the in-process app server via the injected `start_client` function.
///
/// The `start_client` parameter exists for testability: production code
/// passes `InProcessAppServerClient::start`, while tests pass a failing or
/// stubbed starter.
///
/// # Errors
/// Any startup failure is returned wrapped with the context
/// "failed to start embedded app server".
async fn start_embedded_app_server_with<F, Fut>(
    arg0_paths: Arg0DispatchPaths,
    config: Config,
    cli_kv_overrides: Vec<(String, toml::Value)>,
    loader_overrides: LoaderOverrides,
    cloud_requirements: CloudRequirementsLoader,
    feedback: codex_feedback::CodexFeedback,
    start_client: F,
) -> color_eyre::Result<InProcessAppServerClient>
where
    F: FnOnce(InProcessClientStartArgs) -> Fut,
    Fut: Future<Output = std::io::Result<InProcessAppServerClient>>,
{
    // Convert config-load warnings into notifications so the server can
    // surface them to the client.
    let config_warnings = config
        .startup_warnings
        .iter()
        .map(|warning| ConfigWarningNotification {
            summary: warning.clone(),
            details: None,
            path: None,
            range: None,
        })
        .collect();
    let client = start_client(InProcessClientStartArgs {
        arg0_paths,
        config: Arc::new(config),
        cli_overrides: cli_kv_overrides,
        loader_overrides,
        cloud_requirements,
        feedback,
        config_warnings,
        session_source: codex_protocol::protocol::SessionSource::Cli,
        // The TUI authenticates through its own auth flow, not the env var.
        enable_codex_api_key_env: false,
        client_name: "codex-tui".to_string(),
        client_version: env!("CARGO_PKG_VERSION").to_string(),
        experimental_api: true,
        opt_out_notification_methods: Vec::new(),
        channel_capacity: DEFAULT_IN_PROCESS_CHANNEL_CAPACITY,
    })
    .await
    .wrap_err("failed to start embedded app server")?;
    Ok(client)
}
pub async fn run_main(
mut cli: Cli,
arg0_paths: Arg0DispatchPaths,
loader_overrides: LoaderOverrides,
_loader_overrides: LoaderOverrides,
) -> std::io::Result<AppExitInfo> {
let (sandbox_mode, approval_policy) = if cli.full_auto {
(
@ -611,8 +540,6 @@ pub async fn run_main(
run_ratatui_app(
cli,
arg0_paths,
loader_overrides,
config,
overrides,
cli_kv_overrides,
@ -626,8 +553,6 @@ pub async fn run_main(
#[allow(clippy::too_many_arguments)]
async fn run_ratatui_app(
cli: Cli,
arg0_paths: Arg0DispatchPaths,
loader_overrides: LoaderOverrides,
initial_config: Config,
overrides: ConfigOverrides,
cli_kv_overrides: Vec<(String, toml::Value)>,
@ -1025,27 +950,10 @@ async fn run_ratatui_app(
let use_alt_screen = determine_alt_screen_mode(no_alt_screen, config.tui_alternate_screen);
tui.set_alt_screen_enabled(use_alt_screen);
let app_server = match start_embedded_app_server(
arg0_paths,
config.clone(),
cli_kv_overrides.clone(),
loader_overrides,
cloud_requirements.clone(),
feedback.clone(),
)
.await
{
Ok(app_server) => app_server,
Err(err) => {
restore();
session_log::log_session_end();
return Err(err);
}
};
let app_result = App::run(
&mut tui,
app_server,
auth_manager,
config,
cli_kv_overrides.clone(),
overrides.clone(),
@ -1328,20 +1236,6 @@ mod tests {
.await
}
/// Test helper: starts an embedded app server for the given `config` with
/// default/empty values for every other start argument.
async fn start_test_embedded_app_server(
    config: Config,
) -> color_eyre::Result<InProcessAppServerClient> {
    start_embedded_app_server(
        Arg0DispatchPaths::default(),
        config,
        Vec::new(),
        LoaderOverrides::default(),
        CloudRequirementsLoader::default(),
        codex_feedback::CodexFeedback::new(),
    )
    .await
}
#[tokio::test]
#[serial]
async fn windows_shows_trust_prompt_without_sandbox() -> std::io::Result<()> {
@ -1358,51 +1252,6 @@ mod tests {
Ok(())
}
/// Verifies that repeated calls to the embedded app server's manager
/// accessors return the same shared instances (pointer identity, not just
/// equality).
#[tokio::test]
async fn embedded_app_server_exposes_client_manager_accessors() -> color_eyre::Result<()> {
    let temp_dir = TempDir::new()?;
    let config = build_config(&temp_dir).await?;
    let app_server = start_test_embedded_app_server(config).await?;
    // Arc::ptr_eq asserts both accessor calls hand back the same Arc.
    assert!(Arc::ptr_eq(
        &app_server.auth_manager(),
        &app_server.auth_manager()
    ));
    assert!(Arc::ptr_eq(
        &app_server.thread_manager(),
        &app_server.thread_manager()
    ));
    // Shut down cleanly so the test doesn't leak the server.
    app_server.shutdown().await?;
    Ok(())
}
/// Verifies that a failure from the injected start function is propagated to
/// the caller with the "failed to start embedded app server" context intact.
#[tokio::test]
async fn embedded_app_server_start_failure_is_returned() -> color_eyre::Result<()> {
    let temp_dir = TempDir::new()?;
    let config = build_config(&temp_dir).await?;
    let result = start_embedded_app_server_with(
        Arg0DispatchPaths::default(),
        config,
        Vec::new(),
        LoaderOverrides::default(),
        CloudRequirementsLoader::default(),
        codex_feedback::CodexFeedback::new(),
        // Stub starter that always fails, exercising the error path.
        |_args| async { Err(std::io::Error::other("boom")) },
    )
    .await;
    let err = match result {
        Ok(_) => panic!("startup failure should be returned"),
        Err(err) => err,
    };
    // The eyre context added by `wrap_err` must survive propagation.
    assert!(
        err.to_string()
            .contains("failed to start embedded app server"),
        "error should preserve the embedded app server startup context"
    );
    Ok(())
}
#[tokio::test]
#[serial]
async fn windows_shows_trust_prompt_with_sandbox() -> std::io::Result<()> {