From 4e9bdbd44fe8824b34bdd6accc4e8d40cb948c57 Mon Sep 17 00:00:00 2001 From: Connor Peet Date: Mon, 17 Oct 2022 13:34:06 -0700 Subject: [PATCH] make self-update work on Linux --- build/azure-pipelines/product-build.yml | 2 +- cli/src/commands/tunnels.rs | 11 ++-- cli/src/util/tar.rs | 71 ++++++++++++++++++++++--- cli/src/util/zipper.rs | 4 +- 4 files changed, 71 insertions(+), 17 deletions(-) diff --git a/build/azure-pipelines/product-build.yml b/build/azure-pipelines/product-build.yml index eba4edf81eb..fc0aa438b5b 100644 --- a/build/azure-pipelines/product-build.yml +++ b/build/azure-pipelines/product-build.yml @@ -312,7 +312,7 @@ stages: parameters: VSCODE_PUBLISH: ${{ variables.VSCODE_PUBLISH }} VSCODE_QUALITY: ${{ variables.VSCODE_QUALITY }} - VSCODE_BUILD_TUNNEL_CLI: false # todo until 32 bit CLI is available + VSCODE_BUILD_TUNNEL_CLI: true VSCODE_RUN_UNIT_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }} VSCODE_RUN_INTEGRATION_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }} VSCODE_RUN_SMOKE_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }} diff --git a/cli/src/commands/tunnels.rs b/cli/src/commands/tunnels.rs index 658f3123b7a..cb068195512 100644 --- a/cli/src/commands/tunnels.rs +++ b/cli/src/commands/tunnels.rs @@ -3,8 +3,6 @@ * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ -use std::process::Stdio; - use async_trait::async_trait; use tokio::sync::oneshot; @@ -215,6 +213,10 @@ async fn serve_with_csa( csa: CodeServerArgs, shutdown_rx: Option>, ) -> Result { + // Intentionally read before starting the server. If the server updated and + // respawn is requested, the old binary will get renamed, and then + // current_exe will point to the wrong path. 
+ let current_exe = std::env::current_exe().unwrap(); let platform = spanf!(log, log.span("prereq"), PreReqChecker::new().verify())?; let auth = Auth::new(&paths, log.clone()); @@ -244,11 +246,8 @@ async fn serve_with_csa( // reuse current args, but specify no-forward since tunnels will // already be running in this process, and we cannot do a login let args = std::env::args().skip(1).collect::>(); - let exit = std::process::Command::new(std::env::current_exe().unwrap()) + let exit = std::process::Command::new(current_exe) .args(args) - .stdout(Stdio::inherit()) - .stderr(Stdio::inherit()) - .stdin(Stdio::inherit()) .spawn() .map_err(|e| wrap(e, "error respawning after update"))? .wait() diff --git a/cli/src/util/tar.rs b/cli/src/util/tar.rs index e18927b16c3..decb8e652ae 100644 --- a/cli/src/util/tar.rs +++ b/cli/src/util/tar.rs @@ -5,12 +5,54 @@ use crate::util::errors::{wrap, WrappedError}; use flate2::read::GzDecoder; -use std::fs::File; +use std::fs; +use std::io::{Seek, SeekFrom}; use std::path::{Path, PathBuf}; use tar::Archive; use super::io::ReportCopyProgress; +fn should_skip_first_segment(file: &fs::File) -> Result { + // unfortunately, we need to re-read the archive here since you cannot reuse + // `.entries()`. But this will generally only look at one or two files, so this + // should be acceptably speedy... If not, we could hardcode behavior for + // different types of archives. 
+ + let tar = GzDecoder::new(file); + let mut archive = Archive::new(tar); + let mut entries = archive + .entries() + .map_err(|e| wrap(e, "error opening archive"))?; + + let first_name = { + let file = entries + .next() + .expect("expected not to have an empty archive") + .map_err(|e| wrap(e, "error reading entry file"))?; + + let path = file.path().expect("expected to have path"); + + path.iter() + .next() + .expect("expected to have non-empty name") + .to_owned() + }; + + let mut had_multiple = false; + for file in entries { + if let Ok(file) = file { + had_multiple = true; + if let Ok(name) = file.path() { + if name.iter().next() != Some(&first_name) { + return Ok(false); + } + } + } + } + + Ok(had_multiple) // prefix removal is invalid if there's only a single file +} + pub fn decompress_tarball( path: &Path, parent_path: &Path, @@ -19,12 +61,15 @@ pub fn decompress_tarball( where T: ReportCopyProgress, { - let tar_gz = File::open(path).map_err(|e| { - wrap( - Box::new(e), - format!("error opening file {}", path.display()), - ) - })?; + let mut tar_gz = fs::File::open(path) + .map_err(|e| wrap(e, format!("error opening file {}", path.display())))?; + let skip_first = should_skip_first_segment(&tar_gz)?; + + // reset since skip logic read the tar already: + tar_gz + .seek(SeekFrom::Start(0)) + .map_err(|e| wrap(e, "error resetting seek position"))?; + let tar = GzDecoder::new(tar_gz); let mut archive = Archive::new(tar); @@ -37,7 +82,17 @@ where .path() .map_err(|e| wrap(e, "error reading entry path"))?; - let path = parent_path.join(entry_path.iter().skip(1).collect::()); + let path = parent_path.join(if skip_first { + entry_path.iter().skip(1).collect::() + } else { + entry_path.into_owned() + }); + + if let Some(p) = path.parent() { + fs::create_dir_all(&p) + .map_err(|e| wrap(e, format!("could not create dir for {}", p.display())))?; + } + entry .unpack(&path) .map_err(|e| wrap(e, format!("error unpacking {}", path.display())))?; diff --git 
a/cli/src/util/zipper.rs b/cli/src/util/zipper.rs index a9106fd6b6c..52548542d59 100644 --- a/cli/src/util/zipper.rs +++ b/cli/src/util/zipper.rs @@ -41,7 +41,7 @@ fn should_skip_first_segment(archive: &mut ZipArchive) -> bool { } } - true + archive.len() > 1 // prefix removal is invalid if there's only a single file } pub fn unzip_file(path: &Path, parent_path: &Path, mut reporter: T) -> Result<(), WrappedError> @@ -59,7 +59,7 @@ where } else { 0 }; for i in 0..archive.len() { reporter.report_progress(i as u64, archive.len() as u64); let mut file = archive