make cli and other nice things

cy 2025-04-01 11:38:28 -04:00
parent 92f7edfba4
commit aa6b94949b
Signed by: cy
SSH key fingerprint: SHA256:o/geVWV4om1QhUSkKvDQeW/eAihwnjyXkqMwrVdbuts
3 changed files with 118 additions and 18 deletions


@@ -1,18 +1,18 @@
#![feature(let_chains)]
-use std::process::{Command, Stdio};
+use std::process::Command;
use std::sync::mpsc;
-use std::{env, path::Path};
+use std::path::Path;
+use std::sync::{Mutex, Arc, atomic::{AtomicUsize, Ordering}};
use log::{debug, trace};
use serde::{Deserialize, Serialize};
use serde_json;
use tokio::sync::Semaphore;
+use clap::Parser;
const UPSTREAM_CACHES: &'static [&'static str] = &[
"https://cache.nixos.org",
"https://nix-community.cachix.org",
"https://nixcache.cy7.sh",
];
// nix path-info --derivation --json
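The comment above marks where the store paths come from. A minimal sketch of wrapping nix path-info --derivation --json, assuming an array-of-objects JSON shape; the struct name and field below are illustrative, not this file's actual PathInfo:

use serde::Deserialize;
use std::process::Command;

// Sketch only; not part of this commit. Field set and JSON shape are guesses.
#[derive(Deserialize)]
struct PathInfoSketch {
    path: String, // one store path per entry; the real PathInfo may differ
}

fn query_path_info(package: &str) -> Vec<PathInfoSketch> {
    let out = Command::new("nix")
        .arg("path-info")
        .arg("--derivation")
        .arg("--json")
        .arg(package)
        .output()
        .expect("failed to run nix path-info");
    // assumes an array of objects; newer nix versions emit a map keyed by path
    serde_json::from_slice(&out.stdout).expect("unexpected nix path-info output")
}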
@@ -63,12 +63,43 @@ impl PathInfo {
}
}
+#[derive(Parser)]
+#[command(version, about, long_about = None)]
+struct Cli {
+/// Package to upload to the binary cache
+package: String,
+/// Address of the binary cache (passed to nix copy --to)
+#[arg(long, value_name = "BINARY CACHE")]
+to: String,
+/// Upstream cache to check against. Can be specified multiple times.
+/// cache.nixos.org is always included.
+#[arg(long, short)]
+upstream_cache: Vec<String>,
+/// Concurrent upstream cache checkers
+#[arg(long, default_value_t = 50)]
+upstream_checker_concurrency: u8,
+/// Concurrent uploaders
+#[arg(long, default_value_t = 10)]
+uploader_concurrency: u8,
+}
#[tokio::main]
async fn main() {
env_logger::init();
-let args: Vec<String> = env::args().collect();
-let package = &args[1];
+let cli = Cli::parse();
+let package = &cli.package;
+let binary_cache = cli.to;
+let mut upstream_caches = cli.upstream_cache;
+for upstream in UPSTREAM_CACHES {
+upstream_caches.push(upstream.to_string());
+}
debug!("package: {}", package);
debug!("binary cache: {}", binary_cache);
debug!("upstream caches: {:#?}", upstream_caches);
println!("querying nix path-info");
let path_infos = PathInfo::from_package(package);
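The hunk above adds the clap-derived Cli and swaps manual argv handling for Cli::parse(). A quick way to see what the derived parser accepts is clap's try_parse_from; the binary name, package ref, and cache URL below are made up:

// Sketch: exercises the derived Cli without real argv; not part of this commit.
#[test]
fn cli_flags_parse() {
    let cli = Cli::try_parse_from([
        "nix-cache-push", // argv[0]; the real binary name may differ
        "nixpkgs#hello",
        "--to", "s3://nixcache?endpoint=s3.example.org",
        "-u", "https://nix-community.cachix.org",
    ])
    .unwrap();
    assert_eq!(cli.package, "nixpkgs#hello");
    assert_eq!(cli.to, "s3://nixcache?endpoint=s3.example.org");
    assert_eq!(cli.upstream_checker_concurrency, 50); // default_value_t
    assert_eq!(cli.uploader_concurrency, 10); // default_value_t
}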
@@ -78,15 +109,15 @@ async fn main() {
let (cacheable_tx, cacheable_rx) = mpsc::channel();
let mut handles = Vec::new();
println!("spawning check_upstream");
handles.push(tokio::spawn(async move {
-check_upstream(store_paths, cacheable_tx).await;
+check_upstream(store_paths, cacheable_tx, cli.upstream_checker_concurrency, upstream_caches).await;
}));
println!("spawning uploader");
handles.push(tokio::spawn(async move {
-uploader(cacheable_rx).await;
+uploader(cacheable_rx, binary_cache, cli.uploader_concurrency).await;
}));
// make sure all threads are done
@@ -96,8 +127,8 @@ async fn main() {
}
// filter out store paths that exist in upstream caches
-async fn check_upstream(store_paths: Vec<String>, cacheable_tx: mpsc::Sender<String>) {
-let concurrent = Semaphore::new(50);
+async fn check_upstream(store_paths: Vec<String>, cacheable_tx: mpsc::Sender<String>, concurrency: u8, upstream_caches: Vec<String>) {
+let concurrent = Semaphore::new(concurrency.into());
for store_path in store_paths {
let _permit = concurrent.acquire().await.unwrap();
let tx = cacheable_tx.clone();
@@ -122,7 +153,7 @@ async fn check_upstream(store_paths: Vec<String>, cacheable_tx: mpsc::Sender<Str
.map(|x| x.status());
if let Ok(res_status) = res_status && res_status.is_success() {
println!("{} was a hit upstream: {}", store_path, upstream);
debug!("{} was a hit upstream: {}", store_path, upstream);
hit = true;
break;
}
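The status check above implies an HTTP probe per upstream cache. For reference, a Nix binary cache serves metadata at {cache}/{hash}.narinfo, so a probe along these lines would match what the loop appears to do; reqwest and the helper below are assumptions, not necessarily this crate's approach:

// Sketch: true if `upstream` already serves the narinfo for `store_path`.
// Assumes the /nix/store/<hash>-<name> layout; not taken from this commit.
async fn probe_upstream(client: &reqwest::Client, upstream: &str, store_path: &str) -> bool {
    let base = store_path.strip_prefix("/nix/store/").unwrap_or(store_path);
    let hash = base.split('-').next().unwrap_or(base);
    let url = format!("{}/{}.narinfo", upstream.trim_end_matches('/'), hash);
    match client.head(&url).send().await {
        Ok(res) => res.status().is_success(),
        Err(_) => false,
    }
}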
@@ -135,26 +166,32 @@ async fn check_upstream(store_paths: Vec<String>, cacheable_tx: mpsc::Sender<Str
}
}
-async fn uploader(cacheable_rx: mpsc::Receiver<String>) {
-let mut count = 0;
-let concurrent = Semaphore::new(10);
+async fn uploader(cacheable_rx: mpsc::Receiver<String>, binary_cache: String, concurrency: u8) {
+let upload_count = Arc::new(AtomicUsize::new(0));
+let failures: Arc<Mutex<Vec<String>>> = Arc::new(Mutex::new(Vec::new()));
+let concurrent = Semaphore::new(concurrency.into());
let mut handles = Vec::new();
loop {
if let Ok(path_to_upload) = cacheable_rx.recv() {
let _permit = concurrent.acquire().await.unwrap();
+let failures = Arc::clone(&failures);
+let binary_cache = binary_cache.clone();
+let upload_count = Arc::clone(&upload_count);
handles.push(tokio::spawn(async move {
println!("uploading: {}", path_to_upload);
if Command::new("nix")
.arg("copy")
.arg("--to")
.arg("s3://nixcache?endpoint=s3.cy7.sh&secret-key=/home/yt/cache-priv-key.pem")
.arg(&binary_cache.to_string())
.arg(&path_to_upload)
.output()
.is_err()
{
println!("WARN: upload failed: {}", path_to_upload);
+failures.lock().unwrap().push(path_to_upload);
} else {
-count += 1;
+upload_count.fetch_add(1, Ordering::Relaxed);
}
}));
} else {
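A caveat with the loop above: the _permit from concurrent.acquire() is dropped at the end of each iteration, before the spawned upload finishes, so the semaphore never actually bounds the number of in-flight nix copy processes. Holding an owned permit inside each task would; a sketch (not part of this commit) assuming concurrent becomes an Arc<Semaphore>:

// Sketch: hold an owned permit for the lifetime of each upload task.
let concurrent = Arc::new(Semaphore::new(concurrency.into()));
while let Ok(path_to_upload) = cacheable_rx.recv() {
    let permit = Arc::clone(&concurrent).acquire_owned().await.unwrap();
    handles.push(tokio::spawn(async move {
        let _permit = permit; // released only when this task finishes
        println!("uploading: {}", path_to_upload); // then run `nix copy` as above
    }));
}

Relatedly, cacheable_rx.recv() is a blocking std::sync::mpsc call inside an async fn; tokio::sync::mpsc (or spawn_blocking) would avoid stalling the runtime, though that is a larger change.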
@@ -162,7 +199,16 @@ async fn uploader(cacheable_rx: mpsc::Receiver<String>) {
for handle in handles {
handle.await.unwrap();
}
println!("uploaded {} paths", count);
println!("uploaded {} paths", upload_count.load(Ordering::Relaxed));
+let failures = failures.lock().unwrap();
+if !failures.is_empty() {
+println!("failed to upload these paths:");
+for failure in failures.iter() {
+println!("{}", failure);
+}
+}
break;
}
}