diff --git a/.dockerignore b/.dockerignore index 707a6465..5d38ac92 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,8 +2,8 @@ target/ k6/ examples/ assets/ -.vscodes/ +.vscode/ .github/ scripts/* -!scripts/install_onnx.sh \ No newline at end of file +!scripts/install_onnx.sh diff --git a/Cargo.lock b/Cargo.lock index 3c66ac24..659f6af5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1006,6 +1006,7 @@ dependencies = [ "cpu_timer", "deno_core", "once_cell", + "opentelemetry", "tokio", "tokio-util", "tracing", @@ -1038,7 +1039,7 @@ dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.11.0", + "itertools 0.12.1", "log", "prettyplease", "proc-macro2", @@ -1742,7 +1743,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b28bfe653d79bd16c77f659305b195b82bb5ce0c0eb2a4846b82ddbd77586813" dependencies = [ "bitflags 2.6.0", - "libloading 0.7.4", + "libloading 0.8.1", "winapi", ] @@ -2176,8 +2177,6 @@ dependencies = [ [[package]] name = "deno_fetch" version = "0.206.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b190136d6226d3fcb4e48e80ca952272cdbd5ca4856818a33db4722ee73c8d29" dependencies = [ "base64 0.21.7", "bytes", @@ -2267,8 +2266,6 @@ dependencies = [ [[package]] name = "deno_http" version = "0.180.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15b121d076ca72659d5ede77585895954616efd0c09aed221f71e5b7f93334e" dependencies = [ "async-compression", "async-trait", @@ -2529,14 +2526,15 @@ dependencies = [ [[package]] name = "deno_telemetry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4cb531664acf92a67f468898367ca2d72e108059ebcfd7027cd8980a5b9734" +version = "0.17.0" dependencies = [ "async-trait", + "base_rt", "deno_core", + "deno_tls", "http-body-util", "hyper 1.5.2", + "hyper-rustls 0.27.4", "hyper-util", "log", "once_cell", @@ -3354,6 +3352,7 @@ dependencies = [ "deno_config", "deno_core", 
"deno_facade", + "deno_telemetry", "enum-as-inner", "ext_event_worker", "ext_runtime", @@ -6215,7 +6214,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.90", @@ -9180,7 +9179,7 @@ dependencies = [ "js-sys", "khronos-egl", "libc", - "libloading 0.7.4", + "libloading 0.8.1", "log", "metal", "naga", diff --git a/Cargo.toml b/Cargo.toml index 32c4cfab..08a27b03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ deno_package_json = { version = "0.2.1", default-features = false } deno_path_util = "=0.2.1" deno_permissions = "0.42.0" deno_semver = "=0.6.1" -deno_telemetry = "0.4.0" # upstream exts deno_broadcast_channel = "0.176.0" @@ -56,6 +55,7 @@ deno_fs = { version = "0.92.0", features = ["sync_fs"] } deno_http = "0.180.0" deno_io = "0.92.0" deno_net = "0.174.0" +deno_telemetry = "0.17.0" deno_tls = "0.169.0" deno_url = "0.182.0" deno_web = "0.213.0" @@ -108,6 +108,7 @@ anyhow = "1.0.57" async-trait = "0.1.73" base32 = "=0.5.1" base64 = "0.21.7" +bencher = "0.1" brotli = "6.0.0" bytes = "1.4.0" cache_control = "=0.2.0" @@ -117,9 +118,11 @@ cbc = { version = "=0.1.2", features = ["alloc"] } chrono = { version = "0.4", default-features = false, features = ["std", "serde"] } dashmap = "5.5.3" data-encoding = "2.3.3" +data-url = "=0.3.0" dotenvy = "0.15.7" ecb = "=0.1.2" elliptic-curve = { version = "0.13.4", features = ["alloc", "arithmetic", "ecdh", "std", "pem", "jwk"] } +fast-socks5 = "0.9.6" faster-hex = "0.9" fastwebsockets = { version = "0.8", features = ["upgrade", "unstable-split"] } flate2 = { version = "=1.0.30", default-features = false } @@ -127,19 +130,23 @@ fs3 = "0.5.0" futures = "0.3.21" glob = "0.3.1" h2 = "0.4.4" +hickory-resolver = { version = "0.25.0-alpha.4", features = ["tokio-runtime", "serde"] } http = "1.0" http-body = "1.0" http-body-util 
= "0.1.2" http_v02 = { package = "http", version = "0.2.9" } httparse = "1.8.0" hyper = { version = "1.4.1", features = ["full"] } +hyper-rustls = { version = "0.27.2", default-features = false, features = ["http1", "http2", "tls12", "ring"] } hyper-util = { version = "=0.1.7", features = ["tokio", "server", "server-auto"] } hyper_v014 = { package = "hyper", version = "0.14.26", features = ["runtime", "http1"] } indexmap = { version = "2", features = ["serde"] } +ipnet = "2.3" lazy-regex = "3" libc = "0.2.126" libz-sys = { version = "1.1.20", default-features = false } log = "0.4.20" +memmem = "0.1.1" monch = "=0.5.0" notify = "=6.1.1" num-bigint = { version = "0.4", features = ["rand"] } @@ -149,6 +156,7 @@ p256 = { version = "0.13.2", features = ["ecdh", "jwk"] } p384 = { version = "0.13.0", features = ["ecdh", "jwk"] } parking_lot = "0.12.0" percent-encoding = "2.3.0" +phf = { version = "0.11", features = ["macros"] } pin-project = "1.0.11" # don't pin because they yank crates from cargo rand = "=0.8.5" regex = "^1.7.0" @@ -158,6 +166,7 @@ ring = "^0.17.14" rustls = { version = "0.23.11", default-features = false, features = ["logging", "std", "tls12", "ring"] } rustls-pemfile = "2" rustls-tokio-stream = "=0.3.0" +rustls-webpki = "0.102" scopeguard = "1.2.0" sec1 = "0.7" serde = { version = "1.0.149", features = ["derive"] } @@ -165,12 +174,18 @@ serde_json = "1.0.85" sha1 = { version = "0.10.6", features = ["oid"] } sha2 = { version = "0.10.8", features = ["oid"] } signature = "2.1" +smallvec = "1.8" spki = "0.7.2" tar = "=0.4.40" tempfile = "3.4.0" thiserror = "2.0.3" tokio = { version = "1.36.0", features = ["full"] } +tokio-rustls = { version = "0.26.0", default-features = false, features = ["ring", "tls12"] } +tokio-socks = "0.5.1" tokio-util = "0.7.4" +tower = { version = "0.4.13", default-features = false, features = ["util"] } +tower-http = { version = "0.6.1", features = ["decompression-br", "decompression-gzip"] } +tower-service = "0.3.2" twox-hash = 
"=1.6.3" url = { version = "2.5", features = ["serde", "expose_internals"] } uuid = { version = "1.3.0", features = ["v4"] } @@ -178,6 +193,12 @@ webpki-root-certs = "0.26.5" webpki-roots = "0.26" yoke = { version = "0.7.4", features = ["derive"] } +opentelemetry = "0.27.0" +opentelemetry-http = "0.27.0" +opentelemetry-otlp = { version = "0.27.0", features = ["logs", "http-proto", "http-json"] } +opentelemetry-semantic-conventions = { version = "0.27.0", features = ["semconv_experimental"] } +opentelemetry_sdk = "0.27.0" + # upstream resolvers deno_npm_cache = "0.2.0" deno_resolver = "0.14.0" @@ -203,7 +224,11 @@ windows-sys = { version = "0.59.0", features = ["Win32_Foundation", "Win32_Media deno_core = { git = "https://github.com/supabase/deno_core", branch = "324-supabase" } eszip = { git = "https://github.com/supabase/eszip", branch = "fix-pub-vis-0-80-1" } v8 = { git = "https://github.com/supabase/rusty_v8", tag = "v130.0.7" } + deno_unsync = { path = "./vendor/deno_unsync" } +deno_fetch = { path = "./vendor/deno_fetch" } +deno_telemetry = { path = "./vendor/deno_telemetry" } +deno_http = { path = "./vendor/deno_http" } [profile.dind] inherits = "dev" diff --git a/cli/src/flags.rs b/cli/src/flags.rs index 7ce0af9e..334a37ec 100644 --- a/cli/src/flags.rs +++ b/cli/src/flags.rs @@ -11,6 +11,7 @@ use clap::ArgAction; use clap::ArgGroup; use clap::Command; use clap::ValueEnum; +use deno::deno_telemetry; use deno_facade::Checksum; #[derive(ValueEnum, Default, Clone, Copy)] @@ -28,6 +29,30 @@ impl From for Option { } } +#[derive(ValueEnum, Clone, Copy)] +pub(super) enum OtelKind { + Main, + Event, +} + +#[derive(ValueEnum, Default, Clone, Copy)] +pub(super) enum OtelConsoleConfig { + #[default] + Ignore, + Capture, + Replace, +} + +impl From for deno_telemetry::OtelConsoleConfig { + fn from(value: OtelConsoleConfig) -> Self { + match value { + OtelConsoleConfig::Ignore => Self::Ignore, + OtelConsoleConfig::Capture => Self::Capture, + OtelConsoleConfig::Replace => 
Self::Replace, + } + } +} + pub(super) fn get_cli() -> Command { Command::new(env!("CARGO_BIN_NAME")) .about(env!("CARGO_PKG_DESCRIPTION")) @@ -278,6 +303,21 @@ fn get_start_command() -> Command { .value_parser(value_parser!(u8).range(..=99)) .default_value("90"), ) + .arg( + arg!(--"enable-otel") + .help("Enable OpenTelemetry in the main and event workers") + .value_delimiter(',') + .value_parser(value_parser!(OtelKind)) + .num_args(0..=1) + .default_missing_value("main,event") + .action(ArgAction::Append), + ) + .arg( + arg!(--"otel-console" ) + // .env("OTEL_DENO_CONSOLE") + .help("Configure console auto instrumentation for OpenTelemetry Logs") + .value_parser(value_parser!(OtelConsoleConfig)), + ) } fn get_bundle_command() -> Command { diff --git a/cli/src/main.rs b/cli/src/main.rs index 683dcedf..4976c165 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -10,6 +10,7 @@ use std::sync::Arc; use anyhow::bail; use anyhow::Context; use anyhow::Error; +use base::server; use base::server::Builder; use base::server::ServerFlags; use base::server::Tls; @@ -19,6 +20,8 @@ use base::worker::pool::WorkerPoolPolicy; use base::CacheSetting; use base::InspectorOption; use base::WorkerKind; +use deno::deno_telemetry; +use deno::deno_telemetry::OtelConfig; use deno::ConfigMode; use deno::DenoOptionsBuilder; use deno_facade::extract_from_file; @@ -28,6 +31,8 @@ use deno_facade::Metadata; use env::resolve_deno_runtime_env; use flags::get_cli; use flags::EszipV2ChecksumKind; +use flags::OtelConsoleConfig; +use flags::OtelKind; use log::warn; mod env; @@ -79,6 +84,11 @@ fn main() -> Result { #[allow(clippy::arc_with_non_send_sync)] let exit_code = match matches.subcommand() { Some(("start", sub_matches)) => { + deno_telemetry::init( + deno::versions::otel_runtime_config(), + OtelConfig::default(), + )?; + let ip = sub_matches.get_one::("ip").cloned().unwrap(); let ip = IpAddr::from_str(&ip) .context("failed to parse the IP address to bind the server")?; @@ -121,6 +131,17 @@ fn 
main() -> Result { .cloned() .unwrap(); + let enable_otel = sub_matches + .get_many::("enable-otel") + .unwrap_or_default() + .cloned() + .collect::>(); + + let otel_console = sub_matches + .get_one::("otel-console") + .cloned() + .map(Into::into); + let event_service_manager_path = sub_matches.get_one::("event-worker").cloned(); let maybe_main_entrypoint = @@ -203,6 +224,21 @@ fn main() -> Result { .unwrap(); let flags = ServerFlags { + otel: if !enable_otel.is_empty() { + if enable_otel.len() > 1 { + Some(server::OtelKind::Both) + } else { + match enable_otel.first() { + Some(OtelKind::Main) => Some(server::OtelKind::Main), + Some(OtelKind::Event) => Some(server::OtelKind::Event), + None => None, + } + } + } else { + None + }, + otel_console, + no_module_cache, allow_main_inspector, tcp_nodelay, diff --git a/crates/base/src/runtime/mod.rs b/crates/base/src/runtime/mod.rs index f3e7b66b..a0a8f3c6 100644 --- a/crates/base/src/runtime/mod.rs +++ b/crates/base/src/runtime/mod.rs @@ -23,6 +23,7 @@ use base_rt::get_current_cpu_time_ns; use base_rt::BlockingScopeCPUUsage; use base_rt::DenoRuntimeDropToken; use base_rt::DropToken; +use base_rt::RuntimeOtelExtraAttributes; use base_rt::RuntimeState; use base_rt::RuntimeWaker; use cooked_waker::IntoWaker; @@ -39,6 +40,7 @@ use deno::deno_io; use deno::deno_net; use deno::deno_package_json; use deno::deno_telemetry; +use deno::deno_telemetry::OtelConfig; use deno::deno_tls; use deno::deno_tls::deno_native_certs::load_native_certs; use deno::deno_tls::rustls::RootCertStore; @@ -53,6 +55,7 @@ use deno_cache::SqliteBackedCache; use deno_core::error::AnyError; use deno_core::error::JsError; use deno_core::serde_json; +use deno_core::serde_json::Value; use deno_core::url::Url; use deno_core::v8; use deno_core::v8::GCCallbackFlags; @@ -240,6 +243,7 @@ pub trait GetRuntimeContext { use_inspector: bool, migrated: bool, version: Option<&str>, + otel_config: Option, ) -> impl Serialize { serde_json::json!({ "target": env!("TARGET"), 
@@ -265,7 +269,8 @@ pub trait GetRuntimeContext { .get() .copied() .unwrap_or_default() - } + }, + "otel": otel_config.unwrap_or_default().as_v8(), }) } @@ -471,6 +476,7 @@ where static_patterns, maybe_s3_fs_config, maybe_tmp_fs_config, + maybe_otel_config, .. } = init_opts.unwrap(); @@ -1019,6 +1025,7 @@ where has_inspector, migrated, option_env!("GIT_V_TAG"), + maybe_otel_config, )); let tokens = { @@ -1099,6 +1106,7 @@ where s3_fs, beforeunload_cpu_threshold, beforeunload_mem_threshold, + context, .. } = match bootstrap_ret { Ok(Ok(v)) => v, @@ -1110,6 +1118,7 @@ where } }; + let context = context.unwrap_or_default(); let span = Span::current(); let post_task_ret = unsafe { spawn_blocking_non_send(|| { @@ -1137,14 +1146,31 @@ where op_state.put::>(HashMap::new()); } + let mut otel_attributes = HashMap::new(); + + otel_attributes.insert( + "edge_runtime.worker.kind".into(), + conf.to_worker_kind().to_string().into(), + ); + if conf.is_user_worker() { let conf = conf.as_user_worker().unwrap(); + let key = conf.key.map_or("".to_string(), |k| k.to_string()); // set execution id for user workers - env_vars.insert( - "SB_EXECUTION_ID".to_string(), - conf.key.map_or("".to_string(), |k| k.to_string()), - ); + env_vars.insert("SB_EXECUTION_ID".to_string(), key.clone()); + + if let Some(Value::Object(attributes)) = context.get("otel") { + for (k, v) in attributes { + otel_attributes.insert( + k.to_string().into(), + match v { + Value::String(str) => str.to_string().into(), + others => others.to_string().into(), + }, + ); + } + } if let Some(events_msg_tx) = conf.events_msg_tx.clone() { op_state.put::>( @@ -1159,6 +1185,7 @@ where op_state.put(ext_env::EnvVars(env_vars)); op_state.put(DenoRuntimeDropToken(DropToken(drop_token.clone()))); + op_state.put(RuntimeOtelExtraAttributes(otel_attributes)); } if is_user_worker { @@ -2451,6 +2478,7 @@ mod test { maybe_s3_fs_config: s3_fs_config, maybe_tmp_fs_config: tmp_fs_config, + maybe_otel_config: None, }, Arc::default(), ) @@ 
-2554,6 +2582,7 @@ mod test { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }, Arc::default(), ) @@ -2619,6 +2648,7 @@ mod test { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }, Arc::default(), ) @@ -2706,6 +2736,7 @@ mod test { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }, Arc::default(), ) diff --git a/crates/base/src/server.rs b/crates/base/src/server.rs index a3e228f2..d41ea829 100644 --- a/crates/base/src/server.rs +++ b/crates/base/src/server.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::future::pending; use std::future::Future; use std::net::SocketAddr; @@ -11,6 +12,9 @@ use anyhow::anyhow; use anyhow::bail; use anyhow::Context; use anyhow::Error; +use deno::deno_telemetry::OtelConfig; +use deno::deno_telemetry::OtelConsoleConfig; +use deno::deno_telemetry::OtelPropagators; use deno_core::serde_json; use deno_core::serde_json::json; use either::Either; @@ -267,8 +271,18 @@ pub struct WorkerEntrypoints { pub events: Option, } +#[derive(Debug, Clone, Copy)] +pub enum OtelKind { + Main, + Event, + Both, +} + #[derive(Debug, Default, Clone, Copy)] pub struct ServerFlags { + pub otel: Option, + pub otel_console: Option, + pub no_module_cache: bool, pub allow_main_inspector: bool, pub tcp_nodelay: bool, @@ -401,6 +415,18 @@ impl Server { builder.set_entrypoint(maybe_event_entrypoint.as_deref()); + if let Some(OtelKind::Event | OtelKind::Both) = flags.otel { + builder.set_otel_config(Some(OtelConfig { + tracing_enabled: true, + console: flags.otel_console.unwrap_or_default(), + propagators: HashSet::from([ + OtelPropagators::TraceContext, + OtelPropagators::Baggage, + ]), + ..Default::default() + })); + } + Some(builder.build().await?) 
} else { None @@ -444,6 +470,18 @@ impl Server { event_worker_surface.as_ref().map(|it| it.metric.clone()), ); + if let Some(OtelKind::Main | OtelKind::Both) = flags.otel { + builder.set_otel_config(Some(OtelConfig { + tracing_enabled: true, + console: flags.otel_console.unwrap_or_default(), + propagators: HashSet::from([ + OtelPropagators::TraceContext, + OtelPropagators::Baggage, + ]), + ..Default::default() + })); + } + builder.build().await? }; diff --git a/crates/base/src/utils/test_utils.rs b/crates/base/src/utils/test_utils.rs index bdca774d..b6ce1aa5 100644 --- a/crates/base/src/utils/test_utils.rs +++ b/crates/base/src/utils/test_utils.rs @@ -257,6 +257,7 @@ impl TestBedBuilder { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }) .build() .await diff --git a/crates/base/src/worker/pool.rs b/crates/base/src/worker/pool.rs index 318a7844..4c58a9e1 100644 --- a/crates/base/src/worker/pool.rs +++ b/crates/base/src/worker/pool.rs @@ -287,6 +287,7 @@ impl WorkerPool { if tx .send(Ok(CreateUserWorkerResult { key: *active_worker_uuid, + reused: true, })) .is_err() { @@ -395,6 +396,7 @@ impl WorkerPool { maybe_s3_fs_config, maybe_tmp_fs_config, static_patterns, + maybe_otel_config: otel_config, .. 
} = worker_options; @@ -413,6 +415,7 @@ impl WorkerPool { maybe_s3_fs_config, maybe_tmp_fs_config, + maybe_otel_config: otel_config, }, tx, )) @@ -466,7 +469,8 @@ impl WorkerPool { builder .set_termination_token(termination_token.clone()) - .set_inspector(inspector); + .set_inspector(inspector) + .set_eager_module_init(true); match builder.build().await { Ok(surface) => { @@ -486,7 +490,13 @@ impl WorkerPool { { error!("user worker msgs receiver dropped") } - if tx.send(Ok(CreateUserWorkerResult { key: uuid })).is_err() { + if tx + .send(Ok(CreateUserWorkerResult { + key: uuid, + reused: false, + })) + .is_err() + { error!("main worker receiver dropped") }; } diff --git a/crates/base/src/worker/worker_surface_creation.rs b/crates/base/src/worker/worker_surface_creation.rs index 43e248b6..85a5d13c 100644 --- a/crates/base/src/worker/worker_surface_creation.rs +++ b/crates/base/src/worker/worker_surface_creation.rs @@ -3,6 +3,7 @@ use std::path::PathBuf; use std::sync::Arc; use anyhow::Context; +use deno::deno_telemetry::OtelConfig; use deno_facade::EszipPayloadKind; use either::Either; use ext_event_worker::events::BootEvent; @@ -493,6 +494,7 @@ pub struct MainWorkerSurfaceBuilder { worker_pool_tx: Option>, shared_metric_src: Option, event_worker_metric_src: Option, + otel_config: Option, } impl std::ops::Deref for MainWorkerSurfaceBuilder { @@ -524,6 +526,7 @@ impl MainWorkerSurfaceBuilder { worker_pool_tx: None, shared_metric_src: None, event_worker_metric_src: None, + otel_config: None, } } @@ -555,6 +558,11 @@ impl MainWorkerSurfaceBuilder { self } + pub fn otel_config(mut self, value: OtelConfig) -> Self { + self.otel_config = Some(value); + self + } + pub fn set_no_module_cache(&mut self, value: Option) -> &mut Self { self.no_module_cache = value; self @@ -589,6 +597,11 @@ impl MainWorkerSurfaceBuilder { self } + pub fn set_otel_config(&mut self, value: Option) -> &mut Self { + self.otel_config = value; + self + } + pub async fn build(self) -> Result { let 
Self { mut inner, @@ -598,6 +611,7 @@ impl MainWorkerSurfaceBuilder { worker_pool_tx, shared_metric_src, event_worker_metric_src, + otel_config, } = self; let flags = inner.flags.as_ref().cloned().unwrap_or_default(); @@ -615,7 +629,6 @@ impl MainWorkerSurfaceBuilder { inner.set_init_opts(Some(WorkerContextInitOpts { service_path, - // import_map_path, no_module_cache: no_module_cache.unwrap_or(flags.no_module_cache), timing: None, @@ -634,6 +647,7 @@ impl MainWorkerSurfaceBuilder { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: otel_config, })); Ok(MainWorkerSurface( @@ -677,9 +691,8 @@ pub struct EventWorkerSurfaceBuilder { event_worker_path: PathBuf, no_module_cache: Option, - // import_map_path: Option, entrypoint: Option, - // decorator: Option, + otel_config: Option, } impl std::ops::Deref for EventWorkerSurfaceBuilder { @@ -707,6 +720,7 @@ impl EventWorkerSurfaceBuilder { event_worker_path: event_worker_path.as_ref().to_path_buf(), no_module_cache: None, entrypoint: None, + otel_config: None, } } @@ -720,6 +734,11 @@ impl EventWorkerSurfaceBuilder { self } + pub fn otel_config(mut self, value: OtelConfig) -> Self { + self.otel_config = Some(value); + self + } + pub fn set_no_module_cache(&mut self, value: Option) -> &mut Self { self.no_module_cache = value; self @@ -730,12 +749,18 @@ impl EventWorkerSurfaceBuilder { self } + pub fn set_otel_config(&mut self, value: Option) -> &mut Self { + self.otel_config = value; + self + } + pub async fn build(self) -> Result { let Self { mut inner, event_worker_path, no_module_cache, entrypoint, + otel_config, } = self; let (event_msg_tx, event_msg_rx) = @@ -773,6 +798,7 @@ impl EventWorkerSurfaceBuilder { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: otel_config, })); Ok(EventWorkerSurface { diff --git a/crates/base/tests/eszip_migration.rs b/crates/base/tests/eszip_migration.rs index b1a3d96a..cfe03a87 100644 --- a/crates/base/tests/eszip_migration.rs +++ 
b/crates/base/tests/eszip_migration.rs @@ -131,6 +131,7 @@ where maybe_entrypoint, maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }) .build() .await; diff --git a/crates/base/tests/integration_tests.rs b/crates/base/tests/integration_tests.rs index 92fdfe0e..80020eb7 100644 --- a/crates/base/tests/integration_tests.rs +++ b/crates/base/tests/integration_tests.rs @@ -226,6 +226,7 @@ async fn test_not_trigger_pku_sigsegv_due_to_jit_compilation_non_cli() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }) .termination_token(main_termination_token.clone()) .build() @@ -384,6 +385,7 @@ async fn test_main_worker_boot_error() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }) .termination_token(main_termination_token.clone()) .build() @@ -506,6 +508,7 @@ async fn test_main_worker_user_worker_mod_evaluate_exception() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }) .termination_token(main_termination_token.clone()) .build() @@ -883,6 +886,7 @@ async fn test_worker_boot_invalid_imports() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }; let result = create_test_user_worker(opts).await; @@ -910,6 +914,7 @@ async fn test_worker_boot_with_0_byte_eszip() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }; let result = create_test_user_worker(opts).await; @@ -936,6 +941,7 @@ async fn test_worker_boot_with_invalid_entrypoint() { maybe_s3_fs_config: None, maybe_tmp_fs_config: None, + maybe_otel_config: None, }; let result = create_test_user_worker(opts).await; @@ -3821,7 +3827,7 @@ async fn test_should_be_able_to_trigger_early_drop_with_mem() { let resp = tb .request(|b| { b.uri("/early-drop-mem") - .header("x-memory-limit-mb", HeaderValue::from_static("22")) + .header("x-memory-limit-mb", HeaderValue::from_static("30")) .body(Body::empty()) .context("can't make request") }) diff 
--git a/crates/base_rt/Cargo.toml b/crates/base_rt/Cargo.toml index f4cf6e39..e0db2ac9 100644 --- a/crates/base_rt/Cargo.toml +++ b/crates/base_rt/Cargo.toml @@ -11,6 +11,7 @@ deno_core.workspace = true cpu_timer.workspace = true once_cell.workspace = true +opentelemetry.workspace = true tokio.workspace = true tokio-util = { workspace = true, features = ["rt"] } tracing.workspace = true diff --git a/crates/base_rt/src/lib.rs b/crates/base_rt/src/lib.rs index 905a1567..8caf49d4 100644 --- a/crates/base_rt/src/lib.rs +++ b/crates/base_rt/src/lib.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::num::NonZeroUsize; use std::sync::atomic::AtomicI64; use std::sync::atomic::Ordering; @@ -198,3 +199,8 @@ impl BlockingScopeCPUUsageMetricExt for &mut OpState { #[derive(Debug, Clone)] pub struct RuntimeWaker(pub Arc); + +#[derive(Debug, Clone)] +pub struct RuntimeOtelExtraAttributes( + pub HashMap, +); diff --git a/deno/versions.rs b/deno/versions.rs index b5924933..152f8912 100644 --- a/deno/versions.rs +++ b/deno/versions.rs @@ -1,3 +1,9 @@ +use std::borrow::Cow; + +use deno_telemetry::OtelRuntimeConfig; + +use crate::version; + pub fn deno() -> &'static str { concat!("Supa:", "0") } @@ -9,3 +15,10 @@ pub fn get_user_agent() -> &'static str { pub fn is_canary() -> bool { false } + +pub fn otel_runtime_config() -> OtelRuntimeConfig { + OtelRuntimeConfig { + runtime_name: Cow::Borrowed("deno"), + runtime_version: Cow::Borrowed(version()), + } +} diff --git a/examples/main/index.ts b/examples/main/index.ts index a9dc3cdc..45779bd4 100644 --- a/examples/main/index.ts +++ b/examples/main/index.ts @@ -3,6 +3,12 @@ import { STATUS_CODE } from "https://deno.land/std/http/status.ts"; import { handleRegistryRequest } from "./registry/mod.ts"; import { join } from "jsr:@std/path@^1.0"; +import { context, propagation } from "npm:@opentelemetry/api"; +import { W3CBaggagePropagator } from "npm:@opentelemetry/core@1"; + +// @ts-ignore See 
https://github.com/denoland/deno/issues/28082 +globalThis[Symbol.for("opentelemetry.js.api.1")].propagation = + new W3CBaggagePropagator(); console.log("main function started"); console.log(Deno.version); @@ -42,6 +48,17 @@ addEventListener("unhandledrejection", (ev) => { // }, 30 * 1000); Deno.serve(async (req: Request) => { + const ctx = propagation.extract(context.active(), req.headers, { + get(carrier, key) { + return carrier.get(key) ?? void 0; + }, + keys(carrier) { + return [...carrier.keys()]; + }, + }); + const baggage = propagation.getBaggage(ctx); + const requestId = baggage?.getEntry("sb-request-id")?.value ?? null; + const headers = new Headers({ "Content-Type": "application/json", }); @@ -149,7 +166,7 @@ Deno.serve(async (req: Request) => { // console.error(`serving the request with ${servicePath}`); - const createWorker = async () => { + const createWorker = async (otelAttributes?: { [_: string]: string }) => { const memoryLimitMb = 150; const workerTimeoutMs = 5 * 60 * 1000; const noModuleCache = false; @@ -184,6 +201,11 @@ Deno.serve(async (req: Request) => { staticPatterns, context: { useReadSyncFileAPI: true, + otel: otelAttributes, + }, + otelConfig: { + tracing_enabled: true, + propagators: ["TraceContext", "Baggage"], }, // maybeEszip, // maybeEntrypoint, @@ -196,7 +218,14 @@ Deno.serve(async (req: Request) => { // If a worker for the given service path already exists, // it will be reused by default. // Update forceCreate option in createWorker to force create a new worker for each request. - const worker = await createWorker(); + const worker = await createWorker( + requestId + ? 
{ + "sb_request_id": requestId, + } + : void 0, + ); + const controller = new AbortController(); const signal = controller.signal; diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index 44417c15..b389f4bf 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -13,6 +13,7 @@ import { import { TextEncoder } from "ext:deno_web/08_text_encoding.js"; import { setTimeout } from "ext:deno_web/02_timers.js"; +import { updateSpanFromError } from "ext:deno_telemetry/util.ts"; import { _normalizeArgs, // createConnection, @@ -65,6 +66,14 @@ import { getTimerDuration } from "ext:deno_node/internal/timers.mjs"; // import { serve, upgradeHttpRaw } from "ext:deno_http/00_serve.ts"; import { createHttpClient } from "ext:deno_fetch/22_http_client.js"; import { headersEntries } from "ext:deno_fetch/20_headers.js"; +import { + builtinTracer, + ContextManager, + enterSpan, + PROPAGATORS, + restoreSnapshot, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; import { timerId } from "ext:deno_web/03_abort_signal.js"; import { clearTimeout as webClearTimeout } from "ext:deno_web/02_timers.js"; import { resourceForReadableStream } from "ext:deno_web/06_streams.js"; @@ -74,7 +83,8 @@ import { methods as METHODS } from "node:_http_common"; import { deprecate } from "node:util"; const { internalRidSymbol } = core; -const { ArrayIsArray, StringPrototypeToLowerCase } = primordials; +const { ArrayIsArray, StringPrototypeToLowerCase, SafeArrayIterator } = + primordials; type Chunk = string | Buffer | Uint8Array; @@ -458,17 +468,49 @@ class ClientRequest extends OutgoingMessage { this._bodyWriteRid = resourceForReadableStream(readable); } - this._req = op_node_http_request( - this.method, - url, - headers, - client[internalRidSymbol], - this._bodyWriteRid, - ); - + let span; + let snapshot; + if (TRACING_ENABLED) { + span = builtinTracer().startSpan(this.method, { kind: 2 }); // Kind 2 = Client + snapshot = enterSpan(span); + } (async () => { try 
{ + const parsedUrl = new URL(url); + if (span) { + const context = ContextManager.active(); + for (const propagator of new SafeArrayIterator(PROPAGATORS)) { + propagator.inject(context, headers, { + set(carrier, key, value) { + carrier.push([key, value]); + }, + }); + } + span.setAttribute("http.request.method", this.method); + span.setAttribute("url.full", parsedUrl.href); + span.setAttribute("url.scheme", parsedUrl.protocol.slice(0, -1)); + span.setAttribute("url.path", parsedUrl.pathname); + span.setAttribute("url.query", parsedUrl.search.slice(1)); + } + + this._req = op_node_http_request( + this.method, + url, + headers, + client[internalRidSymbol], + this._bodyWriteRid, + ); + const res = await op_node_http_fetch_send(this._req.requestRid); + + if (span) { + span.setAttribute("http.response.status_code", res.status); + if (res.status >= 400) { + span.setAttribute("error.type", String(res.status)); + span.setStatus({ code: 2 }); // Code 2 = Error + } + } + if (this._req.cancelHandleRid !== null) { core.tryClose(this._req.cancelHandleRid); } @@ -552,6 +594,10 @@ class ClientRequest extends OutgoingMessage { this.emit("response", incoming); } } catch (err) { + if (span) { + updateSpanFromError(span, err); + } + if (this._req.cancelHandleRid !== null) { core.tryClose(this._req.cancelHandleRid); } @@ -577,8 +623,14 @@ class ClientRequest extends OutgoingMessage { } else { this.emit("error", err); } + } finally { + span?.end(); } })(); + + if (snapshot) { + restoreSnapshot(snapshot); + } } _implicitHeader() { diff --git a/ext/runtime/js/bootstrap.js b/ext/runtime/js/bootstrap.js index 5bc87418..5dc96bc3 100644 --- a/ext/runtime/js/bootstrap.js +++ b/ext/runtime/js/bootstrap.js @@ -47,6 +47,7 @@ import { installPromiseHook } from "ext:runtime/async_hook.js"; import { registerErrors } from "ext:runtime/errors.js"; import { denoOverrides, fsVars } from "ext:runtime/denoOverrides.js"; import { registerDeclarativeServer } from "ext:runtime/00_serve.js"; +import { 
bootstrap as bootstrapOtel } from "ext:deno_telemetry/telemetry.ts"; import { formatException, @@ -529,7 +530,8 @@ globalThis.bootstrapSBEdge = (opts, ctx) => { * flags: { * SHOULD_DISABLE_DEPRECATED_API_WARNING: boolean, * SHOULD_USE_VERBOSE_DEPRECATED_API_WARNING: boolean - * } + * }, + * otel: [] | [number, number] * }} */ const { @@ -539,6 +541,7 @@ globalThis.bootstrapSBEdge = (opts, ctx) => { version, inspector, flags, + otel, } = opts; deprecatedApiWarningDisabled = flags["SHOULD_DISABLE_DEPRECATED_API_WARNING"]; @@ -580,6 +583,26 @@ globalThis.bootstrapSBEdge = (opts, ctx) => { typescript: "5.1.6", })), }); + + if (inspector) { + ObjectDefineProperties(globalThis, { + console: nonEnumerable(v8Console), + }); + } + + if (kind === "user" && !inspector) { + // override console + ObjectDefineProperties(globalThis, { + console: nonEnumerable( + new console.Console((msg, level) => { + return ops.op_user_worker_log(msg, level > 1); + }), + ), + }); + } + + bootstrapOtel(otel); + ObjectDefineProperty(globalThis, "Deno", readOnly(denoOverrides)); setNumCpus(1); // explicitly setting no of CPUs to 1 (since we don't allow workers) @@ -636,26 +659,9 @@ globalThis.bootstrapSBEdge = (opts, ctx) => { delete globalThis.SharedArrayBuffer; globalThis.WebAssembly.Memory = patchedWasmMemoryCtor; - if (inspector) { - ObjectDefineProperties(globalThis, { - console: nonEnumerable(v8Console), - }); - } - /// DISABLE SHARED MEMORY INSTALL MEM CHECK TIMING if (kind === "user") { - // override console - if (!inspector) { - ObjectDefineProperties(globalThis, { - console: nonEnumerable( - new console.Console((msg, level) => { - return ops.op_user_worker_log(msg, level > 1); - }), - ), - }); - } - const apisToBeOverridden = { ...DENIED_DENO_FS_API_LIST, diff --git a/ext/runtime/js/http.js b/ext/runtime/js/http.js index 79ff82ff..5c503f07 100644 --- a/ext/runtime/js/http.js +++ b/ext/runtime/js/http.js @@ -8,11 +8,34 @@ import { } from "ext:deno_fetch/23_response.js"; import { 
upgradeWebSocket } from "ext:deno_http/02_websocket.ts"; import { HttpConn } from "ext:runtime/01_http.js"; +import { + builtinTracer, + ContextManager, + currentSnapshot, + enterSpan, + PROPAGATORS, + restoreSnapshot, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; +import { + updateSpanFromRequest, + updateSpanFromResponse, +} from "ext:deno_telemetry/util.ts"; const ops = core.ops; -const { BadResourcePrototype, internalRidSymbol } = core; -const { ObjectPrototypeIsPrototypeOf } = primordials; +const { + BadResourcePrototype, + internalRidSymbol, +} = core; +const { + ArrayPrototypeFind, + ArrayPrototypeMap, + ArrayPrototypePush, + SafeArrayIterator, + ObjectPrototypeIsPrototypeOf, + SafePromisePrototypeFinally, +} = primordials; const HttpConnPrototypeNextRequest = HttpConn.prototype.nextRequest; const HttpConnPrototypeClose = HttpConn.prototype.close; @@ -100,6 +123,7 @@ function serve(args1, args2) { }; const listener = Deno.listen(options); + const snapshot = currentSnapshot(); if (typeof args1 === "function") { options["handler"] = args1; @@ -133,7 +157,7 @@ function serve(args1, args2) { // the case of h2. 
// // [1]: https://deno.land/std@0.131.0/http/server.ts?source=#L338 - respond(requestEvent, currentHttpConn, options).then(() => { + respond(requestEvent, currentHttpConn, options, snapshot).then(() => { ACTIVE_REQUESTS--; }); } @@ -195,61 +219,132 @@ function serve(args1, args2) { }; } -async function respond(requestEvent, httpConn, options) { - /** @type {Response} */ - let response; - try { - response = await options["handler"](requestEvent.request, { - remoteAddr: { - port: options.port, - hostname: options.hostname, - transport: options.transport, - }, - }); - } catch (error) { - if (options["onError"] !== void 0) { - /** @throwable */ - response = await options["onError"](error); - } else { - console.error(error); - response = internalServerError(); - } - } +async function respond(requestEvent, httpConn, options, snapshot) { + const mapped = async function (requestEvent, httpConn, options, span) { + /** @type {Response} */ + let response; + try { + if (span) { + updateSpanFromRequest(span, requestEvent.request); + } - if (response === internals.RAW_UPGRADE_RESPONSE_SENTINEL) { - const { fenceRid } = getSupabaseTag(requestEvent.request); + response = await options["handler"](requestEvent.request, { + remoteAddr: { + port: options.port, + hostname: options.hostname, + transport: options.transport, + }, + }); + } catch (error) { + if (options["onError"] !== void 0) { + /** @throwable */ + response = await options["onError"](error); + } else { + console.error(error); + response = internalServerError(); + } + } - if (fenceRid === void 0) { - throw TypeError("Cannot find a fence for upgrading response"); + if (span) { + updateSpanFromResponse(span, response); } - setTimeout(async () => { - const { - status, - headers, - } = await ops.op_http_upgrade_raw2_fence(fenceRid); + if (response === internals.RAW_UPGRADE_RESPONSE_SENTINEL) { + const { fenceRid } = getSupabaseTag(requestEvent.request); + + if (fenceRid === void 0) { + throw TypeError("Cannot find a fence for 
upgrading response"); + } + setTimeout(async () => { + const { + status, + headers, + } = await ops.op_http_upgrade_raw2_fence(fenceRid); + + try { + await requestEvent.respondWith( + new Response(null, { + headers, + status, + }), + ); + } catch (error) { + console.error(error); + closeHttpConn(httpConn); + } + }); + } else { try { - await requestEvent.respondWith( - new Response(null, { - headers, - status, - }), - ); - } catch (error) { - console.error(error); - closeHttpConn(httpConn); + // send the response + await requestEvent.respondWith(response); + } catch { + // respondWith() fails when the connection has already been closed, + // or there is some other error with responding on this connection + // that prompts us to close it and open a new connection. + return closeHttpConn(httpConn); } - }); + } + }; + + if (TRACING_ENABLED) { + const oldSnapshot = currentSnapshot(); + restoreSnapshot(snapshot); + + const reqHeaders = requestEvent.request.headers; + const headers = []; + for (const key of reqHeaders.keys()) { + ArrayPrototypePush(headers, [key, reqHeaders.get(key)]); + } + let activeContext = ContextManager.active(); + for (const propagator of new SafeArrayIterator(PROPAGATORS)) { + activeContext = propagator.extract(activeContext, headers, { + get(carrier, key) { + return ArrayPrototypeFind( + carrier, + (carrierEntry) => carrierEntry[0] === key, + )?.[1]; + }, + keys(carrier) { + return ArrayPrototypeMap( + carrier, + (carrierEntry) => carrierEntry[0], + ); + }, + }); + } + + const span = builtinTracer().startSpan( + "deno.serve", + { kind: 1 }, + activeContext, + ); + enterSpan(span); + try { + return SafePromisePrototypeFinally( + mapped( + requestEvent, + httpConn, + options, + span, + ), + () => span.end(), + ); + } finally { + restoreSnapshot(oldSnapshot); + } } else { + const oldSnapshot = currentSnapshot(); + restoreSnapshot(snapshot); try { - // send the response - await requestEvent.respondWith(response); - } catch { - // respondWith() fails 
when the connection has already been closed, - // or there is some other error with responding on this connection - // that prompts us to close it and open a new connection. - return closeHttpConn(httpConn); + return mapped( + requestEvent, + httpConn, + options, + undefined, + ); + } finally { + restoreSnapshot(oldSnapshot); } } } diff --git a/ext/runtime/js/namespaces.js b/ext/runtime/js/namespaces.js index 367d8a02..7bff9c57 100644 --- a/ext/runtime/js/namespaces.js +++ b/ext/runtime/js/namespaces.js @@ -4,6 +4,12 @@ import { MAIN_WORKER_API, USER_WORKER_API } from "ext:ai/ai.js"; import { SUPABASE_USER_WORKERS } from "ext:user_workers/user_workers.js"; import { applySupabaseTag } from "ext:runtime/http.js"; import { waitUntil } from "ext:runtime/async_hook.js"; +import { + builtinTracer, + enterSpan, + METRICS_ENABLED, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; const ops = core.ops; const { ObjectDefineProperty } = primordials; @@ -33,6 +39,10 @@ function installEdgeRuntimeNamespace(kind, terminationRequestTokenRid) { case "event": props = { + builtinTracer, + enterSpan, + METRICS_ENABLED, + TRACING_ENABLED, ...props, }; break; diff --git a/ext/workers/Cargo.toml b/ext/workers/Cargo.toml index 676d4a7f..8f560a4b 100644 --- a/ext/workers/Cargo.toml +++ b/ext/workers/Cargo.toml @@ -12,6 +12,7 @@ path = "lib.rs" deno.workspace = true deno_config.workspace = true deno_core.workspace = true +deno_telemetry.workspace = true deno_facade.workspace = true fs.workspace = true diff --git a/ext/workers/context.rs b/ext/workers/context.rs index 75f9ad2a..8902ade8 100644 --- a/ext/workers/context.rs +++ b/ext/workers/context.rs @@ -9,6 +9,7 @@ use deno::deno_permissions::PermissionsOptions; use deno_core::unsync::sync::AtomicFlag; use deno_core::FastString; use deno_facade::EszipPayloadKind; +use deno_telemetry::OtelConfig; use enum_as_inner::EnumAsInner; use ext_event_worker::events::UncaughtExceptionEvent; use 
ext_event_worker::events::WorkerEventWithMetadata; @@ -260,6 +261,7 @@ pub struct WorkerContextInitOpts { pub maybe_entrypoint: Option, pub maybe_s3_fs_config: Option, pub maybe_tmp_fs_config: Option, + pub maybe_otel_config: Option, } #[derive(Debug)] @@ -284,6 +286,7 @@ pub type SendRequestResult = (Response, mpsc::UnboundedSender<()>); #[derive(Debug)] pub struct CreateUserWorkerResult { pub key: Uuid, + pub reused: bool, } #[derive(Debug)] diff --git a/ext/workers/lib.rs b/ext/workers/lib.rs index 8bb258f4..4b902f0c 100644 --- a/ext/workers/lib.rs +++ b/ext/workers/lib.rs @@ -32,6 +32,9 @@ use deno_core::Resource; use deno_core::ResourceId; use deno_core::WriteOutcome; use deno_facade::EszipPayloadKind; +use deno_telemetry::OtelConfig; +use deno_telemetry::OtelConsoleConfig; +use deno_telemetry::OtelPropagators; use errors::WorkerError; use ext_runtime::conn_sync::ConnWatcher; use fs::s3_fs::S3FsConfig; @@ -50,6 +53,7 @@ use once_cell::sync::Lazy; use serde::Deserialize; use serde::Serialize; use std::cell::RefCell; +use std::collections::HashSet; use std::path::PathBuf; use std::pin::Pin; use std::rc::Rc; @@ -82,6 +86,18 @@ pub struct JsxImportBaseConfig { base_url: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JsOtelConfig { + #[serde(default)] + tracing_enabled: bool, + #[serde(default)] + metrics_enabled: bool, + #[serde(default)] + console: OtelConsoleConfig, + #[serde(default)] + propagators: HashSet, +} + pub type JsonMap = serde_json::Map; #[derive(Deserialize, Serialize, Default, Debug)] @@ -94,7 +110,7 @@ pub struct UserWorkerCreateOptions { force_create: bool, allow_remote_modules: bool, custom_module_root: Option, - permissions: Option, + permissions: Option, maybe_eszip: Option, maybe_entrypoint: Option, @@ -108,6 +124,7 @@ pub struct UserWorkerCreateOptions { s3_fs_config: Option, tmp_fs_config: Option, + otel_config: Option, context: Option, #[serde(default)] @@ -116,7 +133,7 @@ pub struct UserWorkerCreateOptions { /// It 
is identical to [`PermissionsOptions`], except for `prompt`. #[derive(Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] -pub struct PermissionsOptions2 { +pub struct JsPermissionsOptions { pub allow_all: Option, pub allow_env: Option>, pub deny_env: Option>, @@ -135,7 +152,7 @@ pub struct PermissionsOptions2 { pub allow_import: Option>, } -impl PermissionsOptions2 { +impl JsPermissionsOptions { fn into_permissions_options(self) -> PermissionsOptions { PermissionsOptions { prompt: false, @@ -160,11 +177,11 @@ impl PermissionsOptions2 { } #[op2(async)] -#[string] +#[serde] pub async fn op_user_worker_create( state: Rc>, #[serde] opts: UserWorkerCreateOptions, -) -> Result { +) -> Result<(String, bool), AnyError> { let result_rx = { let op_state = state.borrow(); let tx = op_state.borrow::>(); @@ -193,11 +210,19 @@ pub async fn op_user_worker_create( s3_fs_config: maybe_s3_fs_config, tmp_fs_config: maybe_tmp_fs_config, + otel_config: maybe_otel_config, context, static_patterns, } = opts; + let maybe_otel_config = maybe_otel_config.map(|it| OtelConfig { + tracing_enabled: it.tracing_enabled, + metrics_enabled: it.metrics_enabled, + console: it.console, + propagators: it.propagators, + ..Default::default() + }); let user_worker_options = WorkerContextInitOpts { service_path: PathBuf::from(service_path), no_module_cache, @@ -224,7 +249,7 @@ pub async fn op_user_worker_create( allow_remote_modules, custom_module_root, permissions: permissions - .map(PermissionsOptions2::into_permissions_options), + .map(JsPermissionsOptions::into_permissions_options), context, @@ -241,6 +266,7 @@ pub async fn op_user_worker_create( maybe_s3_fs_config, maybe_tmp_fs_config, + maybe_otel_config, }; tx.send(UserWorkerMsgs::Create(user_worker_options, result_tx))?; @@ -259,7 +285,7 @@ pub async fn op_user_worker_create( Ok(Err(err)) => { Err(custom_error("InvalidWorkerCreation", format!("{err:#}"))) } - Ok(Ok(v)) => Ok(v.key.to_string()), + Ok(Ok(v)) => Ok((v.key.to_string(), 
v.reused)), } } diff --git a/ext/workers/user_workers.js b/ext/workers/user_workers.js index 7a95f2a3..22b051f8 100644 --- a/ext/workers/user_workers.js +++ b/ext/workers/user_workers.js @@ -4,6 +4,11 @@ import { writableStreamForRid, } from "ext:deno_web/06_streams.js"; import { getSupabaseTag } from "ext:runtime/http.js"; +import { + builtinTracer, + enterSpan, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; const ops = core.ops; @@ -140,9 +145,30 @@ class UserWorker { throw new TypeError("service path must be defined"); } - const key = await op_user_worker_create(readyOptions); - - return new UserWorker(key); + let span; + if (TRACING_ENABLED) { + span = builtinTracer().startSpan("edge_runtime.user_worker.create"); + enterSpan(span); + } + try { + const [key, reused] = await op_user_worker_create(readyOptions); + if (TRACING_ENABLED) { + span.setAttribute("worker.id", key); + span.setAttribute("worker.reused", reused); + } + return new UserWorker(key); + } catch (err) { + if (TRACING_ENABLED) { + try { + span.setStatus(2, JSON.stringify(err)); + } catch { + span.setStatus(2, "unknown"); + } + } + throw err; + } finally { + span?.end(); + } } } diff --git a/types/global.d.ts b/types/global.d.ts index 853d139d..e6291b28 100644 --- a/types/global.d.ts +++ b/types/global.d.ts @@ -20,6 +20,15 @@ type S3FsConfig = any; // deno-lint-ignore no-explicit-any type TmpFsConfig = any; +type OtelPropagators = "TraceContext" | "Baggage"; +type OtelConsoleConfig = "Ignore" | "Capture" | "Replace"; +type OtelConfig = { + tracing_enabled?: boolean; + metrics_enabled?: boolean; + console?: OtelConsoleConfig; + propagators?: OtelPropagators[]; +}; + interface UserWorkerFetchOptions { signal?: AbortSignal; } @@ -77,6 +86,7 @@ interface UserWorkerCreateOptions { s3FsConfig?: S3FsConfig | null; tmpFsConfig?: TmpFsConfig | null; + otelConfig?: OtelConfig | null; context?: UserWorkerCreateContext | null; } diff --git a/vendor/deno_fetch/20_headers.js 
b/vendor/deno_fetch/20_headers.js new file mode 100644 index 00000000..e56a74c4 --- /dev/null +++ b/vendor/deno_fetch/20_headers.js @@ -0,0 +1,538 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +// @ts-check +/// +/// +/// +/// +/// +/// +/// + +import { primordials } from "ext:core/mod.js"; +const { + ArrayIsArray, + ArrayPrototypePush, + ArrayPrototypeSort, + ArrayPrototypeJoin, + ArrayPrototypeSplice, + ObjectFromEntries, + ObjectHasOwn, + ObjectPrototypeIsPrototypeOf, + RegExpPrototypeTest, + Symbol, + SymbolFor, + SymbolIterator, + StringPrototypeReplaceAll, + StringPrototypeCharCodeAt, + TypeError, +} = primordials; + +import * as webidl from "ext:deno_webidl/00_webidl.js"; +import { + byteLowerCase, + collectHttpQuotedString, + collectSequenceOfCodepoints, + HTTP_TAB_OR_SPACE_PREFIX_RE, + HTTP_TAB_OR_SPACE_SUFFIX_RE, + HTTP_TOKEN_CODE_POINT_RE, + httpTrim, +} from "ext:deno_web/00_infra.js"; + +const _headerList = Symbol("header list"); +const _iterableHeaders = Symbol("iterable headers"); +const _iterableHeadersCache = Symbol("iterable headers cache"); +const _guard = Symbol("guard"); +const _brand = webidl.brand; + +/** + * @typedef Header + * @type {[string, string]} + */ + +/** + * @typedef HeaderList + * @type {Header[]} + */ + +/** + * @param {string} potentialValue + * @returns {string} + */ +function normalizeHeaderValue(potentialValue) { + return httpTrim(potentialValue); +} + +/** + * @param {Headers} headers + * @param {HeadersInit} object + */ +function fillHeaders(headers, object) { + if (ArrayIsArray(object)) { + for (let i = 0; i < object.length; ++i) { + const header = object[i]; + if (header.length !== 2) { + throw new TypeError( + `Invalid header: length must be 2, but is ${header.length}`, + ); + } + appendHeader(headers, header[0], header[1]); + } + } else { + for (const key in object) { + if (!ObjectHasOwn(object, key)) { + continue; + } + appendHeader(headers, key, object[key]); + } + } +} + +function 
checkForInvalidValueChars(value) { + for (let i = 0; i < value.length; i++) { + const c = StringPrototypeCharCodeAt(value, i); + + if (c === 0x0a || c === 0x0d || c === 0x00) { + return false; + } + } + + return true; +} + +let HEADER_NAME_CACHE = { __proto__: null }; +let HEADER_CACHE_SIZE = 0; +const HEADER_NAME_CACHE_SIZE_BOUNDARY = 4096; +function checkHeaderNameForHttpTokenCodePoint(name) { + const fromCache = HEADER_NAME_CACHE[name]; + if (fromCache !== undefined) { + return fromCache; + } + + const valid = RegExpPrototypeTest(HTTP_TOKEN_CODE_POINT_RE, name); + + if (HEADER_CACHE_SIZE > HEADER_NAME_CACHE_SIZE_BOUNDARY) { + HEADER_NAME_CACHE = { __proto__: null }; + HEADER_CACHE_SIZE = 0; + } + HEADER_CACHE_SIZE++; + HEADER_NAME_CACHE[name] = valid; + + return valid; +} + +/** + * https://fetch.spec.whatwg.org/#concept-headers-append + * @param {Headers} headers + * @param {string} name + * @param {string} value + */ +function appendHeader(headers, name, value) { + // 1. + value = normalizeHeaderValue(value); + + // 2. + if (!checkHeaderNameForHttpTokenCodePoint(name)) { + throw new TypeError(`Invalid header name: "${name}"`); + } + if (!checkForInvalidValueChars(value)) { + throw new TypeError(`Invalid header value: "${value}"`); + } + + // 3. + if (headers[_guard] == "immutable") { + throw new TypeError("Cannot change header: headers are immutable"); + } + + // 7. 
+ const list = headers[_headerList]; + const lowercaseName = byteLowerCase(name); + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === lowercaseName) { + name = list[i][0]; + break; + } + } + ArrayPrototypePush(list, [name, value]); +} + +/** + * https://fetch.spec.whatwg.org/#concept-header-list-get + * @param {HeaderList} list + * @param {string} name + */ +function getHeader(list, name) { + const lowercaseName = byteLowerCase(name); + const entries = []; + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === lowercaseName) { + ArrayPrototypePush(entries, list[i][1]); + } + } + + if (entries.length === 0) { + return null; + } else { + return ArrayPrototypeJoin(entries, "\x2C\x20"); + } +} + +/** + * https://fetch.spec.whatwg.org/#concept-header-list-get-decode-split + * @param {HeaderList} list + * @param {string} name + * @returns {string[] | null} + */ +function getDecodeSplitHeader(list, name) { + const initialValue = getHeader(list, name); + if (initialValue === null) return null; + const input = initialValue; + let position = 0; + const values = []; + let value = ""; + while (position < initialValue.length) { + // 7.1. 
collect up to " or , + const res = collectSequenceOfCodepoints( + initialValue, + position, + (c) => c !== "\u0022" && c !== "\u002C", + ); + value += res.result; + position = res.position; + + if (position < initialValue.length) { + if (input[position] === "\u0022") { + const res = collectHttpQuotedString(input, position, false); + value += res.result; + position = res.position; + if (position < initialValue.length) { + continue; + } + } else { + if (input[position] !== "\u002C") throw new TypeError("Unreachable"); + position += 1; + } + } + + value = StringPrototypeReplaceAll(value, HTTP_TAB_OR_SPACE_PREFIX_RE, ""); + value = StringPrototypeReplaceAll(value, HTTP_TAB_OR_SPACE_SUFFIX_RE, ""); + + ArrayPrototypePush(values, value); + value = ""; + } + return values; +} + +class Headers { + /** @type {HeaderList} */ + [_headerList] = []; + /** @type {"immutable" | "request" | "request-no-cors" | "response" | "none"} */ + [_guard]; + + get [_iterableHeaders]() { + const list = this[_headerList]; + + if ( + this[_guard] === "immutable" && + this[_iterableHeadersCache] !== undefined + ) { + return this[_iterableHeadersCache]; + } + + // The order of steps are not similar to the ones suggested by the + // spec but produce the same result. + const seenHeaders = { __proto__: null }; + const entries = []; + for (let i = 0; i < list.length; ++i) { + const entry = list[i]; + const name = byteLowerCase(entry[0]); + const value = entry[1]; + if (value === null) throw new TypeError("Unreachable"); + // The following if statement is not spec compliant. + // `set-cookie` is the only header that can not be concatenated, + // so must be given to the user as multiple headers. + // The else block of the if statement is spec compliant again. + if (name === "set-cookie") { + ArrayPrototypePush(entries, [name, value]); + } else { + // The following code has the same behaviour as getHeader() + // at the end of loop. 
But it avoids looping through the entire + // list to combine multiple values with same header name. It + // instead gradually combines them as they are found. + const seenHeaderIndex = seenHeaders[name]; + if (seenHeaderIndex !== undefined) { + const entryValue = entries[seenHeaderIndex][1]; + entries[seenHeaderIndex][1] = entryValue.length > 0 + ? entryValue + "\x2C\x20" + value + : value; + } else { + seenHeaders[name] = entries.length; // store header index in entries array + ArrayPrototypePush(entries, [name, value]); + } + } + } + + ArrayPrototypeSort( + entries, + (a, b) => { + const akey = a[0]; + const bkey = b[0]; + if (akey > bkey) return 1; + if (akey < bkey) return -1; + return 0; + }, + ); + + this[_iterableHeadersCache] = entries; + + return entries; + } + + /** @param {HeadersInit} [init] */ + constructor(init = undefined) { + if (init === _brand) { + this[_brand] = _brand; + return; + } + + const prefix = "Failed to construct 'Headers'"; + if (init !== undefined) { + init = webidl.converters["HeadersInit"](init, prefix, "Argument 1"); + } + + this[_brand] = _brand; + this[_guard] = "none"; + if (init !== undefined) { + fillHeaders(this, init); + } + } + + /** + * @param {string} name + * @param {string} value + */ + append(name, value) { + webidl.assertBranded(this, HeadersPrototype); + const prefix = "Failed to execute 'append' on 'Headers'"; + webidl.requiredArguments(arguments.length, 2, prefix); + name = webidl.converters["ByteString"](name, prefix, "Argument 1"); + value = webidl.converters["ByteString"](value, prefix, "Argument 2"); + appendHeader(this, name, value); + } + + /** + * @param {string} name + */ + delete(name) { + webidl.assertBranded(this, HeadersPrototype); + const prefix = "Failed to execute 'delete' on 'Headers'"; + webidl.requiredArguments(arguments.length, 1, prefix); + name = webidl.converters["ByteString"](name, prefix, "Argument 1"); + + if (!checkHeaderNameForHttpTokenCodePoint(name)) { + throw new TypeError(`Invalid 
header name: "${name}"`); + } + if (this[_guard] == "immutable") { + throw new TypeError("Cannot change headers: headers are immutable"); + } + + const list = this[_headerList]; + const lowercaseName = byteLowerCase(name); + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === lowercaseName) { + ArrayPrototypeSplice(list, i, 1); + i--; + } + } + } + + /** + * @param {string} name + */ + get(name) { + webidl.assertBranded(this, HeadersPrototype); + const prefix = "Failed to execute 'get' on 'Headers'"; + webidl.requiredArguments(arguments.length, 1, prefix); + name = webidl.converters["ByteString"](name, prefix, "Argument 1"); + + if (!checkHeaderNameForHttpTokenCodePoint(name)) { + throw new TypeError(`Invalid header name: "${name}"`); + } + + const list = this[_headerList]; + return getHeader(list, name); + } + + getSetCookie() { + webidl.assertBranded(this, HeadersPrototype); + const list = this[_headerList]; + + const entries = []; + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === "set-cookie") { + ArrayPrototypePush(entries, list[i][1]); + } + } + + return entries; + } + + /** + * @param {string} name + */ + has(name) { + webidl.assertBranded(this, HeadersPrototype); + const prefix = "Failed to execute 'has' on 'Headers'"; + webidl.requiredArguments(arguments.length, 1, prefix); + name = webidl.converters["ByteString"](name, prefix, "Argument 1"); + + if (!checkHeaderNameForHttpTokenCodePoint(name)) { + throw new TypeError(`Invalid header name: "${name}"`); + } + + const list = this[_headerList]; + const lowercaseName = byteLowerCase(name); + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === lowercaseName) { + return true; + } + } + return false; + } + + /** + * @param {string} name + * @param {string} value + */ + set(name, value) { + webidl.assertBranded(this, HeadersPrototype); + const prefix = "Failed to execute 'set' on 'Headers'"; + webidl.requiredArguments(arguments.length, 2, 
prefix); + name = webidl.converters["ByteString"](name, prefix, "Argument 1"); + value = webidl.converters["ByteString"](value, prefix, "Argument 2"); + + value = normalizeHeaderValue(value); + + // 2. + if (!checkHeaderNameForHttpTokenCodePoint(name)) { + throw new TypeError(`Invalid header name: "${name}"`); + } + if (!checkForInvalidValueChars(value)) { + throw new TypeError(`Invalid header value: "${value}"`); + } + + if (this[_guard] == "immutable") { + throw new TypeError("Cannot change headers: headers are immutable"); + } + + const list = this[_headerList]; + const lowercaseName = byteLowerCase(name); + let added = false; + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === lowercaseName) { + if (!added) { + list[i][1] = value; + added = true; + } else { + ArrayPrototypeSplice(list, i, 1); + i--; + } + } + } + if (!added) { + ArrayPrototypePush(list, [name, value]); + } + } + + [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) { + if (ObjectPrototypeIsPrototypeOf(HeadersPrototype, this)) { + return `${this.constructor.name} ${ + inspect(ObjectFromEntries(this), inspectOptions) + }`; + } else { + return `${this.constructor.name} ${inspect({}, inspectOptions)}`; + } + } +} + +webidl.mixinPairIterable("Headers", Headers, _iterableHeaders, 0, 1); + +webidl.configureInterface(Headers); +const HeadersPrototype = Headers.prototype; + +webidl.converters["HeadersInit"] = (V, prefix, context, opts) => { + // Union for (sequence> or record) + if (webidl.type(V) === "Object" && V !== null) { + if (V[SymbolIterator] !== undefined) { + return webidl.converters["sequence>"]( + V, + prefix, + context, + opts, + ); + } + return webidl.converters["record"]( + V, + prefix, + context, + opts, + ); + } + throw webidl.makeException( + TypeError, + "The provided value is not of type '(sequence> or record)'", + prefix, + context, + ); +}; +webidl.converters["Headers"] = webidl.createInterfaceConverter( + "Headers", + Headers.prototype, +); 
+ +/** + * @param {HeaderList} list + * @param {"immutable" | "request" | "request-no-cors" | "response" | "none"} guard + * @returns {Headers} + */ +function headersFromHeaderList(list, guard) { + const headers = new Headers(_brand); + headers[_headerList] = list; + headers[_guard] = guard; + return headers; +} + +/** + * @param {Headers} headers + * @returns {HeaderList} + */ +function headerListFromHeaders(headers) { + return headers[_headerList]; +} + +/** + * @param {Headers} headers + * @returns {"immutable" | "request" | "request-no-cors" | "response" | "none"} + */ +function guardFromHeaders(headers) { + return headers[_guard]; +} + +/** + * @param {Headers} headers + * @returns {[string, string][]} + */ +function headersEntries(headers) { + return headers[_iterableHeaders]; +} + +export { + fillHeaders, + getDecodeSplitHeader, + getHeader, + guardFromHeaders, + headerListFromHeaders, + Headers, + headersEntries, + headersFromHeaderList, +}; diff --git a/vendor/deno_fetch/21_formdata.js b/vendor/deno_fetch/21_formdata.js new file mode 100644 index 00000000..7d466b8e --- /dev/null +++ b/vendor/deno_fetch/21_formdata.js @@ -0,0 +1,563 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +// @ts-check +/// +/// +/// +/// +/// +/// +/// + +import { core, primordials } from "ext:core/mod.js"; +import * as webidl from "ext:deno_webidl/00_webidl.js"; +import { + Blob, + BlobPrototype, + File, + FilePrototype, +} from "ext:deno_web/09_file.js"; +const { + ArrayPrototypePush, + ArrayPrototypeSlice, + ArrayPrototypeSplice, + MapPrototypeGet, + MapPrototypeSet, + MathRandom, + ObjectFreeze, + ObjectFromEntries, + ObjectPrototypeIsPrototypeOf, + SafeMap, + SafeRegExp, + Symbol, + SymbolFor, + StringFromCharCode, + StringPrototypeCharCodeAt, + StringPrototypeTrim, + StringPrototypeSlice, + StringPrototypeSplit, + StringPrototypeReplace, + StringPrototypeIndexOf, + StringPrototypePadStart, + StringPrototypeCodePointAt, + StringPrototypeReplaceAll, + TypeError, + TypedArrayPrototypeSubarray, + Uint8Array, +} = primordials; + +const entryList = Symbol("entry list"); + +/** + * @param {string} name + * @param {string | Blob} value + * @param {string | undefined} filename + * @returns {FormDataEntry} + */ +function createEntry(name, value, filename) { + if ( + ObjectPrototypeIsPrototypeOf(BlobPrototype, value) && + !ObjectPrototypeIsPrototypeOf(FilePrototype, value) + ) { + value = new File([value], "blob", { type: value.type }); + } + if ( + ObjectPrototypeIsPrototypeOf(FilePrototype, value) && + filename !== undefined + ) { + value = new File([value], filename, { + type: value.type, + lastModified: value.lastModified, + }); + } + return { + name, + // @ts-expect-error because TS is not smart enough + value, + }; +} + +/** + * @typedef FormDataEntry + * @property {string} name + * @property {FormDataEntryValue} value + */ + +class FormData { + /** @type {FormDataEntry[]} */ + [entryList] = []; + + /** @param {void} form */ + constructor(form) { + if (form !== undefined) { + webidl.illegalConstructor(); + } + this[webidl.brand] = webidl.brand; + } + + /** + * @param {string} name + * @param {string | Blob} valueOrBlobValue + * @param {string} [filename] + * 
@returns {void} + */ + append(name, valueOrBlobValue, filename) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'append' on 'FormData'"; + webidl.requiredArguments(arguments.length, 2, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + if (ObjectPrototypeIsPrototypeOf(BlobPrototype, valueOrBlobValue)) { + valueOrBlobValue = webidl.converters["Blob"]( + valueOrBlobValue, + prefix, + "Argument 2", + ); + if (filename !== undefined) { + filename = webidl.converters["USVString"]( + filename, + prefix, + "Argument 3", + ); + } + } else { + valueOrBlobValue = webidl.converters["USVString"]( + valueOrBlobValue, + prefix, + "Argument 2", + ); + } + + const entry = createEntry(name, valueOrBlobValue, filename); + + ArrayPrototypePush(this[entryList], entry); + } + + /** + * @param {string} name + * @returns {void} + */ + delete(name) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'name' on 'FormData'"; + webidl.requiredArguments(arguments.length, 1, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + + const list = this[entryList]; + for (let i = 0; i < list.length; i++) { + if (list[i].name === name) { + ArrayPrototypeSplice(list, i, 1); + i--; + } + } + } + + /** + * @param {string} name + * @returns {FormDataEntryValue | null} + */ + get(name) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'get' on 'FormData'"; + webidl.requiredArguments(arguments.length, 1, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + + const entries = this[entryList]; + for (let i = 0; i < entries.length; ++i) { + const entry = entries[i]; + if (entry.name === name) return entry.value; + } + return null; + } + + /** + * @param {string} name + * @returns {FormDataEntryValue[]} + */ + getAll(name) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'getAll' 
on 'FormData'"; + webidl.requiredArguments(arguments.length, 1, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + + const returnList = []; + const entries = this[entryList]; + for (let i = 0; i < entries.length; ++i) { + const entry = entries[i]; + if (entry.name === name) ArrayPrototypePush(returnList, entry.value); + } + return returnList; + } + + /** + * @param {string} name + * @returns {boolean} + */ + has(name) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'has' on 'FormData'"; + webidl.requiredArguments(arguments.length, 1, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + + const entries = this[entryList]; + for (let i = 0; i < entries.length; ++i) { + const entry = entries[i]; + if (entry.name === name) return true; + } + return false; + } + + /** + * @param {string} name + * @param {string | Blob} valueOrBlobValue + * @param {string} [filename] + * @returns {void} + */ + set(name, valueOrBlobValue, filename) { + webidl.assertBranded(this, FormDataPrototype); + const prefix = "Failed to execute 'set' on 'FormData'"; + webidl.requiredArguments(arguments.length, 2, prefix); + + name = webidl.converters["USVString"](name, prefix, "Argument 1"); + if (ObjectPrototypeIsPrototypeOf(BlobPrototype, valueOrBlobValue)) { + valueOrBlobValue = webidl.converters["Blob"]( + valueOrBlobValue, + prefix, + "Argument 2", + ); + if (filename !== undefined) { + filename = webidl.converters["USVString"]( + filename, + prefix, + "Argument 3", + ); + } + } else { + valueOrBlobValue = webidl.converters["USVString"]( + valueOrBlobValue, + prefix, + "Argument 2", + ); + } + + const entry = createEntry(name, valueOrBlobValue, filename); + + const list = this[entryList]; + let added = false; + for (let i = 0; i < list.length; i++) { + if (list[i].name === name) { + if (!added) { + list[i] = entry; + added = true; + } else { + ArrayPrototypeSplice(list, i, 1); + i--; + } + } + } + 
if (!added) { + ArrayPrototypePush(list, entry); + } + } + + [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) { + if (ObjectPrototypeIsPrototypeOf(FormDataPrototype, this)) { + return `${this.constructor.name} ${ + inspect(ObjectFromEntries(this), inspectOptions) + }`; + } else { + return `${this.constructor.name} ${inspect({}, inspectOptions)}`; + } + } +} + +webidl.mixinPairIterable("FormData", FormData, entryList, "name", "value"); + +webidl.configureInterface(FormData); +const FormDataPrototype = FormData.prototype; + +const ESCAPE_FILENAME_PATTERN = new SafeRegExp(/\r?\n|\r/g); +const ESCAPE_PATTERN = new SafeRegExp(/([\n\r"])/g); +const ESCAPE_MAP = ObjectFreeze({ + "\n": "%0A", + "\r": "%0D", + '"': "%22", +}); + +function escape(str, isFilename) { + return StringPrototypeReplace( + isFilename + ? str + : StringPrototypeReplace(str, ESCAPE_FILENAME_PATTERN, "\r\n"), + ESCAPE_PATTERN, + (c) => ESCAPE_MAP[c], + ); +} + +const FORM_DETA_SERIALIZE_PATTERN = new SafeRegExp(/\r(?!\n)|(?} + */ +function parseContentDisposition(value) { + /** @type {Map} */ + const params = new SafeMap(); + // Forced to do so for some Map constructor param mismatch + const values = ArrayPrototypeSlice(StringPrototypeSplit(value, ";"), 1); + for (let i = 0; i < values.length; i++) { + const entries = StringPrototypeSplit(StringPrototypeTrim(values[i]), "="); + if (entries.length > 1) { + MapPrototypeSet( + params, + entries[0], + StringPrototypeReplace(entries[1], QUOTE_CONTENT_PATTERN, "$1"), + ); + } + } + return params; +} + +/** + * Decodes a string containing UTF-8 mistakenly decoded as Latin-1 and + * decodes it correctly. 
 * @param {string} latin1String
 * @returns {string}
 */
function decodeLatin1StringAsUtf8(latin1String) {
  // Each UTF-16 code unit is the original byte value; rebuild the byte
  // buffer and UTF-8 decode it.
  const buffer = new Uint8Array(latin1String.length);
  for (let i = 0; i < latin1String.length; i++) {
    buffer[i] = StringPrototypeCharCodeAt(latin1String, i);
  }
  return core.decode(buffer);
}

const CRLF = "\r\n";
const LF = StringPrototypeCodePointAt(CRLF, 1); // 0x0A
const CR = StringPrototypeCodePointAt(CRLF, 0); // 0x0D

// Byte-level parser for multipart/form-data request bodies.
class MultipartParser {
  /**
   * @param {Uint8Array} body
   * @param {string | undefined} boundary
   */
  constructor(body, boundary) {
    if (!boundary) {
      throw new TypeError(
        "Cannot construct MultipartParser: multipart/form-data must provide a boundary",
      );
    }

    // Boundaries appear in the body prefixed with "--".
    this.boundary = `--${boundary}`;
    this.body = body;
    this.boundaryChars = core.encode(this.boundary);
  }

  /**
   * Parse one part's header block into a Headers object plus the parsed
   * Content-Disposition parameters.
   * @param {string} headersText
   * @returns {{ headers: Headers, disposition: Map<string, string> }}
   */
  #parseHeaders(headersText) {
    const headers = new Headers();
    const rawHeaders = StringPrototypeSplit(headersText, "\r\n");
    for (let i = 0; i < rawHeaders.length; ++i) {
      const rawHeader = rawHeaders[i];
      const sepIndex = StringPrototypeIndexOf(rawHeader, ":");
      if (sepIndex < 0) {
        continue; // Skip this header
      }
      const key = StringPrototypeSlice(rawHeader, 0, sepIndex);
      const value = StringPrototypeSlice(rawHeader, sepIndex + 1);
      headers.set(key, value);
    }

    const disposition = parseContentDisposition(
      headers.get("Content-Disposition") ?? "",
    );

    return { headers, disposition };
  }

  /**
   * Scan the body once with a small state machine:
   *   state 0 — before/inside a boundary line, waiting for its CRLF;
   *   state 1 — accumulating a part's header bytes into `headerText`;
   *   state 2 — inside part content, matching bytes against the boundary;
   *   state 3 — just emitted an entry, waiting for the boundary's CRLF.
   * @returns {FormData}
   */
  parse() {
    // To have fields body must be at least 2 boundaries + \r\n + --
    // on the last boundary.
    if (this.body.length < (this.boundary.length * 2) + 4) {
      const decodedBody = core.decode(this.body);
      const lastBoundary = this.boundary + "--";
      // check if it's an empty valid form data
      if (
        decodedBody === lastBoundary ||
        decodedBody === lastBoundary + "\r\n"
      ) {
        return new FormData();
      }
      throw new TypeError("Unable to parse body as form data");
    }

    const formData = new FormData();
    let headerText = "";
    let boundaryIndex = 0;
    let state = 0;
    let fileStart = 0;

    for (let i = 0; i < this.body.length; i++) {
      const byte = this.body[i];
      const prevByte = this.body[i - 1];
      const isNewLine = byte === LF && prevByte === CR;

      if (state === 1) {
        headerText += StringFromCharCode(byte);
      }

      if (state === 0 && isNewLine) {
        state = 1;
      } else if (
        state === 1
      ) {
        if (
          isNewLine && this.body[i + 1] === CR &&
          this.body[i + 2] === LF
        ) {
          // end of the headers section
          state = 2;
          fileStart = i + 3; // After \r\n
        }
      } else if (state === 2) {
        if (this.boundaryChars[boundaryIndex] !== byte) {
          boundaryIndex = 0;
        } else {
          boundaryIndex++;
        }

        // NOTE(review): compares against this.boundary.length (UTF-16 code
        // units) while indexing boundaryChars (UTF-8 bytes); equal for the
        // ASCII boundaries this runtime generates — confirm non-ASCII
        // boundaries are rejected upstream.
        if (boundaryIndex >= this.boundary.length) {
          const { headers, disposition } = this.#parseHeaders(headerText);
          // Content runs from after the header CRLFCRLF up to the CRLF that
          // precedes the matched boundary.
          const content = TypedArrayPrototypeSubarray(
            this.body,
            fileStart,
            i - boundaryIndex - 1,
          );
          // https://fetch.spec.whatwg.org/#ref-for-dom-body-formdata
          // These are UTF-8 decoded as if it was Latin-1.
          // TODO(@andreubotella): Maybe we shouldn't be parsing entry headers
          // as Latin-1.
          const latin1Filename = MapPrototypeGet(disposition, "filename");
          const latin1Name = MapPrototypeGet(disposition, "name");

          state = 3;
          // Reset
          boundaryIndex = 0;
          headerText = "";

          if (!latin1Name) {
            continue; // Skip, unknown name
          }

          const name = decodeLatin1StringAsUtf8(latin1Name);
          if (latin1Filename) {
            const blob = new Blob([content], {
              type: headers.get("Content-Type") || "application/octet-stream",
            });
            formData.append(
              name,
              blob,
              decodeLatin1StringAsUtf8(latin1Filename),
            );
          } else {
            formData.append(name, core.decode(content));
          }
        }
      } else if (state === 3 && isNewLine) {
        state = 1;
      }
    }

    return formData;
  }
}

/**
 * Parse a multipart/form-data body into a FormData object.
 * @param {Uint8Array} body
 * @param {string | undefined} boundary
 * @returns {FormData}
 */
function parseFormData(body, boundary) {
  const parser = new MultipartParser(body, boundary);
  return parser.parse();
}

/**
 * Build a FormData directly from pre-built entries (internal fast path;
 * entries are adopted, not copied).
 * @param {FormDataEntry[]} entries
 * @returns {FormData}
 */
function formDataFromEntries(entries) {
  const fd = new FormData();
  fd[entryList] = entries;
  return fd;
}

webidl.converters["FormData"] = webidl
  .createInterfaceConverter("FormData", FormDataPrototype);

export {
  FormData,
  formDataFromEntries,
  FormDataPrototype,
  formDataToBlob,
  parseFormData,
};
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// @ts-check
// NOTE(review): the contents of the `/// <reference …/>` pragmas below were
// lost to markup stripping — restore from upstream deno_fetch/22_body.js.
///
///
///
///
///
///
///
///
///

import { core, primordials } from "ext:core/mod.js";
const {
  isAnyArrayBuffer,
  isArrayBuffer,
  isStringObject,
} = core;
const {
  ArrayBufferIsView,
  ArrayPrototypeMap,
  DataViewPrototypeGetBuffer,
  DataViewPrototypeGetByteLength,
  DataViewPrototypeGetByteOffset,
  JSONParse,
  ObjectDefineProperties,
  ObjectPrototypeIsPrototypeOf,
  TypedArrayPrototypeGetBuffer,
  TypedArrayPrototypeGetByteLength,
  TypedArrayPrototypeGetByteOffset,
  TypedArrayPrototypeGetSymbolToStringTag,
  TypedArrayPrototypeSlice,
  TypeError,
  Uint8Array,
} = primordials;

import * as webidl from "ext:deno_webidl/00_webidl.js";
import {
  parseUrlEncoded,
  URLSearchParamsPrototype,
} from "ext:deno_url/00_url.js";
import {
  formDataFromEntries,
  FormDataPrototype,
  formDataToBlob,
  parseFormData,
} from "ext:deno_fetch/21_formdata.js";
import * as mimesniff from "ext:deno_web/01_mimesniff.js";
import { BlobPrototype } from "ext:deno_web/09_file.js";
import {
  createProxy,
  errorReadableStream,
  isReadableStreamDisturbed,
  readableStreamClose,
  readableStreamCollectIntoUint8Array,
  readableStreamDisturb,
  ReadableStreamPrototype,
  readableStreamTee,
  readableStreamThrowIfErrored,
} from "ext:deno_web/06_streams.js";

/**
 * @param {Uint8Array | string} chunk
 * @returns {Uint8Array}
 */
function chunkToU8(chunk) {
  return typeof chunk === "string" ? core.encode(chunk) : chunk;
}

/**
 * @param {Uint8Array | string} chunk
 * @returns {string}
 */
function chunkToString(chunk) {
  return typeof chunk === "string" ? chunk : core.decode(chunk);
}

/**
 * Spec "body" of a Request/Response. The payload is held either as a real
 * ReadableStream or, for the common in-memory case, as a cheap static
 * `{ body, consumed }` record that is only promoted to a stream on demand.
 */
class InnerBody {
  /**
   * @param {ReadableStream | { body: Uint8Array | string, consumed: boolean }} stream
   */
  constructor(stream) {
    /** @type {ReadableStream | { body: Uint8Array | string, consumed: boolean }} */
    this.streamOrStatic = stream ??
      { body: new Uint8Array(), consumed: false };
    // Original BodyInit, kept for re-serialization (e.g. redirects).
    /** @type {null | Uint8Array | string | Blob | FormData} */
    this.source = null;
    /** @type {null | number} */
    this.length = null;
  }

  // Lazily promote a static body to a ReadableStream. A consumed static
  // body becomes an already-disturbed, closed stream.
  get stream() {
    if (
      !ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      const { body, consumed } = this.streamOrStatic;
      if (consumed) {
        this.streamOrStatic = new ReadableStream();
        this.streamOrStatic.getReader();
        readableStreamDisturb(this.streamOrStatic);
        readableStreamClose(this.streamOrStatic);
      } else {
        this.streamOrStatic = new ReadableStream({
          start(controller) {
            controller.enqueue(chunkToU8(body));
            controller.close();
          },
        });
      }
    }
    return this.streamOrStatic;
  }

  /**
   * https://fetch.spec.whatwg.org/#body-unusable
   * @returns {boolean}
   */
  unusable() {
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      return this.streamOrStatic.locked ||
        isReadableStreamDisturbed(this.streamOrStatic);
    }
    return this.streamOrStatic.consumed;
  }

  /**
   * @returns {boolean}
   */
  consumed() {
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      return isReadableStreamDisturbed(this.streamOrStatic);
    }
    return this.streamOrStatic.consumed;
  }

  /**
   * https://fetch.spec.whatwg.org/#concept-body-consume-body
   * Static bodies return their payload synchronously; stream bodies return
   * a promise for the collected bytes.
   * @returns {Promise<Uint8Array> | Uint8Array | string}
   */
  consume() {
    if (this.unusable()) throw new TypeError("Body already consumed");
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      readableStreamThrowIfErrored(this.stream);
      return readableStreamCollectIntoUint8Array(this.stream);
    } else {
      this.streamOrStatic.consumed = true;
      return this.streamOrStatic.body;
    }
  }

  // Cancel the underlying stream; a static body is just marked consumed.
  cancel(error) {
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      this.streamOrStatic.cancel(error);
    } else {
      this.streamOrStatic.consumed = true;
    }
  }

  // Move the underlying stream to the errored state.
  error(error) {
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      errorReadableStream(this.streamOrStatic, error);
    } else {
      this.streamOrStatic.consumed = true;
    }
  }

  /**
   * https://fetch.spec.whatwg.org/#concept-body-clone
   * Static bodies are copied cheaply; stream bodies are tee'd, with this
   * body keeping one branch.
   * @returns {InnerBody}
   */
  clone() {
    let second;
    if (
      !ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      ) && !this.streamOrStatic.consumed
    ) {
      second = new InnerBody({
        body: this.streamOrStatic.body,
        consumed: false,
      });
    } else {
      const { 0: out1, 1: out2 } = readableStreamTee(this.stream, true);
      this.streamOrStatic = out1;
      second = new InnerBody(out2);
    }
    second.source = this.source;
    second.length = this.length;
    return second;
  }

  /**
   * Hand the body over to a proxy InnerBody; this body's static payload is
   * marked consumed (stream bodies are wrapped in a stream proxy).
   * @returns {InnerBody}
   */
  createProxy() {
    let proxyStreamOrStatic;
    if (
      ObjectPrototypeIsPrototypeOf(
        ReadableStreamPrototype,
        this.streamOrStatic,
      )
    ) {
      proxyStreamOrStatic = createProxy(this.streamOrStatic);
    } else {
      proxyStreamOrStatic = { ...this.streamOrStatic };
      this.streamOrStatic.consumed = true;
    }
    const proxy = new InnerBody(proxyStreamOrStatic);
    proxy.source = this.source;
    proxy.length = this.length;
    return proxy;
  }
}

/**
 * Install the Body mixin (body, bodyUsed, arrayBuffer, blob, bytes,
 * formData, json, text) on a Request/Response prototype.
 * @param {any} prototype
 * @param {symbol} bodySymbol
 * @param {symbol} mimeTypeSymbol
 * @returns {void}
 */
function mixinBody(prototype, bodySymbol, mimeTypeSymbol) {
  async function consumeBody(object, type) {
    webidl.assertBranded(object, prototype);

    const body = object[bodySymbol] !== null
      ? await object[bodySymbol].consume()
      : new Uint8Array();

    // Only Blob/FormData packaging needs the MIME type.
    const mimeType = type === "Blob" || type === "FormData"
      ? object[mimeTypeSymbol]
      : null;
    return packageData(body, type, mimeType);
  }

  /** @type {PropertyDescriptorMap} */
  const mixin = {
    body: {
      __proto__: null,
      /**
       * @returns {ReadableStream | null}
       */
      get() {
        webidl.assertBranded(this, prototype);
        if (this[bodySymbol] === null) {
          return null;
        } else {
          return this[bodySymbol].stream;
        }
      },
      configurable: true,
      enumerable: true,
    },
    bodyUsed: {
      __proto__: null,
      /**
       * @returns {boolean}
       */
      get() {
        webidl.assertBranded(this, prototype);
        try {
          if (this[bodySymbol] !== null) {
            return this[bodySymbol].consumed();
          }
        } catch (_) {
          // Request is closed.
          return true;
        }
        return false;
      },
      configurable: true,
      enumerable: true,
    },
    arrayBuffer: {
      __proto__: null,
      /** @returns {Promise<ArrayBuffer>} */
      value: function arrayBuffer() {
        return consumeBody(this, "ArrayBuffer");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
    blob: {
      __proto__: null,
      /** @returns {Promise<Blob>} */
      value: function blob() {
        return consumeBody(this, "Blob");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
    bytes: {
      __proto__: null,
      /** @returns {Promise<Uint8Array>} */
      value: function bytes() {
        return consumeBody(this, "bytes");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
    formData: {
      __proto__: null,
      /** @returns {Promise<FormData>} */
      value: function formData() {
        return consumeBody(this, "FormData");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
    json: {
      __proto__: null,
      /** @returns {Promise<any>} */
      value: function json() {
        return consumeBody(this, "JSON");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
    text: {
      __proto__: null,
      /** @returns {Promise<string>} */
      value: function text() {
        return consumeBody(this, "text");
      },
      writable: true,
      configurable: true,
      enumerable: true,
    },
  };
  return ObjectDefineProperties(prototype, mixin);
}

/**
 * https://fetch.spec.whatwg.org/#concept-body-package-data
 * Convert raw collected bytes into the requested representation.
 * @param {Uint8Array | string} bytes
 * @param {"ArrayBuffer" | "Blob" | "FormData" | "JSON" | "text" | "bytes"} type
 * @param {MimeType | null} [mimeType]
 */
function packageData(bytes, type, mimeType) {
  switch (type) {
    case "ArrayBuffer":
      return TypedArrayPrototypeGetBuffer(chunkToU8(bytes));
    case "Blob":
      return new Blob([bytes], {
        type: mimeType !== null ? mimesniff.serializeMimeType(mimeType) : "",
      });
    case "bytes":
      return chunkToU8(bytes);
    case "FormData": {
      if (mimeType !== null) {
        const essence = mimesniff.essence(mimeType);
        if (essence === "multipart/form-data") {
          const boundary = mimeType.parameters.get("boundary");
          if (boundary === null) {
            throw new TypeError(
              "Cannot turn into form data: missing boundary parameter in mime type of multipart form data",
            );
          }
          return parseFormData(chunkToU8(bytes), boundary);
        } else if (essence === "application/x-www-form-urlencoded") {
          // TODO(@AaronO): pass as-is with StringOrBuffer in op-layer
          const entries = parseUrlEncoded(chunkToU8(bytes));
          return formDataFromEntries(
            ArrayPrototypeMap(
              entries,
              (x) => ({ name: x[0], value: x[1] }),
            ),
          );
        }
        throw new TypeError("Body can not be decoded as form data");
      }
      throw new TypeError("Missing content type");
    }
    case "JSON":
      return JSONParse(chunkToString(bytes));
    case "text":
      return chunkToString(bytes);
  }
}

/**
 * https://fetch.spec.whatwg.org/#concept-bodyinit-extract
 * Turn a BodyInit value into an InnerBody plus the content type it implies.
 * @param {BodyInit} object
 * @returns {{body: InnerBody, contentType: string | null}}
 */
function extractBody(object) {
  /** @type {ReadableStream | { body: Uint8Array | string, consumed: boolean }} */
  let stream;
  let source = null;
  let length = null;
  let contentType = null;
  if (typeof object === "string") {
    source = object;
    contentType = "text/plain;charset=UTF-8";
  } else if (ObjectPrototypeIsPrototypeOf(BlobPrototype, object)) {
    stream = object.stream();
    source = object;
    length = object.size;
    if (object.type.length !== 0) {
      contentType = object.type;
    }
  } else if (ArrayBufferIsView(object)) {
    const tag = TypedArrayPrototypeGetSymbolToStringTag(object);
    if (tag !== undefined) {
      // TypedArray
      if (tag !== "Uint8Array") {
        // TypedArray, unless it's Uint8Array
        object = new Uint8Array(
          TypedArrayPrototypeGetBuffer(/** @type {Uint8Array} */ (object)),
          TypedArrayPrototypeGetByteOffset(/** @type {Uint8Array} */ (object)),
          TypedArrayPrototypeGetByteLength(/** @type {Uint8Array} */ (object)),
        );
      }
    } else {
      // DataView
      object = new Uint8Array(
        DataViewPrototypeGetBuffer(/** @type {DataView} */ (object)),
        DataViewPrototypeGetByteOffset(/** @type {DataView} */ (object)),
        DataViewPrototypeGetByteLength(/** @type {DataView} */ (object)),
      );
    }
    // Copy so later mutation of the caller's buffer is not observable.
    source = TypedArrayPrototypeSlice(object);
  } else if (isArrayBuffer(object)) {
    source = TypedArrayPrototypeSlice(new Uint8Array(object));
  } else if (ObjectPrototypeIsPrototypeOf(FormDataPrototype, object)) {
    const res = formDataToBlob(object);
    stream = res.stream();
    source = res;
    length = res.size;
    contentType = res.type;
  } else if (
    ObjectPrototypeIsPrototypeOf(URLSearchParamsPrototype, object)
  ) {
    // TODO(@satyarohith): not sure what primordial here.
    // deno-lint-ignore prefer-primordials
    source = object.toString();
    contentType = "application/x-www-form-urlencoded;charset=UTF-8";
  } else if (ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, object)) {
    stream = object;
    if (object.locked || isReadableStreamDisturbed(object)) {
      throw new TypeError("ReadableStream is locked or disturbed");
    }
  } else if (object[webidl.AsyncIterable] === webidl.AsyncIterable) {
    stream = ReadableStream.from(object.open());
  }
  if (typeof source === "string") {
    // WARNING: this deviates from spec (expects length to be set)
    // https://fetch.spec.whatwg.org/#bodyinit > 7.
    // no observable side-effect for users so far, but could change
    stream = { body: source, consumed: false };
    length = null; // NOTE: string length != byte length
  } else if (TypedArrayPrototypeGetSymbolToStringTag(source) === "Uint8Array") {
    stream = { body: source, consumed: false };
    length = TypedArrayPrototypeGetByteLength(source);
  }
  const body = new InnerBody(stream);
  body.source = source;
  body.length = length;
  return { body, contentType };
}

webidl.converters["async iterable"] = webidl
  .createAsyncIterableConverter(webidl.converters.Uint8Array);

webidl.converters["BodyInit_DOMString"] = (V, prefix, context, opts) => {
  // Union for (ReadableStream or Blob or ArrayBufferView or ArrayBuffer or FormData or URLSearchParams or USVString)
  if (ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, V)) {
    return webidl.converters["ReadableStream"](V, prefix, context, opts);
  } else if (ObjectPrototypeIsPrototypeOf(BlobPrototype, V)) {
    return webidl.converters["Blob"](V, prefix, context, opts);
  } else if (ObjectPrototypeIsPrototypeOf(FormDataPrototype, V)) {
    return webidl.converters["FormData"](V, prefix, context, opts);
  } else if (ObjectPrototypeIsPrototypeOf(URLSearchParamsPrototype, V)) {
    return webidl.converters["URLSearchParams"](V, prefix, context, opts);
  }
  if (typeof V === "object") {
    if (isAnyArrayBuffer(V)) {
      return webidl.converters["ArrayBuffer"](V, prefix, context, opts);
    }
    if (ArrayBufferIsView(V)) {
      return webidl.converters["ArrayBufferView"](V, prefix, context, opts);
    }
    if (webidl.isAsyncIterable(V) && !isStringObject(V)) {
      return webidl.converters["async iterable"](
        V,
        prefix,
        context,
        opts,
      );
    }
  }
  // BodyInit conversion is passed to extractBody(), which calls core.encode().
  // core.encode() will UTF-8 encode strings with replacement, being equivalent to the USV normalization.
  // Therefore we can convert to DOMString instead of USVString and avoid a costly redundant conversion.
  return webidl.converters["DOMString"](V, prefix, context, opts);
};
webidl.converters["BodyInit_DOMString?"] = webidl.createNullableConverter(
  webidl.converters["BodyInit_DOMString"],
);

export { extractBody, InnerBody, mixinBody };

// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

// @ts-check
// NOTE(review): `/// <reference …/>` pragma contents were lost to markup
// stripping — restore from upstream deno_fetch/22_http_client.js.
///
///
///
///
///
///
///
///

import { core, primordials } from "ext:core/mod.js";

import { SymbolDispose } from "ext:deno_web/00_infra.js";
import { op_fetch_custom_client } from "ext:core/ops";
import { loadTlsKeyPair } from "ext:deno_net/02_tls.js";

const { internalRidSymbol } = core;
const { ObjectDefineProperty } = primordials;

/**
 * Create a custom HTTP client resource for use with `fetch`.
 * NOTE(review): mutates the caller's `options` object (defaults `caCerts`
 * to `[]`) — confirm this is intended before reusing the options elsewhere.
 * @param {Deno.CreateHttpClientOptions} options
 * @returns {HttpClient}
 */
function createHttpClient(options) {
  options.caCerts ??= [];
  const keyPair = loadTlsKeyPair("Deno.createHttpClient", options);
  return new HttpClient(
    op_fetch_custom_client(
      options,
      keyPair,
    ),
  );
}

// Thin wrapper over the op-layer client resource id. The rid is also
// exposed (non-enumerably) via `internalRidSymbol` for internal callers.
class HttpClient {
  #rid;

  /**
   * @param {number} rid
   */
  constructor(rid) {
    ObjectDefineProperty(this, internalRidSymbol, {
      __proto__: null,
      enumerable: false,
      value: rid,
    });
    this.#rid = rid;
  }

  // Close the underlying resource; throws if already closed.
  close() {
    core.close(this.#rid);
  }

  // `using` support: best-effort close, never throws on double-close.
  [SymbolDispose]() {
    core.tryClose(this.#rid);
  }
}
const HttpClientPrototype = HttpClient.prototype;

export { createHttpClient, HttpClient, HttpClientPrototype };

// Copyright 2018-2024 the Deno authors.
// All rights reserved. MIT license.

// @ts-check
// NOTE(review): `/// <reference …/>` pragma contents were lost to markup
// stripping — restore from upstream deno_fetch/23_request.js.
///
///
///
///
///
///
///

import { core, internals, primordials } from "ext:core/mod.js";
const {
  ArrayPrototypeMap,
  ArrayPrototypeSlice,
  ArrayPrototypeSplice,
  ObjectFreeze,
  ObjectKeys,
  ObjectPrototypeIsPrototypeOf,
  RegExpPrototypeExec,
  StringPrototypeStartsWith,
  Symbol,
  SymbolFor,
  TypeError,
} = primordials;

import * as webidl from "ext:deno_webidl/00_webidl.js";
import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
import {
  byteUpperCase,
  HTTP_TOKEN_CODE_POINT_RE,
} from "ext:deno_web/00_infra.js";
import { URL } from "ext:deno_url/00_url.js";
import { extractBody, mixinBody } from "ext:deno_fetch/22_body.js";
import { getLocationHref } from "ext:deno_web/12_location.js";
import { extractMimeType } from "ext:deno_web/01_mimesniff.js";
import { blobFromObjectUrl } from "ext:deno_web/09_file.js";
import {
  fillHeaders,
  getDecodeSplitHeader,
  guardFromHeaders,
  headerListFromHeaders,
  headersFromHeaderList,
} from "ext:deno_fetch/20_headers.js";
import { HttpClientPrototype } from "ext:deno_fetch/22_http_client.js";
import {
  createDependentAbortSignal,
  newSignal,
  signalAbort,
} from "ext:deno_web/03_abort_signal.js";
import { DOMException } from "ext:deno_web/01_dom_exception.js";
const { internalRidSymbol } = core;

// Private slots for the Request interface.
const _request = Symbol("request");
const _headers = Symbol("headers");
const _getHeaders = Symbol("get headers");
const _headersCache = Symbol("headers cache");
const _signal = Symbol("signal");
const _signalCache = Symbol("signalCache");
const _mimeType = Symbol("mime type");
const _body = Symbol("body");
const _url = Symbol("url");
const _method = Symbol("method");
const _brand = webidl.brand;

/**
 * Force every lazily-computed URL in `urlList` into `urlListProcessed`,
 * filling only the slots not already materialized.
 * @param {(() => string)[]} urlList
 * @param {string[]} urlListProcessed
 */
function processUrlList(urlList, urlListProcessed) {
  for (let i = 0; i < urlList.length; i++) {
    if (urlListProcessed[i] === undefined) {
      urlListProcessed[i] = urlList[i]();
    }
  }
  return urlListProcessed;
}

/**
 * @typedef InnerRequest
 * @property {() => string} method
 * @property {() => string} url
 * @property {() => string} currentUrl
 * @property {() => [string, string][]} headerList
 * @property {null | typeof __window.bootstrap.fetchBody.InnerBody} body
 * @property {"follow" | "error" | "manual"} redirectMode
 * @property {number} redirectCount
 * @property {(() => string)[]} urlList
 * @property {string[]} urlListProcessed
 * @property {number | null} clientRid NOTE: non standard extension for `Deno.HttpClient`.
 * @property {Blob | null} blobUrlEntry
 */

/**
 * Build an inner request. The URL and header list are accepted as thunks so
 * op-backed requests can defer reading them until actually needed; the
 * getters below cache the materialized values and convert failures (request
 * already closed at the op layer) into TypeErrors.
 * @param {string} method
 * @param {string | (() => string)} url
 * @param {() => [string, string][]} headerList
 * @param {typeof __window.bootstrap.fetchBody.InnerBody} body
 * @param {boolean} maybeBlob
 * @returns {InnerRequest}
 */
function newInnerRequest(method, url, headerList, body, maybeBlob) {
  let blobUrlEntry = null;
  // Resolve blob: URLs eagerly, while the blob is still registered.
  if (
    maybeBlob &&
    typeof url === "string" &&
    StringPrototypeStartsWith(url, "blob:")
  ) {
    blobUrlEntry = blobFromObjectUrl(url);
  }
  return {
    methodInner: method,
    get method() {
      return this.methodInner;
    },
    set method(value) {
      this.methodInner = value;
    },
    headerListInner: null,
    get headerList() {
      if (this.headerListInner === null) {
        try {
          this.headerListInner = headerList();
        } catch {
          throw new TypeError("Cannot read headers: request closed");
        }
      }
      return this.headerListInner;
    },
    set headerList(value) {
      this.headerListInner = value;
    },
    body,
    redirectMode: "follow",
    redirectCount: 0,
    urlList: [typeof url === "string" ? () => url : url],
    urlListProcessed: [],
    clientRid: null,
    blobUrlEntry,
    url() {
      if (this.urlListProcessed[0] === undefined) {
        try {
          this.urlListProcessed[0] = this.urlList[0]();
        } catch {
          throw new TypeError("cannot read url: request closed");
        }
      }
      return this.urlListProcessed[0];
    },
    currentUrl() {
      // Last entry of the url list: the URL after any redirects so far.
      const currentIndex = this.urlList.length - 1;
      if (this.urlListProcessed[currentIndex] === undefined) {
        try {
          this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[currentIndex];
    },
  };
}

/**
 * https://fetch.spec.whatwg.org/#concept-request-clone
 * The clone's url list is collapsed to the (materialized) current url; the
 * body is cloned unless `skipBody` is set.
 * @param {InnerRequest} request
 * @param {boolean} skipBody
 * @returns {InnerRequest}
 */
function cloneInnerRequest(request, skipBody = false) {
  const headerList = ArrayPrototypeMap(
    request.headerList,
    (x) => [x[0], x[1]],
  );

  let body = null;
  if (request.body !== null && !skipBody) {
    body = request.body.clone();
  }

  return {
    method: request.method,
    headerList,
    body,
    redirectMode: request.redirectMode,
    redirectCount: request.redirectCount,
    urlList: [() => request.url()],
    urlListProcessed: [request.url()],
    clientRid: request.clientRid,
    blobUrlEntry: request.blobUrlEntry,
    url() {
      if (this.urlListProcessed[0] === undefined) {
        try {
          this.urlListProcessed[0] = this.urlList[0]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[0];
    },
    currentUrl() {
      const currentIndex = this.urlList.length - 1;
      if (this.urlListProcessed[currentIndex] === undefined) {
        try {
          this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[currentIndex];
    },
  };
}

// method => normalized method
const KNOWN_METHODS = {
  "DELETE": "DELETE",
  "delete":
"DELETE", + "GET": "GET", + "get": "GET", + "HEAD": "HEAD", + "head": "HEAD", + "OPTIONS": "OPTIONS", + "options": "OPTIONS", + "PATCH": "PATCH", + "patch": "PATCH", + "POST": "POST", + "post": "POST", + "PUT": "PUT", + "put": "PUT", +}; + +/** + * @param {string} m + * @returns {string} + */ +function validateAndNormalizeMethod(m) { + if (RegExpPrototypeExec(HTTP_TOKEN_CODE_POINT_RE, m) === null) { + throw new TypeError("Method is not valid"); + } + const upperCase = byteUpperCase(m); + if ( + upperCase === "CONNECT" || upperCase === "TRACE" || upperCase === "TRACK" + ) { + throw new TypeError("Method is forbidden"); + } + return upperCase; +} + +class Request { + /** @type {InnerRequest} */ + [_request]; + /** @type {Headers} */ + [_headersCache]; + [_getHeaders]; + + /** @type {Headers} */ + get [_headers]() { + if (this[_headersCache] === undefined) { + this[_headersCache] = this[_getHeaders](); + } + return this[_headersCache]; + } + + set [_headers](value) { + this[_headersCache] = value; + } + + /** @type {AbortSignal} */ + get [_signal]() { + const signal = this[_signalCache]; + // This signal has not been created yet, but the request has already completed + if (signal === false) { + const signal = newSignal(); + this[_signalCache] = signal; + signal[signalAbort](signalAbortError); + return signal; + } + + // This signal not been created yet, and the request is still in progress + if (signal === undefined) { + const signal = newSignal(); + this[_signalCache] = signal; + this[_request].onCancel?.(() => { + signal[signalAbort](signalAbortError); + }); + + return signal; + } + + return signal; + } + get [_mimeType]() { + const values = getDecodeSplitHeader( + headerListFromHeaders(this[_headers]), + "Content-Type", + ); + return extractMimeType(values); + } + get [_body]() { + return this[_request].body; + } + + /** + * https://fetch.spec.whatwg.org/#dom-request + * @param {RequestInfo} input + * @param {RequestInit} init + */ + constructor(input, init = { 
__proto__: null }) { + if (input === _brand) { + this[_brand] = _brand; + return; + } + + const prefix = "Failed to construct 'Request'"; + webidl.requiredArguments(arguments.length, 1, prefix); + input = webidl.converters["RequestInfo_DOMString"]( + input, + prefix, + "Argument 1", + ); + init = webidl.converters["RequestInit"](init, prefix, "Argument 2"); + + this[_brand] = _brand; + + /** @type {InnerRequest} */ + let request; + const baseURL = getLocationHref(); + + // 4. + let signal = null; + + // 5. + if (typeof input === "string") { + const parsedURL = new URL(input, baseURL); + request = newInnerRequest( + "GET", + parsedURL.href, + () => [], + null, + true, + ); + } else { // 6. + if (!ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) { + throw new TypeError("Unreachable"); + } + const originalReq = input[_request]; + // fold in of step 12 from below + request = cloneInnerRequest(originalReq, true); + request.redirectCount = 0; // reset to 0 - cloneInnerRequest copies the value + signal = input[_signal]; + } + + // 12. is folded into the else statement of step 6 above. + + // 22. + if (init.redirect !== undefined) { + request.redirectMode = init.redirect; + } + + // 25. + if (init.method !== undefined) { + const method = init.method; + // fast path: check for known methods + request.method = KNOWN_METHODS[method] ?? + validateAndNormalizeMethod(method); + } + + // 26. + if (init.signal !== undefined) { + signal = init.signal; + } + + // NOTE: non standard extension. This handles Deno.HttpClient parameter + if (init.client !== undefined) { + if ( + init.client !== null && + !ObjectPrototypeIsPrototypeOf(HttpClientPrototype, init.client) + ) { + throw webidl.makeException( + TypeError, + "`client` must be a Deno.HttpClient", + prefix, + "Argument 2", + ); + } + request.clientRid = init.client?.[internalRidSymbol] ?? null; + } + + // 28. + this[_request] = request; + + // 29 & 30. 
+ if (signal !== null) { + this[_signalCache] = createDependentAbortSignal([signal], prefix); + } + + // 31. + this[_headers] = headersFromHeaderList(request.headerList, "request"); + + // 33. + if (init.headers || ObjectKeys(init).length > 0) { + const headerList = headerListFromHeaders(this[_headers]); + const headers = init.headers ?? ArrayPrototypeSlice( + headerList, + 0, + headerList.length, + ); + if (headerList.length !== 0) { + ArrayPrototypeSplice(headerList, 0, headerList.length); + } + fillHeaders(this[_headers], headers); + } + + // 34. + let inputBody = null; + if (ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) { + inputBody = input[_body]; + } + + // 35. + if ( + (request.method === "GET" || request.method === "HEAD") && + ((init.body !== undefined && init.body !== null) || + inputBody !== null) + ) { + throw new TypeError("Request with GET/HEAD method cannot have body"); + } + + // 36. + let initBody = null; + + // 37. + if (init.body !== undefined && init.body !== null) { + const res = extractBody(init.body); + initBody = res.body; + if (res.contentType !== null && !this[_headers].has("content-type")) { + this[_headers].append("Content-Type", res.contentType); + } + } + + // 38. + const inputOrInitBody = initBody ?? inputBody; + + // 40. + let finalBody = inputOrInitBody; + + // 41. + if (initBody === null && inputBody !== null) { + if (input[_body] && input[_body].unusable()) { + throw new TypeError("Input request's body is unusable"); + } + finalBody = inputBody.createProxy(); + } + + // 42. 
+ request.body = finalBody; + } + + get method() { + webidl.assertBranded(this, RequestPrototype); + if (this[_method]) { + return this[_method]; + } + this[_method] = this[_request].method; + return this[_method]; + } + + get url() { + webidl.assertBranded(this, RequestPrototype); + if (this[_url]) { + return this[_url]; + } + + this[_url] = this[_request].url(); + return this[_url]; + } + + get headers() { + webidl.assertBranded(this, RequestPrototype); + return this[_headers]; + } + + get redirect() { + webidl.assertBranded(this, RequestPrototype); + return this[_request].redirectMode; + } + + get signal() { + webidl.assertBranded(this, RequestPrototype); + return this[_signal]; + } + + clone() { + const prefix = "Failed to execute 'Request.clone'"; + webidl.assertBranded(this, RequestPrototype); + if (this[_body] && this[_body].unusable()) { + throw new TypeError("Body is unusable"); + } + const clonedReq = cloneInnerRequest(this[_request]); + + const materializedSignal = this[_signal]; + const clonedSignal = createDependentAbortSignal( + [materializedSignal], + prefix, + ); + + const request = new Request(_brand); + request[_request] = clonedReq; + request[_signalCache] = clonedSignal; + request[_getHeaders] = () => + headersFromHeaderList( + clonedReq.headerList, + guardFromHeaders(this[_headers]), + ); + return request; + } + + [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) { + return inspect( + createFilteredInspectProxy({ + object: this, + evaluate: ObjectPrototypeIsPrototypeOf(RequestPrototype, this), + keys: [ + "bodyUsed", + "headers", + "method", + "redirect", + "url", + ], + }), + inspectOptions, + ); + } +} + +webidl.configureInterface(Request); +const RequestPrototype = Request.prototype; +mixinBody(RequestPrototype, _body, _mimeType); + +webidl.converters["Request"] = webidl.createInterfaceConverter( + "Request", + RequestPrototype, +); +webidl.converters["RequestInfo_DOMString"] = (V, prefix, context, opts) => { + // Union for 
(Request or USVString) + if (typeof V == "object") { + if (ObjectPrototypeIsPrototypeOf(RequestPrototype, V)) { + return webidl.converters["Request"](V, prefix, context, opts); + } + } + // Passed to new URL(...) which implicitly converts DOMString -> USVString + return webidl.converters["DOMString"](V, prefix, context, opts); +}; +webidl.converters["RequestRedirect"] = webidl.createEnumConverter( + "RequestRedirect", + [ + "follow", + "error", + "manual", + ], +); +webidl.converters["RequestInit"] = webidl.createDictionaryConverter( + "RequestInit", + [ + { key: "method", converter: webidl.converters["ByteString"] }, + { key: "headers", converter: webidl.converters["HeadersInit"] }, + { + key: "body", + converter: webidl.createNullableConverter( + webidl.converters["BodyInit_DOMString"], + ), + }, + { key: "redirect", converter: webidl.converters["RequestRedirect"] }, + { + key: "signal", + converter: webidl.createNullableConverter( + webidl.converters["AbortSignal"], + ), + }, + { key: "client", converter: webidl.converters.any }, + ], +); + +/** + * @param {Request} request + * @returns {InnerRequest} + */ +function toInnerRequest(request) { + return request[_request]; +} + +/** + * @param {InnerRequest} inner + * @param {"request" | "immutable" | "request-no-cors" | "response" | "none"} guard + * @returns {Request} + */ +function fromInnerRequest(inner, guard) { + const request = new Request(_brand); + request[_request] = inner; + request[_getHeaders] = () => headersFromHeaderList(inner.headerList, guard); + return request; +} + +const signalAbortError = new DOMException( + "The request has been cancelled.", + "AbortError", +); +ObjectFreeze(signalAbortError); + +function abortRequest(request) { + if (request[_signalCache] !== undefined) { + request[_signal][signalAbort](signalAbortError); + } else { + request[_signalCache] = false; + } +} + +function getCachedAbortSignal(request) { + return request[_signalCache]; +} + +// For testing 
+internals.getCachedAbortSignal = getCachedAbortSignal; + +export { + abortRequest, + fromInnerRequest, + newInnerRequest, + processUrlList, + Request, + RequestPrototype, + toInnerRequest, +}; diff --git a/vendor/deno_fetch/23_response.js b/vendor/deno_fetch/23_response.js new file mode 100644 index 00000000..278dcb7d --- /dev/null +++ b/vendor/deno_fetch/23_response.js @@ -0,0 +1,529 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +// @ts-check +/// +/// +/// +/// +/// +/// +/// +/// + +import { core, primordials } from "ext:core/mod.js"; +import * as webidl from "ext:deno_webidl/00_webidl.js"; +import { createFilteredInspectProxy } from "ext:deno_console/01_console.js"; +import { + byteLowerCase, + HTTP_TAB_OR_SPACE, + regexMatcher, + serializeJSValueToJSONString, +} from "ext:deno_web/00_infra.js"; +import { extractBody, mixinBody } from "ext:deno_fetch/22_body.js"; +import { getLocationHref } from "ext:deno_web/12_location.js"; +import { extractMimeType } from "ext:deno_web/01_mimesniff.js"; +import { URL } from "ext:deno_url/00_url.js"; +import { + fillHeaders, + getDecodeSplitHeader, + guardFromHeaders, + headerListFromHeaders, + headersFromHeaderList, +} from "ext:deno_fetch/20_headers.js"; +const { + ArrayPrototypeMap, + ArrayPrototypePush, + ObjectDefineProperties, + ObjectPrototypeIsPrototypeOf, + RangeError, + RegExpPrototypeExec, + SafeArrayIterator, + SafeRegExp, + Symbol, + SymbolFor, + TypeError, +} = primordials; + +const VCHAR = ["\x21-\x7E"]; +const OBS_TEXT = ["\x80-\xFF"]; + +const REASON_PHRASE = [ + ...new SafeArrayIterator(HTTP_TAB_OR_SPACE), + ...new SafeArrayIterator(VCHAR), + ...new SafeArrayIterator(OBS_TEXT), +]; +const REASON_PHRASE_MATCHER = regexMatcher(REASON_PHRASE); +const REASON_PHRASE_RE = new SafeRegExp(`^[${REASON_PHRASE_MATCHER}]*$`); + +const _response = Symbol("response"); +const _headers = Symbol("headers"); +const _mimeType = Symbol("mime type"); +const _body = Symbol("body"); +const 
_brand = webidl.brand; + +// it's slightly faster to cache these +const webidlConvertersBodyInitDomString = + webidl.converters["BodyInit_DOMString?"]; +const webidlConvertersUSVString = webidl.converters["USVString"]; +const webidlConvertersUnsignedShort = webidl.converters["unsigned short"]; +const webidlConvertersAny = webidl.converters["any"]; +const webidlConvertersByteString = webidl.converters["ByteString"]; +const webidlConvertersHeadersInit = webidl.converters["HeadersInit"]; + +/** + * @typedef InnerResponse + * @property {"basic" | "cors" | "default" | "error" | "opaque" | "opaqueredirect"} type + * @property {() => string | null} url + * @property {string[]} urlList + * @property {number} status + * @property {string} statusMessage + * @property {[string, string][]} headerList + * @property {null | typeof __window.bootstrap.fetchBody.InnerBody} body + * @property {boolean} aborted + * @property {string} [error] + */ + +/** + * @param {number} status + * @returns {boolean} + */ +function nullBodyStatus(status) { + return status === 101 || status === 204 || status === 205 || status === 304; +} + +/** + * @param {number} status + * @returns {boolean} + */ +function redirectStatus(status) { + return status === 301 || status === 302 || status === 303 || + status === 307 || status === 308; +} + +/** + * https://fetch.spec.whatwg.org/#concept-response-clone + * @param {InnerResponse} response + * @returns {InnerResponse} + */ +function cloneInnerResponse(response) { + const urlList = [...new SafeArrayIterator(response.urlList)]; + const headerList = ArrayPrototypeMap( + response.headerList, + (x) => [x[0], x[1]], + ); + + let body = null; + if (response.body !== null) { + body = response.body.clone(); + } + + return { + type: response.type, + body, + headerList, + urlList, + status: response.status, + statusMessage: response.statusMessage, + aborted: response.aborted, + url() { + if (this.urlList.length == 0) return null; + return 
this.urlList[this.urlList.length - 1]; + }, + }; +} + +/** + * @returns {InnerResponse} + */ +function newInnerResponse(status = 200, statusMessage = "") { + return { + type: "default", + body: null, + headerList: [], + urlList: [], + status, + statusMessage, + aborted: false, + url() { + if (this.urlList.length == 0) return null; + return this.urlList[this.urlList.length - 1]; + }, + }; +} + +/** + * @param {string} error + * @returns {InnerResponse} + */ +function networkError(error) { + const resp = newInnerResponse(0); + resp.type = "error"; + resp.error = error; + return resp; +} + +/** + * @returns {InnerResponse} + */ +function abortedNetworkError() { + const resp = networkError("aborted"); + resp.aborted = true; + return resp; +} + +/** + * https://fetch.spec.whatwg.org#initialize-a-response + * @param {Response} response + * @param {ResponseInit} init + * @param {{ body: fetchBody.InnerBody, contentType: string | null } | null} bodyWithType + */ +function initializeAResponse(response, init, bodyWithType) { + // 1. + if ((init.status < 200 || init.status > 599) && init.status != 101) { + throw new RangeError( + `The status provided (${init.status}) is not equal to 101 and outside the range [200, 599]`, + ); + } + + // 2. + if ( + init.statusText && + RegExpPrototypeExec(REASON_PHRASE_RE, init.statusText) === null + ) { + throw new TypeError( + `Invalid status text: "${init.statusText}"`, + ); + } + + // 3. + response[_response].status = init.status; + + // 4. + response[_response].statusMessage = init.statusText; + // 5. + /** @type {headers.Headers} */ + const headers = response[_headers]; + if (init.headers) { + fillHeaders(headers, init.headers); + } + + // 6. 
+ if (bodyWithType !== null) { + if (nullBodyStatus(response[_response].status)) { + throw new TypeError( + "Response with null body status cannot have body", + ); + } + + const { body, contentType } = bodyWithType; + response[_response].body = body; + + if (contentType !== null) { + let hasContentType = false; + const list = headerListFromHeaders(headers); + for (let i = 0; i < list.length; i++) { + if (byteLowerCase(list[i][0]) === "content-type") { + hasContentType = true; + break; + } + } + if (!hasContentType) { + ArrayPrototypePush(list, ["Content-Type", contentType]); + } + } + } +} + +class Response { + get [_mimeType]() { + const values = getDecodeSplitHeader( + headerListFromHeaders(this[_headers]), + "Content-Type", + ); + return extractMimeType(values); + } + get [_body]() { + return this[_response].body; + } + + /** + * @returns {Response} + */ + static error() { + const inner = newInnerResponse(0); + inner.type = "error"; + const response = webidl.createBranded(Response); + response[_response] = inner; + response[_headers] = headersFromHeaderList( + response[_response].headerList, + "immutable", + ); + return response; + } + + /** + * @param {string} url + * @param {number} status + * @returns {Response} + */ + static redirect(url, status = 302) { + const prefix = "Failed to execute 'Response.redirect'"; + url = webidlConvertersUSVString(url, prefix, "Argument 1"); + status = webidlConvertersUnsignedShort(status, prefix, "Argument 2"); + + const baseURL = getLocationHref(); + const parsedURL = new URL(url, baseURL); + if (!redirectStatus(status)) { + throw new RangeError(`Invalid redirect status code: ${status}`); + } + const inner = newInnerResponse(status); + inner.type = "default"; + ArrayPrototypePush(inner.headerList, ["Location", parsedURL.href]); + const response = webidl.createBranded(Response); + response[_response] = inner; + response[_headers] = headersFromHeaderList( + response[_response].headerList, + "immutable", + ); + return response; 
+ } + + /** + * @param {any} data + * @param {ResponseInit} init + * @returns {Response} + */ + static json(data = undefined, init = { __proto__: null }) { + const prefix = "Failed to execute 'Response.json'"; + data = webidlConvertersAny(data); + init = webidlConvertersResponseInitFast(init, prefix, "Argument 2"); + + const str = serializeJSValueToJSONString(data); + const res = extractBody(str); + res.contentType = "application/json"; + const response = webidl.createBranded(Response); + response[_response] = newInnerResponse(); + response[_headers] = headersFromHeaderList( + response[_response].headerList, + "response", + ); + initializeAResponse(response, init, res); + return response; + } + + /** + * @param {BodyInit | null} body + * @param {ResponseInit} init + */ + constructor(body = null, init = undefined) { + if (body === _brand) { + this[_brand] = _brand; + return; + } + + const prefix = "Failed to construct 'Response'"; + body = webidlConvertersBodyInitDomString(body, prefix, "Argument 1"); + init = webidlConvertersResponseInitFast(init, prefix, "Argument 2"); + + this[_response] = newInnerResponse(); + this[_headers] = headersFromHeaderList( + this[_response].headerList, + "response", + ); + + let bodyWithType = null; + if (body !== null) { + bodyWithType = extractBody(body); + } + initializeAResponse(this, init, bodyWithType); + this[_brand] = _brand; + } + + /** + * @returns {"basic" | "cors" | "default" | "error" | "opaque" | "opaqueredirect"} + */ + get type() { + webidl.assertBranded(this, ResponsePrototype); + return this[_response].type; + } + + /** + * @returns {string} + */ + get url() { + webidl.assertBranded(this, ResponsePrototype); + const url = this[_response].url(); + if (url === null) return ""; + const newUrl = new URL(url); + newUrl.hash = ""; + return newUrl.href; + } + + /** + * @returns {boolean} + */ + get redirected() { + webidl.assertBranded(this, ResponsePrototype); + return this[_response].urlList.length > 1; + } + + /** + * 
@returns {number} + */ + get status() { + webidl.assertBranded(this, ResponsePrototype); + return this[_response].status; + } + + /** + * @returns {boolean} + */ + get ok() { + webidl.assertBranded(this, ResponsePrototype); + const status = this[_response].status; + return status >= 200 && status <= 299; + } + + /** + * @returns {string} + */ + get statusText() { + webidl.assertBranded(this, ResponsePrototype); + return this[_response].statusMessage; + } + + /** + * @returns {Headers} + */ + get headers() { + webidl.assertBranded(this, ResponsePrototype); + return this[_headers]; + } + + /** + * @returns {Response} + */ + clone() { + webidl.assertBranded(this, ResponsePrototype); + if (this[_body] && this[_body].unusable()) { + throw new TypeError("Body is unusable"); + } + const second = webidl.createBranded(Response); + const newRes = cloneInnerResponse(this[_response]); + second[_response] = newRes; + second[_headers] = headersFromHeaderList( + newRes.headerList, + guardFromHeaders(this[_headers]), + ); + return second; + } + + [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) { + return inspect( + createFilteredInspectProxy({ + object: this, + evaluate: ObjectPrototypeIsPrototypeOf(ResponsePrototype, this), + keys: [ + "body", + "bodyUsed", + "headers", + "ok", + "redirected", + "status", + "statusText", + "url", + ], + }), + inspectOptions, + ); + } +} + +webidl.configureInterface(Response); +ObjectDefineProperties(Response, { + json: { __proto__: null, enumerable: true }, + redirect: { __proto__: null, enumerable: true }, + error: { __proto__: null, enumerable: true }, +}); +const ResponsePrototype = Response.prototype; +mixinBody(ResponsePrototype, _body, _mimeType); + +webidl.converters["Response"] = webidl.createInterfaceConverter( + "Response", + ResponsePrototype, +); +const webidlConvertersResponseInit = webidl.converters["ResponseInit"] = webidl + .createDictionaryConverter( + "ResponseInit", + [{ + key: "status", + defaultValue: 200, + 
converter: webidlConvertersUnsignedShort, + }, { + key: "statusText", + defaultValue: "", + converter: webidlConvertersByteString, + }, { + key: "headers", + converter: webidlConvertersHeadersInit, + }], + ); +const webidlConvertersResponseInitFast = webidl + .converters["ResponseInit_fast"] = function ( + init, + prefix, + context, + opts, + ) { + if (init === undefined || init === null) { + return { status: 200, statusText: "", headers: undefined }; + } + // Fast path, if not a proxy + if (typeof init === "object" && !core.isProxy(init)) { + // Not a proxy fast path + const status = init.status !== undefined + ? webidlConvertersUnsignedShort(init.status) + : 200; + const statusText = init.statusText !== undefined + ? webidlConvertersByteString(init.statusText) + : ""; + const headers = init.headers !== undefined + ? webidlConvertersHeadersInit(init.headers) + : undefined; + return { status, statusText, headers }; + } + // Slow default path + return webidlConvertersResponseInit(init, prefix, context, opts); + }; + +/** + * @param {Response} response + * @returns {InnerResponse} + */ +function toInnerResponse(response) { + return response[_response]; +} + +/** + * @param {InnerResponse} inner + * @param {"request" | "immutable" | "request-no-cors" | "response" | "none"} guard + * @returns {Response} + */ +function fromInnerResponse(inner, guard) { + const response = new Response(_brand); + response[_response] = inner; + response[_headers] = headersFromHeaderList(inner.headerList, guard); + return response; +} + +export { + abortedNetworkError, + fromInnerResponse, + networkError, + newInnerResponse, + nullBodyStatus, + redirectStatus, + Response, + ResponsePrototype, + toInnerResponse, +}; diff --git a/vendor/deno_fetch/26_fetch.js b/vendor/deno_fetch/26_fetch.js new file mode 100644 index 00000000..b3e23fe7 --- /dev/null +++ b/vendor/deno_fetch/26_fetch.js @@ -0,0 +1,595 @@ +// Copyright 2018-2025 the Deno authors. All rights reserved. MIT license. 
+ +// @ts-check +/// +/// +/// +/// +/// +/// +/// +/// + +import { core, primordials } from "ext:core/mod.js"; +import { + op_fetch, + op_fetch_promise_is_settled, + op_fetch_send, + op_wasm_streaming_feed, + op_wasm_streaming_set_url, +} from "ext:core/ops"; +const { + ArrayPrototypePush, + ArrayPrototypeSplice, + ArrayPrototypeFilter, + ArrayPrototypeIncludes, + Error, + ObjectPrototypeIsPrototypeOf, + Promise, + PromisePrototypeThen, + PromisePrototypeCatch, + SafeArrayIterator, + SafePromisePrototypeFinally, + String, + StringPrototypeEndsWith, + StringPrototypeStartsWith, + StringPrototypeToLowerCase, + TypeError, + TypedArrayPrototypeGetSymbolToStringTag, +} = primordials; + +import * as webidl from "ext:deno_webidl/00_webidl.js"; +import { byteLowerCase } from "ext:deno_web/00_infra.js"; +import { + errorReadableStream, + getReadableStreamResourceBacking, + readableStreamForRid, + ReadableStreamPrototype, + resourceForReadableStream, +} from "ext:deno_web/06_streams.js"; +import { extractBody, InnerBody } from "ext:deno_fetch/22_body.js"; +import { processUrlList, toInnerRequest } from "ext:deno_fetch/23_request.js"; +import { + abortedNetworkError, + fromInnerResponse, + networkError, + nullBodyStatus, + redirectStatus, + toInnerResponse, +} from "ext:deno_fetch/23_response.js"; +import * as abortSignal from "ext:deno_web/03_abort_signal.js"; +import { + builtinTracer, + ContextManager, + enterSpan, + PROPAGATORS, + restoreSnapshot, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; +import { + updateSpanFromRequest, + updateSpanFromResponse, +} from "ext:deno_telemetry/util.ts"; + +const REQUEST_BODY_HEADER_NAMES = [ + "content-encoding", + "content-language", + "content-location", + "content-type", +]; + +const REDIRECT_SENSITIVE_HEADER_NAMES = [ + "authorization", + "proxy-authorization", + "cookie", +]; + +/** + * @param {number} rid + * @returns {Promise<{ status: number, statusText: string, headers: [string, string][], url: string, 
responseRid: number, error: [string, string]? }>} + */ +function opFetchSend(rid) { + return op_fetch_send(rid); +} + +/** + * @param {number} responseBodyRid + * @param {AbortSignal} [terminator] + * @returns {ReadableStream} + */ +function createResponseBodyStream(responseBodyRid, terminator) { + const readable = readableStreamForRid(responseBodyRid); + + function onAbort() { + errorReadableStream(readable, terminator.reason); + core.tryClose(responseBodyRid); + } + + // TODO(lucacasonato): clean up registration + terminator[abortSignal.add](onAbort); + + return readable; +} + +/** + * @param {InnerRequest} req + * @param {boolean} recursive + * @param {AbortSignal} terminator + * @returns {Promise} + */ +async function mainFetch(req, recursive, terminator) { + if (req.blobUrlEntry !== null) { + if (req.method !== "GET") { + throw new TypeError("Blob URL fetch only supports GET method"); + } + + const body = new InnerBody(req.blobUrlEntry.stream()); + terminator[abortSignal.add](() => body.error(terminator.reason)); + processUrlList(req.urlList, req.urlListProcessed); + + return { + headerList: [ + ["content-length", String(req.blobUrlEntry.size)], + ["content-type", req.blobUrlEntry.type], + ], + status: 200, + statusMessage: "OK", + body, + type: "basic", + url() { + if (this.urlList.length == 0) return null; + return this.urlList[this.urlList.length - 1]; + }, + urlList: recursive + ? 
[] + : [...new SafeArrayIterator(req.urlListProcessed)], + }; + } + + /** @type {ReadableStream | Uint8Array | null} */ + let reqBody = null; + let reqRid = null; + + if (req.body) { + const stream = req.body.streamOrStatic; + const body = stream.body; + + if (TypedArrayPrototypeGetSymbolToStringTag(body) === "Uint8Array") { + reqBody = body; + } else if (typeof body === "string") { + reqBody = core.encode(body); + } else if (ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, stream)) { + const resourceBacking = getReadableStreamResourceBacking(stream); + if (resourceBacking) { + reqRid = resourceBacking.rid; + } else { + reqRid = resourceForReadableStream(stream, req.body.length); + } + } else { + throw new TypeError("Invalid body"); + } + } + + const { requestRid, cancelHandleRid } = op_fetch( + req.method, + req.currentUrl(), + req.headerList, + req.clientRid, + reqBody !== null || reqRid !== null, + reqBody, + reqRid, + ); + + function onAbort() { + if (cancelHandleRid !== null) { + core.tryClose(cancelHandleRid); + } + } + terminator[abortSignal.add](onAbort); + let resp; + try { + resp = await opFetchSend(requestRid); + } catch (err) { + if (terminator.aborted) return abortedNetworkError(); + throw err; + } finally { + if (cancelHandleRid !== null) { + core.tryClose(cancelHandleRid); + } + } + // Re-throw any body errors + if (resp.error !== null) { + const { 0: message, 1: cause } = resp.error; + throw new TypeError(message, { cause: new Error(cause) }); + } + if (terminator.aborted) return abortedNetworkError(); + + processUrlList(req.urlList, req.urlListProcessed); + + /** @type {InnerResponse} */ + const response = { + headerList: resp.headers, + status: resp.status, + body: null, + statusMessage: resp.statusText, + type: "basic", + url() { + if (this.urlList.length == 0) return null; + return this.urlList[this.urlList.length - 1]; + }, + urlList: req.urlListProcessed, + }; + if (redirectStatus(resp.status)) { + switch (req.redirectMode) { + case 
"error": + core.close(resp.responseRid); + return networkError( + "Encountered redirect while redirect mode is set to 'error'", + ); + case "follow": + core.close(resp.responseRid); + return httpRedirectFetch(req, response, terminator); + case "manual": + break; + } + } + + if (nullBodyStatus(response.status)) { + core.close(resp.responseRid); + } else { + if (req.method === "HEAD" || req.method === "CONNECT") { + response.body = null; + core.close(resp.responseRid); + } else { + response.body = new InnerBody( + createResponseBodyStream(resp.responseRid, terminator), + ); + } + } + + if (recursive) return response; + + if (response.urlList.length === 0) { + processUrlList(req.urlList, req.urlListProcessed); + response.urlList = [...new SafeArrayIterator(req.urlListProcessed)]; + } + + return response; +} + +/** + * @param {InnerRequest} request + * @param {InnerResponse} response + * @param {AbortSignal} terminator + * @returns {Promise} + */ +function httpRedirectFetch(request, response, terminator) { + const locationHeaders = ArrayPrototypeFilter( + response.headerList, + (entry) => byteLowerCase(entry[0]) === "location", + ); + if (locationHeaders.length === 0) { + return response; + } + + const currentURL = new URL(request.currentUrl()); + const locationURL = new URL( + locationHeaders[0][1], + response.url() ?? 
undefined, + ); + if (locationURL.hash === "") { + locationURL.hash = currentURL.hash; + } + if (locationURL.protocol !== "https:" && locationURL.protocol !== "http:") { + return networkError("Can not redirect to a non HTTP(s) url"); + } + if (request.redirectCount === 20) { + return networkError("Maximum number of redirects (20) reached"); + } + request.redirectCount++; + if ( + response.status !== 303 && + request.body !== null && + request.body.source === null + ) { + return networkError( + "Can not redeliver a streaming request body after a redirect", + ); + } + if ( + ((response.status === 301 || response.status === 302) && + request.method === "POST") || + (response.status === 303 && + request.method !== "GET" && + request.method !== "HEAD") + ) { + request.method = "GET"; + request.body = null; + for (let i = 0; i < request.headerList.length; i++) { + if ( + ArrayPrototypeIncludes( + REQUEST_BODY_HEADER_NAMES, + byteLowerCase(request.headerList[i][0]), + ) + ) { + ArrayPrototypeSplice(request.headerList, i, 1); + i--; + } + } + } + + // Drop confidential headers when redirecting to a less secure protocol + // or to a different domain that is not a superdomain + if ( + locationURL.protocol !== currentURL.protocol && + locationURL.protocol !== "https:" || + locationURL.host !== currentURL.host && + !isSubdomain(locationURL.host, currentURL.host) + ) { + for (let i = 0; i < request.headerList.length; i++) { + if ( + ArrayPrototypeIncludes( + REDIRECT_SENSITIVE_HEADER_NAMES, + byteLowerCase(request.headerList[i][0]), + ) + ) { + ArrayPrototypeSplice(request.headerList, i, 1); + i--; + } + } + } + + if (request.body !== null) { + const res = extractBody(request.body.source); + request.body = res.body; + } + ArrayPrototypePush(request.urlList, () => locationURL.href); + return mainFetch(request, true, terminator); +} + +/** + * @param {RequestInfo} input + * @param {RequestInit} init + */ +function fetch(input, init = { __proto__: null }) { + let span; + let 
snapshot; + try { + if (TRACING_ENABLED) { + span = builtinTracer().startSpan("fetch", { kind: 2 }); + snapshot = enterSpan(span); + } + + // There is an async dispatch later that causes a stack trace disconnect. + // We reconnect it by assigning the result of that dispatch to `opPromise`, + // awaiting `opPromise` in an inner function also named `fetch()` and + // returning the result from that. + let opPromise = undefined; + // 1. + const result = new Promise((resolve, reject) => { + const prefix = "Failed to execute 'fetch'"; + webidl.requiredArguments(arguments.length, 1, prefix); + // 2. + const requestObject = new Request(input, init); + + if (span) { + const context = ContextManager.active(); + for (const propagator of new SafeArrayIterator(PROPAGATORS)) { + propagator.inject(context, requestObject.headers, { + set(carrier, key, value) { + carrier.append(key, value); + }, + }); + } + + updateSpanFromRequest(span, requestObject); + } + + // 3. + const request = toInnerRequest(requestObject); + // 4. + if (requestObject.signal.aborted) { + reject(abortFetch(request, null, requestObject.signal.reason)); + return; + } + // 7. + let responseObject = null; + // 9. + let locallyAborted = false; + // 10. + function onabort() { + locallyAborted = true; + reject( + abortFetch(request, responseObject, requestObject.signal.reason), + ); + } + requestObject.signal[abortSignal.add](onabort); + + if (!requestObject.headers.has("Accept")) { + ArrayPrototypePush(request.headerList, ["Accept", "*/*"]); + } + + if (!requestObject.headers.has("Accept-Language")) { + ArrayPrototypePush(request.headerList, ["Accept-Language", "*"]); + } + + // 12. + opPromise = PromisePrototypeCatch( + PromisePrototypeThen( + mainFetch(request, false, requestObject.signal), + (response) => { + // 12.1. + if (locallyAborted) return; + // 12.2. 
+ if (response.aborted) { + reject( + abortFetch( + request, + responseObject, + requestObject.signal.reason, + ), + ); + requestObject.signal[abortSignal.remove](onabort); + return; + } + // 12.3. + if (response.type === "error") { + const err = new TypeError( + "Fetch failed: " + (response.error ?? "unknown error"), + ); + reject(err); + requestObject.signal[abortSignal.remove](onabort); + return; + } + responseObject = fromInnerResponse(response, "immutable"); + + if (span) { + updateSpanFromResponse(span, responseObject); + } + + resolve(responseObject); + requestObject.signal[abortSignal.remove](onabort); + }, + ), + (err) => { + reject(err); + requestObject.signal[abortSignal.remove](onabort); + }, + ); + }); + + if (opPromise) { + PromisePrototypeCatch(result, () => {}); + return (async function fetch() { + try { + await opPromise; + return result; + } finally { + span?.end(); + } + })(); + } + // We need to end the span when the promise settles. + // WPT has a test that aborted fetch is settled in the same tick. + // This means we cannot wrap the promise if it is already settled. + // But this is OK, because we can just immediately end the span + // in that case. + if (span) { + // XXX: This should always be true, otherwise `opPromise` would be present. + if (op_fetch_promise_is_settled(result)) { + // It's already settled. + span?.end(); + } else { + // Not settled yet, we can return a new wrapper promise. 
+ return SafePromisePrototypeFinally(result, () => { + span?.end(); + }); + } + } + return result; + } finally { + if (snapshot) restoreSnapshot(snapshot); + } +} + +function abortFetch(request, responseObject, error) { + if (request.body !== null) { + // Cancel the body if we haven't taken it as a resource yet + if (!request.body.streamOrStatic.locked) { + request.body.cancel(error); + } + } + if (responseObject !== null) { + const response = toInnerResponse(responseObject); + if (response.body !== null) response.body.error(error); + } + return error; +} + +/** + * Checks if the given string is a subdomain of the given domain. + * + * @param {String} subdomain + * @param {String} domain + * @returns {Boolean} + */ +function isSubdomain(subdomain, domain) { + const dot = subdomain.length - domain.length - 1; + return dot > 0 && subdomain[dot] === "." && + StringPrototypeEndsWith(subdomain, domain); +} + +/** + * Handle the Response argument to the WebAssembly streaming APIs, after + * resolving if it was passed as a promise. This function should be registered + * through `Deno.core.setWasmStreamingCallback`. + * + * @param {any} source The source parameter that the WebAssembly streaming API + * was called with. If it was called with a Promise, `source` is the resolved + * value of that promise. + * @param {number} rid An rid that represents the wasm streaming resource. + */ +function handleWasmStreaming(source, rid) { + // This implements part of + // https://webassembly.github.io/spec/web-api/#compile-a-potential-webassembly-response + try { + const res = webidl.converters["Response"]( + source, + "Failed to execute 'WebAssembly.compileStreaming'", + "Argument 1", + ); + + // 2.3. + // The spec is ambiguous here, see + // https://github.com/WebAssembly/spec/issues/1138. The WPT tests expect + // the raw value of the Content-Type attribute lowercased. We ignore this + // for file:// because file fetches don't have a Content-Type. 
+ if (!StringPrototypeStartsWith(res.url, "file://")) { + const contentType = res.headers.get("Content-Type"); + if ( + typeof contentType !== "string" || + StringPrototypeToLowerCase(contentType) !== "application/wasm" + ) { + throw new TypeError("Invalid WebAssembly content type"); + } + } + + // 2.5. + if (!res.ok) { + throw new TypeError( + `Failed to receive WebAssembly content: HTTP status code ${res.status}`, + ); + } + + // Pass the resolved URL to v8. + op_wasm_streaming_set_url(rid, res.url); + + if (res.body !== null) { + // 2.6. + // Rather than consuming the body as an ArrayBuffer, this passes each + // chunk to the feed as soon as it's available. + PromisePrototypeThen( + (async () => { + const reader = res.body.getReader(); + while (true) { + const { value: chunk, done } = await reader.read(); + if (done) break; + op_wasm_streaming_feed(rid, chunk); + } + })(), + // 2.7 + () => core.close(rid), + // 2.8 + (err) => core.abortWasmStreaming(rid, err), + ); + } else { + // 2.7 + core.close(rid); + } + } catch (err) { + // 2.8 + core.abortWasmStreaming(rid, err); + } +} + +export { fetch, handleWasmStreaming, mainFetch }; diff --git a/vendor/deno_fetch/27_eventsource.js b/vendor/deno_fetch/27_eventsource.js new file mode 100644 index 00000000..aadbb5fe --- /dev/null +++ b/vendor/deno_fetch/27_eventsource.js @@ -0,0 +1,386 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +/// + +import { primordials } from "ext:core/mod.js"; +import { op_utf8_to_byte_string } from "ext:core/ops"; +const { + ArrayPrototypeFind, + Number, + NumberIsFinite, + NumberIsNaN, + ObjectDefineProperties, + ObjectPrototypeIsPrototypeOf, + StringPrototypeEndsWith, + StringPrototypeIncludes, + StringPrototypeIndexOf, + StringPrototypeSlice, + StringPrototypeStartsWith, + StringPrototypeToLowerCase, + SymbolFor, +} = primordials; + +import * as webidl from "ext:deno_webidl/00_webidl.js"; +import { createFilteredInspectProxy } from "ext:deno_console/01_console.js"; +import { URL } from "ext:deno_url/00_url.js"; +import { DOMException } from "ext:deno_web/01_dom_exception.js"; +import { + defineEventHandler, + EventTarget, + setIsTrusted, +} from "ext:deno_web/02_event.js"; +import { clearTimeout, setTimeout } from "ext:deno_web/02_timers.js"; +import { TransformStream } from "ext:deno_web/06_streams.js"; +import { TextDecoderStream } from "ext:deno_web/08_text_encoding.js"; +import { getLocationHref } from "ext:deno_web/12_location.js"; +import { newInnerRequest } from "ext:deno_fetch/23_request.js"; +import { mainFetch } from "ext:deno_fetch/26_fetch.js"; + +// Copied from https://github.com/denoland/deno_std/blob/e0753abe0c8602552862a568348c046996709521/streams/text_line_stream.ts#L20-L74 +export class TextLineStream extends TransformStream { + #allowCR; + #buf = ""; + + constructor(options) { + super({ + transform: (chunk, controller) => this.#handle(chunk, controller), + flush: (controller) => { + if (this.#buf.length > 0) { + if ( + this.#allowCR && + this.#buf[this.#buf.length - 1] === "\r" + ) controller.enqueue(StringPrototypeSlice(this.#buf, 0, -1)); + else controller.enqueue(this.#buf); + } + }, + }); + this.#allowCR = options?.allowCR ?? 
false; + } + + #handle(chunk, controller) { + chunk = this.#buf + chunk; + + for (;;) { + const lfIndex = StringPrototypeIndexOf(chunk, "\n"); + + if (this.#allowCR) { + const crIndex = StringPrototypeIndexOf(chunk, "\r"); + + if ( + crIndex !== -1 && crIndex !== (chunk.length - 1) && + (lfIndex === -1 || (lfIndex - 1) > crIndex) + ) { + controller.enqueue(StringPrototypeSlice(chunk, 0, crIndex)); + chunk = StringPrototypeSlice(chunk, crIndex + 1); + continue; + } + } + + if (lfIndex !== -1) { + let crOrLfIndex = lfIndex; + if (chunk[lfIndex - 1] === "\r") { + crOrLfIndex--; + } + controller.enqueue(StringPrototypeSlice(chunk, 0, crOrLfIndex)); + chunk = StringPrototypeSlice(chunk, lfIndex + 1); + continue; + } + + break; + } + + this.#buf = chunk; + } +} + +const CONNECTING = 0; +const OPEN = 1; +const CLOSED = 2; + +class EventSource extends EventTarget { + /** @type {AbortController} */ + #abortController = new AbortController(); + + /** @type {number | undefined} */ + #reconnectionTimerId; + + /** @type {number} */ + #reconnectionTime = 5000; + + /** @type {string} */ + #lastEventId = ""; + + /** @type {number} */ + #readyState = CONNECTING; + get readyState() { + webidl.assertBranded(this, EventSourcePrototype); + return this.#readyState; + } + + get CONNECTING() { + webidl.assertBranded(this, EventSourcePrototype); + return CONNECTING; + } + get OPEN() { + webidl.assertBranded(this, EventSourcePrototype); + return OPEN; + } + get CLOSED() { + webidl.assertBranded(this, EventSourcePrototype); + return CLOSED; + } + + /** @type {string} */ + #url; + get url() { + webidl.assertBranded(this, EventSourcePrototype); + return this.#url; + } + + /** @type {boolean} */ + #withCredentials; + get withCredentials() { + webidl.assertBranded(this, EventSourcePrototype); + return this.#withCredentials; + } + + constructor(url, eventSourceInitDict = { __proto__: null }) { + super(); + this[webidl.brand] = webidl.brand; + const prefix = "Failed to construct 'EventSource'"; + 
webidl.requiredArguments(arguments.length, 1, prefix); + url = webidl.converters.USVString(url, prefix, "Argument 1"); + eventSourceInitDict = webidl.converters.EventSourceInit( + eventSourceInitDict, + prefix, + "Argument 2", + ); + + try { + url = new URL(url, getLocationHref()).href; + } catch (e) { + throw new DOMException(e.message, "SyntaxError"); + } + + this.#url = url; + this.#withCredentials = eventSourceInitDict.withCredentials; + + this.#loop(); + } + + close() { + webidl.assertBranded(this, EventSourcePrototype); + this.#abortController.abort(); + this.#readyState = CLOSED; + clearTimeout(this.#reconnectionTimerId); + } + + async #loop() { + const lastEventIdValue = this.#lastEventId; + const req = newInnerRequest( + "GET", + this.#url, + () => + lastEventIdValue === "" + ? [ + ["accept", "text/event-stream"], + ] + : [ + ["accept", "text/event-stream"], + ["Last-Event-Id", op_utf8_to_byte_string(lastEventIdValue)], + ], + null, + false, + ); + /** @type {InnerResponse} */ + let res; + try { + res = await mainFetch(req, true, this.#abortController.signal); + } catch { + this.#reestablishConnection(); + return; + } + + if (res.aborted) { + this.#failConnection(); + return; + } + if (res.type === "error") { + this.#reestablishConnection(); + return; + } + const contentType = ArrayPrototypeFind( + res.headerList, + (header) => StringPrototypeToLowerCase(header[0]) === "content-type", + ); + if ( + res.status !== 200 || + !contentType || + !StringPrototypeIncludes( + StringPrototypeToLowerCase(contentType[1]), + "text/event-stream", + ) + ) { + this.#failConnection(); + return; + } + + if (this.#readyState === CLOSED) { + return; + } + this.#readyState = OPEN; + this.dispatchEvent(new Event("open")); + + let data = ""; + let eventType = ""; + let lastEventId = this.#lastEventId; + + try { + for await ( + // deno-lint-ignore prefer-primordials + const chunk of res.body.stream + .pipeThrough(new TextDecoderStream()) + .pipeThrough(new TextLineStream({ 
allowCR: true })) + ) { + if (chunk === "") { + this.#lastEventId = lastEventId; + if (data === "") { + eventType = ""; + continue; + } + if (StringPrototypeEndsWith(data, "\n")) { + data = StringPrototypeSlice(data, 0, -1); + } + const event = new MessageEvent(eventType || "message", { + data, + origin: res.url(), + lastEventId: this.#lastEventId, + }); + setIsTrusted(event, true); + data = ""; + eventType = ""; + if (this.#readyState !== CLOSED) { + this.dispatchEvent(event); + } + } else if (StringPrototypeStartsWith(chunk, ":")) { + continue; + } else { + let field = chunk; + let value = ""; + const colonIndex = StringPrototypeIndexOf(chunk, ":"); + if (colonIndex !== -1) { + field = StringPrototypeSlice(chunk, 0, colonIndex); + value = StringPrototypeSlice(chunk, colonIndex + 1); + if (StringPrototypeStartsWith(value, " ")) { + value = StringPrototypeSlice(value, 1); + } + } + + switch (field) { + case "event": { + eventType = value; + break; + } + case "data": { + data += value + "\n"; + break; + } + case "id": { + if (!StringPrototypeIncludes(value, "\0")) { + lastEventId = value; + } + break; + } + case "retry": { + const reconnectionTime = Number(value); + if ( + !NumberIsNaN(reconnectionTime) && + NumberIsFinite(reconnectionTime) + ) { + this.#reconnectionTime = reconnectionTime; + } + break; + } + } + } + } + } catch { + // The connection is reestablished below + } + + this.#reestablishConnection(); + } + + #reestablishConnection() { + if (this.#readyState === CLOSED) { + return; + } + this.#readyState = CONNECTING; + this.dispatchEvent(new Event("error")); + this.#reconnectionTimerId = setTimeout(() => { + if (this.#readyState !== CONNECTING) { + return; + } + this.#loop(); + }, this.#reconnectionTime); + } + + #failConnection() { + if (this.#readyState !== CLOSED) { + this.#readyState = CLOSED; + this.dispatchEvent(new Event("error")); + } + } + + [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) { + return inspect( + 
createFilteredInspectProxy({ + object: this, + evaluate: ObjectPrototypeIsPrototypeOf(EventSourcePrototype, this), + keys: [ + "readyState", + "url", + "withCredentials", + "onopen", + "onmessage", + "onerror", + ], + }), + inspectOptions, + ); + } +} + +const EventSourcePrototype = EventSource.prototype; + +ObjectDefineProperties(EventSource, { + CONNECTING: { + __proto__: null, + value: 0, + }, + OPEN: { + __proto__: null, + value: 1, + }, + CLOSED: { + __proto__: null, + value: 2, + }, +}); + +defineEventHandler(EventSource.prototype, "open"); +defineEventHandler(EventSource.prototype, "message"); +defineEventHandler(EventSource.prototype, "error"); + +webidl.converters.EventSourceInit = webidl.createDictionaryConverter( + "EventSourceInit", + [ + { + key: "withCredentials", + defaultValue: false, + converter: webidl.converters.boolean, + }, + ], +); + +export { EventSource }; diff --git a/vendor/deno_fetch/Cargo.toml b/vendor/deno_fetch/Cargo.toml new file mode 100644 index 00000000..716d268a --- /dev/null +++ b/vendor/deno_fetch/Cargo.toml @@ -0,0 +1,47 @@ +# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +[package] +name = "deno_fetch" +version = "0.206.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +readme = "README.md" +repository.workspace = true +description = "Fetch API implementation for Deno" + +[lib] +path = "lib.rs" + +[dependencies] +base64.workspace = true +bytes.workspace = true +data-url.workspace = true +deno_core.workspace = true +deno_path_util.workspace = true +deno_permissions.workspace = true +deno_tls.workspace = true +dyn-clone = "1" +error_reporter = "1" +hickory-resolver.workspace = true +http.workspace = true +http-body-util.workspace = true +hyper.workspace = true +hyper-rustls.workspace = true +hyper-util.workspace = true +ipnet.workspace = true +percent-encoding.workspace = true +rustls-webpki.workspace = true +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +tokio.workspace = true +tokio-rustls.workspace = true +tokio-socks.workspace = true +tokio-util = { workspace = true, features = ["io"] } +tower.workspace = true +tower-http.workspace = true +tower-service.workspace = true + +[dev-dependencies] +fast-socks5.workspace = true diff --git a/vendor/deno_fetch/README.md b/vendor/deno_fetch/README.md new file mode 100644 index 00000000..3af8110a --- /dev/null +++ b/vendor/deno_fetch/README.md @@ -0,0 +1,82 @@ +# deno_fetch + +**This crate implements the Fetch API.** + +Spec: https://fetch.spec.whatwg.org/ + +## Usage Example + +From javascript, include the extension's source, and assign the following +properties to the global scope: + +```javascript +import * as headers from "ext:deno_fetch/20_headers.js"; +import * as formData from "ext:deno_fetch/21_formdata.js"; +import * as request from "ext:deno_fetch/23_request.js"; +import * as response from "ext:deno_fetch/23_response.js"; +import * as fetch from "ext:deno_fetch/26_fetch.js"; +import * as eventSource from "ext:deno_fetch/27_eventsource.js"; + +// Set up the callback for Wasm streaming ops 
+Deno.core.setWasmStreamingCallback(fetch.handleWasmStreaming); + +Object.defineProperty(globalThis, "fetch", { + value: fetch.fetch, + enumerable: true, + configurable: true, + writable: true, +}); + +Object.defineProperty(globalThis, "Request", { + value: request.Request, + enumerable: false, + configurable: true, + writable: true, +}); + +Object.defineProperty(globalThis, "Response", { + value: response.Response, + enumerable: false, + configurable: true, + writable: true, +}); + +Object.defineProperty(globalThis, "Headers", { + value: headers.Headers, + enumerable: false, + configurable: true, + writable: true, +}); + +Object.defineProperty(globalThis, "FormData", { + value: formData.FormData, + enumerable: false, + configurable: true, + writable: true, +}); +``` + +Then from rust, provide +`deno_fetch::deno_fetch::init_ops_and_esm(Default::default())` in +the `extensions` field of your `RuntimeOptions` + +Where: + +- Permissions: a struct implementing `deno_fetch::FetchPermissions` +- Options: `deno_fetch::Options`, which implements `Default` + +## Dependencies + +- **deno_webidl**: Provided by the `deno_webidl` crate +- **deno_web**: Provided by the `deno_web` crate +- **deno_url**: Provided by the `deno_url` crate +- **deno_console**: Provided by the `deno_console` crate + +## Provided ops + +Following ops are provided, which can be accessed through `Deno.ops`: + +- op_fetch +- op_fetch_send +- op_utf8_to_byte_string +- op_fetch_custom_client diff --git a/vendor/deno_fetch/dns.rs b/vendor/deno_fetch/dns.rs new file mode 100644 index 00000000..fdde4e17 --- /dev/null +++ b/vendor/deno_fetch/dns.rs @@ -0,0 +1,113 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+use std::future::Future; +use std::io; +use std::net::SocketAddr; +use std::pin::Pin; +use std::task::Poll; +use std::task::{self}; +use std::vec; + +use hickory_resolver::name_server::TokioConnectionProvider; +use hyper_util::client::legacy::connect::dns::GaiResolver; +use hyper_util::client::legacy::connect::dns::Name; +use tokio::task::JoinHandle; +use tower::Service; + +#[derive(Clone, Debug)] +pub enum Resolver { + /// A resolver using blocking `getaddrinfo` calls in a threadpool. + Gai(GaiResolver), + /// hickory-resolver's userspace resolver. + Hickory(hickory_resolver::Resolver), +} + +impl Default for Resolver { + fn default() -> Self { + Self::gai() + } +} + +impl Resolver { + pub fn gai() -> Self { + Self::Gai(GaiResolver::new()) + } + + /// Create a [`AsyncResolver`] from system conf. + pub fn hickory() -> Result { + Ok(Self::Hickory( + hickory_resolver::Resolver::tokio_from_system_conf()?, + )) + } + + pub fn hickory_from_resolver( + resolver: hickory_resolver::Resolver, + ) -> Self { + Self::Hickory(resolver) + } +} + +type SocketAddrs = vec::IntoIter; + +pub struct ResolveFut { + inner: JoinHandle>, +} + +impl Future for ResolveFut { + type Output = Result; + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll { + Pin::new(&mut self.inner).poll(cx).map(|res| match res { + Ok(Ok(addrs)) => Ok(addrs), + Ok(Err(e)) => Err(e), + Err(join_err) => { + if join_err.is_cancelled() { + Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) + } else { + Err(io::Error::new(io::ErrorKind::Other, join_err)) + } + } + }) + } +} + +impl Service for Resolver { + type Response = SocketAddrs; + type Error = io::Error; + type Future = ResolveFut; + + fn poll_ready( + &mut self, + _cx: &mut task::Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, name: Name) -> Self::Future { + let task = match self { + Resolver::Gai(gai_resolver) => { + let mut resolver = gai_resolver.clone(); + tokio::spawn(async move { + let 
result = resolver.call(name).await?; + let x: Vec<_> = result.into_iter().collect(); + let iter: SocketAddrs = x.into_iter(); + Ok(iter) + }) + } + Resolver::Hickory(async_resolver) => { + let resolver = async_resolver.clone(); + tokio::spawn(async move { + let result = resolver.lookup_ip(name.as_str()).await?; + + let x: Vec<_> = + result.into_iter().map(|x| SocketAddr::new(x, 0)).collect(); + let iter: SocketAddrs = x.into_iter(); + Ok(iter) + }) + } + }; + ResolveFut { inner: task } + } +} diff --git a/vendor/deno_fetch/fs_fetch_handler.rs b/vendor/deno_fetch/fs_fetch_handler.rs new file mode 100644 index 00000000..c236dd9c --- /dev/null +++ b/vendor/deno_fetch/fs_fetch_handler.rs @@ -0,0 +1,50 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use crate::CancelHandle; +use crate::CancelableResponseFuture; +use crate::FetchHandler; + +use deno_core::futures::FutureExt; +use deno_core::futures::TryFutureExt; +use deno_core::futures::TryStreamExt; +use deno_core::url::Url; +use deno_core::CancelFuture; +use deno_core::OpState; +use http::StatusCode; +use http_body_util::BodyExt; +use std::rc::Rc; +use tokio_util::io::ReaderStream; + +/// An implementation which tries to read file URLs from the file system via +/// tokio::fs. 
+#[derive(Clone)] +pub struct FsFetchHandler; + +impl FetchHandler for FsFetchHandler { + fn fetch_file( + &self, + _state: &mut OpState, + url: &Url, + ) -> (CancelableResponseFuture, Option>) { + let cancel_handle = CancelHandle::new_rc(); + let path_result = url.to_file_path(); + let response_fut = async move { + let path = path_result?; + let file = tokio::fs::File::open(path).map_err(|_| ()).await?; + let stream = ReaderStream::new(file) + .map_ok(hyper::body::Frame::data) + .map_err(Into::into); + let body = http_body_util::StreamBody::new(stream).boxed(); + let response = http::Response::builder() + .status(StatusCode::OK) + .body(body) + .map_err(|_| ())?; + Ok::<_, ()>(response) + } + .map_err(move |_| super::FetchError::NetworkError) + .or_cancel(&cancel_handle) + .boxed_local(); + + (response_fut, Some(cancel_handle)) + } +} diff --git a/vendor/deno_fetch/internal.d.ts b/vendor/deno_fetch/internal.d.ts new file mode 100644 index 00000000..17565992 --- /dev/null +++ b/vendor/deno_fetch/internal.d.ts @@ -0,0 +1,100 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +// deno-lint-ignore-file no-explicit-any no-var + +/// +/// + +declare var domIterable: { + DomIterableMixin(base: any, dataSymbol: symbol): any; +}; + +declare module "ext:deno_fetch/20_headers.js" { + class Headers { + } + type HeaderList = [string, string][]; + function headersFromHeaderList( + list: HeaderList, + guard: + | "immutable" + | "request" + | "request-no-cors" + | "response" + | "none", + ): Headers; + function headerListFromHeaders(headers: Headers): HeaderList; + function fillHeaders(headers: Headers, object: HeadersInit): void; + function getDecodeSplitHeader( + list: HeaderList, + name: string, + ): string[] | null; + function guardFromHeaders( + headers: Headers, + ): "immutable" | "request" | "request-no-cors" | "response" | "none"; +} + +declare module "ext:deno_fetch/21_formdata.js" { + type FormData = typeof FormData; + function formDataToBlob( + formData: FormData, + ): Blob; + function parseFormData( + body: Uint8Array, + boundary: string | undefined, + ): FormData; + function formDataFromEntries(entries: FormDataEntry[]): FormData; +} + +declare module "ext:deno_fetch/22_body.js" { + function mixinBody( + prototype: any, + bodySymbol: symbol, + mimeTypeSymbol: symbol, + ): void; + class InnerBody { + constructor(stream?: ReadableStream); + stream: ReadableStream; + source: null | Uint8Array | Blob | FormData; + length: null | number; + unusable(): boolean; + consume(): Promise; + clone(): InnerBody; + } + function extractBody(object: BodyInit): { + body: InnerBody; + contentType: string | null; + }; +} + +declare module "ext:deno_fetch/26_fetch.js" { + function toInnerRequest(request: Request): InnerRequest; + function fromInnerRequest( + inner: InnerRequest, + guard: + | "request" + | "immutable" + | "request-no-cors" + | "response" + | "none", + skipBody: boolean, + ): Request; + function redirectStatus(status: number): boolean; + function nullBodyStatus(status: number): boolean; + function newInnerRequest( + method: string, + url: 
any, + headerList?: [string, string][], + body?: fetchBody.InnerBody, + ): InnerResponse; + function toInnerResponse(response: Response): InnerResponse; + function fromInnerResponse( + inner: InnerResponse, + guard: + | "request" + | "immutable" + | "request-no-cors" + | "response" + | "none", + ): Response; + function networkError(error: string): InnerResponse; +} diff --git a/vendor/deno_fetch/lib.deno_fetch.d.ts b/vendor/deno_fetch/lib.deno_fetch.d.ts new file mode 100644 index 00000000..8614dec8 --- /dev/null +++ b/vendor/deno_fetch/lib.deno_fetch.d.ts @@ -0,0 +1,498 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +// deno-lint-ignore-file no-explicit-any no-var + +/// +/// + +/** @category Platform */ +interface DomIterable { + keys(): IterableIterator; + values(): IterableIterator; + entries(): IterableIterator<[K, V]>; + [Symbol.iterator](): IterableIterator<[K, V]>; + forEach( + callback: (value: V, key: K, parent: this) => void, + thisArg?: any, + ): void; +} + +/** @category Fetch */ +type FormDataEntryValue = File | string; + +/** Provides a way to easily construct a set of key/value pairs representing + * form fields and their values, which can then be easily sent using the + * XMLHttpRequest.send() method. It uses the same format a form would use if the + * encoding type were set to "multipart/form-data". + * + * @category Fetch + */ +interface FormData extends DomIterable { + append(name: string, value: string | Blob, fileName?: string): void; + delete(name: string): void; + get(name: string): FormDataEntryValue | null; + getAll(name: string): FormDataEntryValue[]; + has(name: string): boolean; + set(name: string, value: string | Blob, fileName?: string): void; +} + +/** @category Fetch */ +declare var FormData: { + readonly prototype: FormData; + new (): FormData; +}; + +/** @category Fetch */ +interface Body { + /** A simple getter used to expose a `ReadableStream` of the body contents. 
*/ + readonly body: ReadableStream | null; + /** Stores a `Boolean` that declares whether the body has been used in a + * response yet. + */ + readonly bodyUsed: boolean; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with an `ArrayBuffer`. + */ + arrayBuffer(): Promise; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with a `Blob`. + */ + blob(): Promise; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with a `Uint8Array`. + */ + bytes(): Promise; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with a `FormData` object. + */ + formData(): Promise; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with the result of parsing the body text as JSON. + */ + json(): Promise; + /** Takes a `Response` stream and reads it to completion. It returns a promise + * that resolves with a `USVString` (text). + */ + text(): Promise; +} + +/** @category Fetch */ +type HeadersInit = Iterable | Record; + +/** This Fetch API interface allows you to perform various actions on HTTP + * request and response headers. These actions include retrieving, setting, + * adding to, and removing. A Headers object has an associated header list, + * which is initially empty and consists of zero or more name and value pairs. + * You can add to this using methods like append() (see Examples). In all + * methods of this interface, header names are matched by case-insensitive byte + * sequence. + * + * @category Fetch + */ +interface Headers extends DomIterable { + /** Appends a new value onto an existing header inside a `Headers` object, or + * adds the header if it does not already exist. + */ + append(name: string, value: string): void; + /** Deletes a header from a `Headers` object. 
*/ + delete(name: string): void; + /** Returns a `ByteString` sequence of all the values of a header within a + * `Headers` object with a given name. + */ + get(name: string): string | null; + /** Returns a boolean stating whether a `Headers` object contains a certain + * header. + */ + has(name: string): boolean; + /** Sets a new value for an existing header inside a Headers object, or adds + * the header if it does not already exist. + */ + set(name: string, value: string): void; + /** Returns an array containing the values of all `Set-Cookie` headers + * associated with a response. + */ + getSetCookie(): string[]; +} + +/** This Fetch API interface allows you to perform various actions on HTTP + * request and response headers. These actions include retrieving, setting, + * adding to, and removing. A Headers object has an associated header list, + * which is initially empty and consists of zero or more name and value pairs. + * You can add to this using methods like append() (see Examples). In all + * methods of this interface, header names are matched by case-insensitive byte + * sequence. 
+ * + * @category Fetch + */ +declare var Headers: { + readonly prototype: Headers; + new (init?: HeadersInit): Headers; +}; + +/** @category Fetch */ +type RequestInfo = Request | string; +/** @category Fetch */ +type RequestCache = + | "default" + | "force-cache" + | "no-cache" + | "no-store" + | "only-if-cached" + | "reload"; +/** @category Fetch */ +type RequestCredentials = "include" | "omit" | "same-origin"; +/** @category Fetch */ +type RequestMode = "cors" | "navigate" | "no-cors" | "same-origin"; +/** @category Fetch */ +type RequestRedirect = "error" | "follow" | "manual"; +/** @category Fetch */ +type ReferrerPolicy = + | "" + | "no-referrer" + | "no-referrer-when-downgrade" + | "origin" + | "origin-when-cross-origin" + | "same-origin" + | "strict-origin" + | "strict-origin-when-cross-origin" + | "unsafe-url"; +/** @category Fetch */ +type BodyInit = + | Blob + | BufferSource + | FormData + | URLSearchParams + | ReadableStream + | Iterable + | AsyncIterable + | string; +/** @category Fetch */ +type RequestDestination = + | "" + | "audio" + | "audioworklet" + | "document" + | "embed" + | "font" + | "image" + | "manifest" + | "object" + | "paintworklet" + | "report" + | "script" + | "sharedworker" + | "style" + | "track" + | "video" + | "worker" + | "xslt"; + +/** @category Fetch */ +interface RequestInit { + /** + * A BodyInit object or null to set request's body. + */ + body?: BodyInit | null; + /** + * A string indicating how the request will interact with the browser's cache + * to set request's cache. + */ + cache?: RequestCache; + /** + * A string indicating whether credentials will be sent with the request + * always, never, or only when sent to a same-origin URL. Sets request's + * credentials. + */ + credentials?: RequestCredentials; + /** + * A Headers object, an object literal, or an array of two-item arrays to set + * request's headers. + */ + headers?: HeadersInit; + /** + * A cryptographic hash of the resource to be fetched by request. 
Sets + * request's integrity. + */ + integrity?: string; + /** + * A boolean to set request's keepalive. + */ + keepalive?: boolean; + /** + * A string to set request's method. + */ + method?: string; + /** + * A string to indicate whether the request will use CORS, or will be + * restricted to same-origin URLs. Sets request's mode. + */ + mode?: RequestMode; + /** + * A string indicating whether request follows redirects, results in an error + * upon encountering a redirect, or returns the redirect (in an opaque + * fashion). Sets request's redirect. + */ + redirect?: RequestRedirect; + /** + * A string whose value is a same-origin URL, "about:client", or the empty + * string, to set request's referrer. + */ + referrer?: string; + /** + * A referrer policy to set request's referrerPolicy. + */ + referrerPolicy?: ReferrerPolicy; + /** + * An AbortSignal to set request's signal. + */ + signal?: AbortSignal | null; + /** + * Can only be null. Used to disassociate request from any Window. + */ + window?: any; +} + +/** This Fetch API interface represents a resource request. + * + * @category Fetch + */ +interface Request extends Body { + /** + * Returns the cache mode associated with request, which is a string + * indicating how the request will interact with the browser's cache when + * fetching. + */ + readonly cache: RequestCache; + /** + * Returns the credentials mode associated with request, which is a string + * indicating whether credentials will be sent with the request always, never, + * or only when sent to a same-origin URL. + */ + readonly credentials: RequestCredentials; + /** + * Returns the kind of resource requested by request, e.g., "document" or "script". + */ + readonly destination: RequestDestination; + /** + * Returns a Headers object consisting of the headers associated with request. + * Note that headers added in the network layer by the user agent will not be + * accounted for in this object, e.g., the "Host" header. 
+ */ + readonly headers: Headers; + /** + * Returns request's subresource integrity metadata, which is a cryptographic + * hash of the resource being fetched. Its value consists of multiple hashes + * separated by whitespace. [SRI] + */ + readonly integrity: string; + /** + * Returns a boolean indicating whether or not request is for a history + * navigation (a.k.a. back-forward navigation). + */ + readonly isHistoryNavigation: boolean; + /** + * Returns a boolean indicating whether or not request is for a reload + * navigation. + */ + readonly isReloadNavigation: boolean; + /** + * Returns a boolean indicating whether or not request can outlive the global + * in which it was created. + */ + readonly keepalive: boolean; + /** + * Returns request's HTTP method, which is "GET" by default. + */ + readonly method: string; + /** + * Returns the mode associated with request, which is a string indicating + * whether the request will use CORS, or will be restricted to same-origin + * URLs. + */ + readonly mode: RequestMode; + /** + * Returns the redirect mode associated with request, which is a string + * indicating how redirects for the request will be handled during fetching. A + * request will follow redirects by default. + */ + readonly redirect: RequestRedirect; + /** + * Returns the referrer of request. Its value can be a same-origin URL if + * explicitly set in init, the empty string to indicate no referrer, and + * "about:client" when defaulting to the global's default. This is used during + * fetching to determine the value of the `Referer` header of the request + * being made. + */ + readonly referrer: string; + /** + * Returns the referrer policy associated with request. This is used during + * fetching to compute the value of the request's referrer. 
+ */ + readonly referrerPolicy: ReferrerPolicy; + /** + * Returns the signal associated with request, which is an AbortSignal object + * indicating whether or not request has been aborted, and its abort event + * handler. + */ + readonly signal: AbortSignal; + /** + * Returns the URL of request as a string. + */ + readonly url: string; + clone(): Request; +} + +/** This Fetch API interface represents a resource request. + * + * @category Fetch + */ +declare var Request: { + readonly prototype: Request; + new (input: RequestInfo | URL, init?: RequestInit): Request; +}; + +/** @category Fetch */ +interface ResponseInit { + headers?: HeadersInit; + status?: number; + statusText?: string; +} + +/** @category Fetch */ +type ResponseType = + | "basic" + | "cors" + | "default" + | "error" + | "opaque" + | "opaqueredirect"; + +/** This Fetch API interface represents the response to a request. + * + * @category Fetch + */ +interface Response extends Body { + readonly headers: Headers; + readonly ok: boolean; + readonly redirected: boolean; + readonly status: number; + readonly statusText: string; + readonly type: ResponseType; + readonly url: string; + clone(): Response; +} + +/** This Fetch API interface represents the response to a request. + * + * @category Fetch + */ +declare var Response: { + readonly prototype: Response; + new (body?: BodyInit | null, init?: ResponseInit): Response; + json(data: unknown, init?: ResponseInit): Response; + error(): Response; + redirect(url: string | URL, status?: number): Response; +}; + +/** Fetch a resource from the network. It returns a `Promise` that resolves to the + * `Response` to that `Request`, whether it is successful or not. + * + * ```ts + * const response = await fetch("http://my.json.host/data.json"); + * console.log(response.status); // e.g. 200 + * console.log(response.statusText); // e.g. 
"OK" + * const jsonData = await response.json(); + * ``` + * + * @tags allow-net, allow-read + * @category Fetch + */ +declare function fetch( + input: URL | Request | string, + init?: RequestInit, +): Promise; + +/** + * @category Fetch + */ +interface EventSourceInit { + withCredentials?: boolean; +} + +/** + * @category Fetch + */ +interface EventSourceEventMap { + "error": Event; + "message": MessageEvent; + "open": Event; +} + +/** + * @category Fetch + */ +interface EventSource extends EventTarget { + onerror: ((this: EventSource, ev: Event) => any) | null; + onmessage: ((this: EventSource, ev: MessageEvent) => any) | null; + onopen: ((this: EventSource, ev: Event) => any) | null; + /** + * Returns the state of this EventSource object's connection. It can have the values described below. + */ + readonly readyState: number; + /** + * Returns the URL providing the event stream. + */ + readonly url: string; + /** + * Returns true if the credentials mode for connection requests to the URL providing the event stream is set to "include", and false otherwise. + */ + readonly withCredentials: boolean; + /** + * Aborts any instances of the fetch algorithm started for this EventSource object, and sets the readyState attribute to CLOSED. 
+ */ + close(): void; + readonly CONNECTING: 0; + readonly OPEN: 1; + readonly CLOSED: 2; + addEventListener( + type: K, + listener: (this: EventSource, ev: EventSourceEventMap[K]) => any, + options?: boolean | AddEventListenerOptions, + ): void; + addEventListener( + type: string, + listener: (this: EventSource, event: MessageEvent) => any, + options?: boolean | AddEventListenerOptions, + ): void; + addEventListener( + type: string, + listener: EventListenerOrEventListenerObject, + options?: boolean | AddEventListenerOptions, + ): void; + removeEventListener( + type: K, + listener: (this: EventSource, ev: EventSourceEventMap[K]) => any, + options?: boolean | EventListenerOptions, + ): void; + removeEventListener( + type: string, + listener: (this: EventSource, event: MessageEvent) => any, + options?: boolean | EventListenerOptions, + ): void; + removeEventListener( + type: string, + listener: EventListenerOrEventListenerObject, + options?: boolean | EventListenerOptions, + ): void; +} + +/** + * @category Fetch + */ +declare var EventSource: { + prototype: EventSource; + new (url: string | URL, eventSourceInitDict?: EventSourceInit): EventSource; + readonly CONNECTING: 0; + readonly OPEN: 1; + readonly CLOSED: 2; +}; diff --git a/vendor/deno_fetch/lib.rs b/vendor/deno_fetch/lib.rs new file mode 100644 index 00000000..a3f5d03e --- /dev/null +++ b/vendor/deno_fetch/lib.rs @@ -0,0 +1,1219 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +pub mod dns; +mod fs_fetch_handler; +mod proxy; +#[cfg(test)] +mod tests; + +use std::borrow::Cow; +use std::cell::RefCell; +use std::cmp::min; +use std::convert::From; +use std::path::Path; +use std::path::PathBuf; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +use deno_core::futures::stream::Peekable; +use deno_core::futures::Future; +use deno_core::futures::FutureExt; +use deno_core::futures::Stream; +use deno_core::futures::StreamExt; +use deno_core::futures::TryFutureExt; +use deno_core::op2; +use deno_core::url; +use deno_core::url::Url; +use deno_core::v8; +use deno_core::AsyncRefCell; +use deno_core::AsyncResult; +use deno_core::BufView; +use deno_core::ByteString; +use deno_core::CancelFuture; +use deno_core::CancelHandle; +use deno_core::CancelTryFuture; +use deno_core::Canceled; +use deno_core::JsBuffer; +use deno_core::OpState; +use deno_core::RcRef; +use deno_core::Resource; +use deno_core::ResourceId; +use deno_path_util::url_from_file_path; +use deno_path_util::PathToUrlError; +use deno_permissions::PermissionCheckError; +use deno_tls::rustls::RootCertStore; +use deno_tls::Proxy; +use deno_tls::RootCertStoreProvider; +use deno_tls::TlsKey; +use deno_tls::TlsKeys; +use deno_tls::TlsKeysHolder; + +use bytes::Bytes; +use data_url::DataUrl; +use http::header::HeaderName; +use http::header::HeaderValue; +use http::header::ACCEPT; +use http::header::ACCEPT_ENCODING; +use http::header::AUTHORIZATION; +use http::header::CONTENT_LENGTH; +use http::header::HOST; +use http::header::PROXY_AUTHORIZATION; +use http::header::RANGE; +use http::header::USER_AGENT; +use http::Extensions; +use http::Method; +use http::Uri; +use http_body_util::BodyExt; +use hyper::body::Frame; +use hyper_util::client::legacy::connect::HttpConnector; +use hyper_util::client::legacy::connect::HttpInfo; +use hyper_util::client::legacy::Builder as HyperClientBuilder; +use hyper_util::rt::TokioExecutor; +use 
hyper_util::rt::TokioTimer; +use serde::Deserialize; +use serde::Serialize; +use tower::ServiceExt; +use tower_http::decompression::Decompression; + +// Re-export data_url +pub use data_url; +pub use proxy::basic_auth; + +pub use fs_fetch_handler::FsFetchHandler; + +#[derive(Clone)] +pub struct Options { + pub user_agent: String, + pub root_cert_store_provider: Option>, + pub proxy: Option, + /// A callback to customize HTTP client configuration. + /// + /// The settings applied with this hook may be overridden by the options + /// provided through `Deno.createHttpClient()` API. For instance, if the hook + /// calls [`hyper_util::client::legacy::Builder::pool_max_idle_per_host`] with + /// a value of 99, and a user calls `Deno.createHttpClient({ poolMaxIdlePerHost: 42 })`, + /// the value that will take effect is 42. + /// + /// For more info on what can be configured, see [`hyper_util::client::legacy::Builder`]. + pub client_builder_hook: Option HyperClientBuilder>, + #[allow(clippy::type_complexity)] + pub request_builder_hook: Option< + fn(&mut http::Request) -> Result<(), deno_core::error::AnyError>, + >, + pub unsafely_ignore_certificate_errors: Option>, + pub client_cert_chain_and_key: TlsKeys, + pub file_fetch_handler: Rc, + pub resolver: dns::Resolver, +} + +impl Options { + pub fn root_cert_store( + &self, + ) -> Result, deno_core::error::AnyError> { + Ok(match &self.root_cert_store_provider { + Some(provider) => Some(provider.get_or_try_init()?.clone()), + None => None, + }) + } +} + +impl Default for Options { + fn default() -> Self { + Self { + user_agent: "".to_string(), + root_cert_store_provider: None, + proxy: None, + client_builder_hook: None, + request_builder_hook: None, + unsafely_ignore_certificate_errors: None, + client_cert_chain_and_key: TlsKeys::Null, + file_fetch_handler: Rc::new(DefaultFileFetchHandler), + resolver: dns::Resolver::default(), + } + } +} + +deno_core::extension!(deno_fetch, + deps = [ deno_webidl, deno_web, deno_url, 
deno_console ], + parameters = [FP: FetchPermissions], + ops = [ + op_fetch, + op_fetch_send, + op_utf8_to_byte_string, + op_fetch_custom_client, + op_fetch_promise_is_settled, + ], + esm = [ + "20_headers.js", + "21_formdata.js", + "22_body.js", + "22_http_client.js", + "23_request.js", + "23_response.js", + "26_fetch.js", + "27_eventsource.js" + ], + options = { + options: Options, + }, + state = |state, options| { + state.put::(options.options); + }, +); + +#[derive(Debug, thiserror::Error)] +pub enum FetchError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Permission(#[from] PermissionCheckError), + #[error("NetworkError when attempting to fetch resource")] + NetworkError, + #[error("Fetching files only supports the GET method: received {0}")] + FsNotGet(Method), + #[error(transparent)] + PathToUrl(#[from] PathToUrlError), + #[error("Invalid URL {0}")] + InvalidUrl(Url), + #[error(transparent)] + InvalidHeaderName(#[from] http::header::InvalidHeaderName), + #[error(transparent)] + InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), + #[error("{0:?}")] + DataUrl(data_url::DataUrlError), + #[error("{0:?}")] + Base64(data_url::forgiving_base64::InvalidBase64), + #[error("Blob for the given URL not found.")] + BlobNotFound, + #[error("Url scheme '{0}' not supported")] + SchemeNotSupported(String), + #[error("Request was cancelled")] + RequestCanceled, + #[error(transparent)] + Http(#[from] http::Error), + #[error(transparent)] + ClientCreate(#[from] HttpClientCreateError), + #[error(transparent)] + Url(#[from] url::ParseError), + #[error(transparent)] + Method(#[from] http::method::InvalidMethod), + #[error(transparent)] + ClientSend(#[from] ClientSendError), + #[error(transparent)] + RequestBuilderHook(deno_core::error::AnyError), + #[error(transparent)] + Io(#[from] std::io::Error), + // Only used for node upgrade + #[error(transparent)] + Hyper(#[from] hyper::Error), +} + +pub type CancelableResponseFuture 
= + Pin>>; + +pub trait FetchHandler: dyn_clone::DynClone { + // Return the result of the fetch request consisting of a tuple of the + // cancelable response result, the optional fetch body resource and the + // optional cancel handle. + fn fetch_file( + &self, + state: &mut OpState, + url: &Url, + ) -> (CancelableResponseFuture, Option>); +} + +dyn_clone::clone_trait_object!(FetchHandler); + +/// A default implementation which will error for every request. +#[derive(Clone)] +pub struct DefaultFileFetchHandler; + +impl FetchHandler for DefaultFileFetchHandler { + fn fetch_file( + &self, + _state: &mut OpState, + _url: &Url, + ) -> (CancelableResponseFuture, Option>) { + let fut = async move { Ok(Err(FetchError::NetworkError)) }; + (Box::pin(fut), None) + } +} + +pub fn get_declaration() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_fetch.d.ts") +} +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FetchReturn { + pub request_rid: ResourceId, + pub cancel_handle_rid: Option, +} + +pub fn get_or_create_client_from_state( + state: &mut OpState, +) -> Result { + if let Some(client) = state.try_borrow::() { + Ok(client.clone()) + } else { + let options = state.borrow::(); + let client = create_client_from_options(options)?; + state.put::(client.clone()); + Ok(client) + } +} + +pub fn create_client_from_options( + options: &Options, +) -> Result { + create_http_client( + &options.user_agent, + CreateHttpClientOptions { + root_cert_store: options + .root_cert_store() + .map_err(HttpClientCreateError::RootCertStore)?, + ca_certs: vec![], + proxy: options.proxy.clone(), + dns_resolver: options.resolver.clone(), + unsafely_ignore_certificate_errors: options + .unsafely_ignore_certificate_errors + .clone(), + client_cert_chain_and_key: options + .client_cert_chain_and_key + .clone() + .try_into() + .unwrap_or_default(), + pool_max_idle_per_host: None, + pool_idle_timeout: None, + http1: true, + http2: true, + client_builder_hook: 
options.client_builder_hook, + }, + ) +} + +#[allow(clippy::type_complexity)] +pub struct ResourceToBodyAdapter( + Rc, + Option< + Pin>>>, + >, +); + +impl ResourceToBodyAdapter { + pub fn new(resource: Rc) -> Self { + let future = resource.clone().read(64 * 1024); + Self(resource, Some(future)) + } +} + +// SAFETY: we only use this on a single-threaded executor +unsafe impl Send for ResourceToBodyAdapter {} +// SAFETY: we only use this on a single-threaded executor +unsafe impl Sync for ResourceToBodyAdapter {} + +impl Stream for ResourceToBodyAdapter { + type Item = Result; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.get_mut(); + if let Some(mut fut) = this.1.take() { + match fut.poll_unpin(cx) { + Poll::Pending => { + this.1 = Some(fut); + Poll::Pending + } + Poll::Ready(res) => match res { + Ok(buf) if buf.is_empty() => Poll::Ready(None), + Ok(buf) => { + this.1 = Some(this.0.clone().read(64 * 1024)); + Poll::Ready(Some(Ok(buf.to_vec().into()))) + } + Err(err) => Poll::Ready(Some(Err(err))), + }, + } + } else { + Poll::Ready(None) + } + } +} + +impl hyper::body::Body for ResourceToBodyAdapter { + type Data = Bytes; + type Error = deno_core::error::AnyError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.poll_next(cx) { + Poll::Ready(Some(res)) => Poll::Ready(Some(res.map(Frame::data))), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } +} + +impl Drop for ResourceToBodyAdapter { + fn drop(&mut self) { + self.0.clone().close() + } +} + +pub trait FetchPermissions { + fn check_net_url( + &mut self, + url: &Url, + api_name: &str, + ) -> Result<(), PermissionCheckError>; + #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] + fn check_read<'a>( + &mut self, + p: &'a Path, + api_name: &str, + ) -> Result, PermissionCheckError>; +} + +impl FetchPermissions for 
deno_permissions::PermissionsContainer { + #[inline(always)] + fn check_net_url( + &mut self, + url: &Url, + api_name: &str, + ) -> Result<(), PermissionCheckError> { + deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) + } + + #[inline(always)] + fn check_read<'a>( + &mut self, + path: &'a Path, + api_name: &str, + ) -> Result, PermissionCheckError> { + deno_permissions::PermissionsContainer::check_read_path( + self, + path, + Some(api_name), + ) + } +} + +#[op2(stack_trace)] +#[serde] +#[allow(clippy::too_many_arguments)] +pub fn op_fetch( + state: &mut OpState, + #[serde] method: ByteString, + #[string] url: String, + #[serde] headers: Vec<(ByteString, ByteString)>, + #[smi] client_rid: Option, + has_body: bool, + #[buffer] data: Option, + #[smi] resource: Option, +) -> Result +where + FP: FetchPermissions + 'static, +{ + let (client, allow_host) = if let Some(rid) = client_rid { + let r = state + .resource_table + .get::(rid) + .map_err(FetchError::Resource)?; + (r.client.clone(), r.allow_host) + } else { + (get_or_create_client_from_state(state)?, false) + }; + + let method = Method::from_bytes(&method)?; + let mut url = Url::parse(&url)?; + + // Check scheme before asking for net permission + let scheme = url.scheme(); + let (request_rid, cancel_handle_rid) = match scheme { + "file" => { + let path = url.to_file_path().map_err(|_| FetchError::NetworkError)?; + let permissions = state.borrow_mut::(); + let path = permissions.check_read(&path, "fetch()")?; + let url = match path { + Cow::Owned(path) => url_from_file_path(&path)?, + Cow::Borrowed(_) => url, + }; + + if method != Method::GET { + return Err(FetchError::FsNotGet(method)); + } + + let Options { + file_fetch_handler, .. 
+ } = state.borrow_mut::(); + let file_fetch_handler = file_fetch_handler.clone(); + let (future, maybe_cancel_handle) = + file_fetch_handler.fetch_file(state, &url); + let request_rid = state + .resource_table + .add(FetchRequestResource { future, url }); + let maybe_cancel_handle_rid = maybe_cancel_handle + .map(|ch| state.resource_table.add(FetchCancelHandle(ch))); + + (request_rid, maybe_cancel_handle_rid) + } + "http" | "https" => { + let permissions = state.borrow_mut::(); + permissions.check_net_url(&url, "fetch()")?; + + let maybe_authority = extract_authority(&mut url); + let uri = url + .as_str() + .parse::() + .map_err(|_| FetchError::InvalidUrl(url.clone()))?; + + let mut con_len = None; + let body = if has_body { + match (data, resource) { + (Some(data), _) => { + // If a body is passed, we use it, and don't return a body for streaming. + con_len = Some(data.len() as u64); + + http_body_util::Full::new(data.to_vec().into()) + .map_err(|never| match never {}) + .boxed() + } + (_, Some(resource)) => { + let resource = state + .resource_table + .take_any(resource) + .map_err(FetchError::Resource)?; + match resource.size_hint() { + (body_size, Some(n)) if body_size == n && body_size > 0 => { + con_len = Some(body_size); + } + _ => {} + } + ReqBody::new(ResourceToBodyAdapter::new(resource)) + } + (None, None) => unreachable!(), + } + } else { + // POST and PUT requests should always have a 0 length content-length, + // if there is no body. 
https://fetch.spec.whatwg.org/#http-network-or-cache-fetch + if matches!(method, Method::POST | Method::PUT) { + con_len = Some(0); + } + http_body_util::Empty::new() + .map_err(|never| match never {}) + .boxed() + }; + + let mut request = http::Request::new(body); + *request.method_mut() = method.clone(); + *request.uri_mut() = uri.clone(); + + if let Some((username, password)) = maybe_authority { + request.headers_mut().insert( + AUTHORIZATION, + proxy::basic_auth(&username, password.as_deref()), + ); + } + if let Some(len) = con_len { + request.headers_mut().insert(CONTENT_LENGTH, len.into()); + } + + for (key, value) in headers { + let name = HeaderName::from_bytes(&key)?; + let v = HeaderValue::from_bytes(&value)?; + + if (name != HOST || allow_host) && name != CONTENT_LENGTH { + request.headers_mut().append(name, v); + } + } + + if request.headers().contains_key(RANGE) { + // https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 18 + // If httpRequest’s header list contains `Range`, then append (`Accept-Encoding`, `identity`) + request + .headers_mut() + .insert(ACCEPT_ENCODING, HeaderValue::from_static("identity")); + } + + let options = state.borrow::(); + if let Some(request_builder_hook) = options.request_builder_hook { + request_builder_hook(&mut request) + .map_err(FetchError::RequestBuilderHook)?; + } + + let cancel_handle = CancelHandle::new_rc(); + let cancel_handle_ = cancel_handle.clone(); + + let fut = async move { + client + .send(request) + .map_err(Into::into) + .or_cancel(cancel_handle_) + .await + }; + + let request_rid = state.resource_table.add(FetchRequestResource { + future: Box::pin(fut), + url, + }); + + let cancel_handle_rid = + state.resource_table.add(FetchCancelHandle(cancel_handle)); + + (request_rid, Some(cancel_handle_rid)) + } + "data" => { + let data_url = + DataUrl::process(url.as_str()).map_err(FetchError::DataUrl)?; + + let (body, _) = data_url.decode_to_vec().map_err(FetchError::Base64)?; + let body = 
http_body_util::Full::new(body.into()) + .map_err(|never| match never {}) + .boxed(); + + let response = http::Response::builder() + .status(http::StatusCode::OK) + .header(http::header::CONTENT_TYPE, data_url.mime_type().to_string()) + .body(body)?; + + let fut = async move { Ok(Ok(response)) }; + + let request_rid = state.resource_table.add(FetchRequestResource { + future: Box::pin(fut), + url, + }); + + (request_rid, None) + } + "blob" => { + // Blob URL resolution happens in the JS side of fetch. If we got here is + // because the URL isn't an object URL. + return Err(FetchError::BlobNotFound); + } + _ => return Err(FetchError::SchemeNotSupported(scheme.to_string())), + }; + + Ok(FetchReturn { + request_rid, + cancel_handle_rid, + }) +} + +#[derive(Default, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FetchResponse { + pub status: u16, + pub status_text: String, + pub headers: Vec<(ByteString, ByteString)>, + pub url: String, + pub response_rid: ResourceId, + pub content_length: Option, + pub remote_addr_ip: Option, + pub remote_addr_port: Option, + /// This field is populated if some error occurred which needs to be + /// reconstructed in the JS side to set the error _cause_. + /// In the tuple, the first element is an error message and the second one is + /// an error cause. + pub error: Option<(String, String)>, +} + +#[op2(async)] +#[serde] +pub async fn op_fetch_send( + state: Rc>, + #[smi] rid: ResourceId, +) -> Result { + let request = state + .borrow_mut() + .resource_table + .take::(rid) + .map_err(FetchError::Resource)?; + + let request = Rc::try_unwrap(request) + .ok() + .expect("multiple op_fetch_send ongoing"); + + let res = match request.future.await { + Ok(Ok(res)) => res, + Ok(Err(err)) => { + // We're going to try and rescue the error cause from a stream and return it from this fetch. 
+ // If any error in the chain is a hyper body error, return that as a special result we can use to + // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`). + // TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead + + if let FetchError::ClientSend(err_src) = &err { + if let Some(client_err) = std::error::Error::source(&err_src.source) { + if let Some(err_src) = client_err.downcast_ref::() { + if let Some(err_src) = std::error::Error::source(err_src) { + return Ok(FetchResponse { + error: Some((err.to_string(), err_src.to_string())), + ..Default::default() + }); + } + } + } + } + + return Err(err); + } + Err(_) => return Err(FetchError::RequestCanceled), + }; + + let status = res.status(); + let url = request.url.into(); + let mut res_headers = Vec::new(); + for (key, val) in res.headers().iter() { + res_headers.push((key.as_str().into(), val.as_bytes().into())); + } + + let content_length = hyper::body::Body::size_hint(res.body()).exact(); + let remote_addr = res + .extensions() + .get::() + .map(|info| info.remote_addr()); + let (remote_addr_ip, remote_addr_port) = if let Some(addr) = remote_addr { + (Some(addr.ip().to_string()), Some(addr.port())) + } else { + (None, None) + }; + + let response_rid = state + .borrow_mut() + .resource_table + .add(FetchResponseResource::new(res, content_length)); + + Ok(FetchResponse { + status: status.as_u16(), + status_text: status.canonical_reason().unwrap_or("").to_string(), + headers: res_headers, + url, + response_rid, + content_length, + remote_addr_ip, + remote_addr_port, + error: None, + }) +} + +type CancelableResponseResult = + Result, FetchError>, Canceled>; + +pub struct FetchRequestResource { + pub future: Pin>>, + pub url: Url, +} + +impl Resource for FetchRequestResource { + fn name(&self) -> Cow { + "fetchRequest".into() + } +} + +pub struct FetchCancelHandle(pub Rc); + +impl Resource for FetchCancelHandle { + fn name(&self) -> Cow { + 
"fetchCancelHandle".into() + } + + fn close(self: Rc) { + self.0.cancel() + } +} + +type BytesStream = + Pin> + Unpin>>; + +pub enum FetchResponseReader { + Start(http::Response), + BodyReader(Peekable), +} + +impl Default for FetchResponseReader { + fn default() -> Self { + let stream: BytesStream = Box::pin(deno_core::futures::stream::empty()); + Self::BodyReader(stream.peekable()) + } +} +#[derive(Debug)] +pub struct FetchResponseResource { + pub response_reader: AsyncRefCell, + pub cancel: CancelHandle, + pub size: Option, +} + +impl FetchResponseResource { + pub fn new(response: http::Response, size: Option) -> Self { + Self { + response_reader: AsyncRefCell::new(FetchResponseReader::Start(response)), + cancel: CancelHandle::default(), + size, + } + } + + pub async fn upgrade(self) -> Result { + let reader = self.response_reader.into_inner(); + match reader { + FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?), + _ => unreachable!(), + } + } +} + +impl Resource for FetchResponseResource { + fn name(&self) -> Cow { + "fetchResponse".into() + } + + fn read(self: Rc, limit: usize) -> AsyncResult { + Box::pin(async move { + let mut reader = + RcRef::map(&self, |r| &r.response_reader).borrow_mut().await; + + let body = loop { + match &mut *reader { + FetchResponseReader::BodyReader(reader) => break reader, + FetchResponseReader::Start(_) => {} + } + + match std::mem::take(&mut *reader) { + FetchResponseReader::Start(resp) => { + let stream: BytesStream = + Box::pin(resp.into_body().into_data_stream().map(|r| { + r.map_err(|err| { + std::io::Error::new(std::io::ErrorKind::Other, err) + }) + })); + *reader = FetchResponseReader::BodyReader(stream.peekable()); + } + FetchResponseReader::BodyReader(_) => unreachable!(), + } + }; + let fut = async move { + let mut reader = Pin::new(body); + loop { + match reader.as_mut().peek_mut().await { + Some(Ok(chunk)) if !chunk.is_empty() => { + let len = min(limit, chunk.len()); + let chunk = 
chunk.split_to(len); + break Ok(chunk.into()); + } + // This unwrap is safe because `peek_mut()` returned `Some`, and thus + // currently has a peeked value that can be synchronously returned + // from `next()`. + // + // The future returned from `next()` is always ready, so we can + // safely call `await` on it without creating a race condition. + Some(_) => match reader.as_mut().next().await.unwrap() { + Ok(chunk) => assert!(chunk.is_empty()), + Err(err) => { + break Err(deno_core::error::type_error(err.to_string())) + } + }, + None => break Ok(BufView::empty()), + } + } + }; + + let cancel_handle = RcRef::map(self, |r| &r.cancel); + fut.try_or_cancel(cancel_handle).await + }) + } + + fn size_hint(&self) -> (u64, Option) { + (self.size.unwrap_or(0), self.size) + } + + fn close(self: Rc) { + self.cancel.cancel() + } +} + +pub struct HttpClientResource { + pub client: Client, + pub allow_host: bool, +} + +impl Resource for HttpClientResource { + fn name(&self) -> Cow { + "httpClient".into() + } +} + +impl HttpClientResource { + fn new(client: Client, allow_host: bool) -> Self { + Self { client, allow_host } + } +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct CreateHttpClientArgs { + ca_certs: Vec, + proxy: Option, + pool_max_idle_per_host: Option, + pool_idle_timeout: Option, + #[serde(default)] + use_hickory_resolver: bool, + #[serde(default = "default_true")] + http1: bool, + #[serde(default = "default_true")] + http2: bool, + #[serde(default)] + allow_host: bool, +} + +fn default_true() -> bool { + true +} + +#[op2(stack_trace)] +#[smi] +pub fn op_fetch_custom_client( + state: &mut OpState, + #[serde] args: CreateHttpClientArgs, + #[cppgc] tls_keys: &TlsKeysHolder, +) -> Result +where + FP: FetchPermissions + 'static, +{ + if let Some(proxy) = args.proxy.clone() { + let permissions = state.borrow_mut::(); + let url = Url::parse(&proxy.url)?; + permissions.check_net_url(&url, "Deno.createHttpClient()")?; + } + + let options = 
state.borrow::(); + let ca_certs = args + .ca_certs + .into_iter() + .map(|cert| cert.into_bytes()) + .collect::>(); + + let client = create_http_client( + &options.user_agent, + CreateHttpClientOptions { + root_cert_store: options + .root_cert_store() + .map_err(HttpClientCreateError::RootCertStore)?, + ca_certs, + proxy: args.proxy, + dns_resolver: if args.use_hickory_resolver { + dns::Resolver::hickory() + .map_err(deno_core::error::AnyError::new) + .map_err(FetchError::Resource)? + } else { + dns::Resolver::default() + }, + unsafely_ignore_certificate_errors: options + .unsafely_ignore_certificate_errors + .clone(), + client_cert_chain_and_key: tls_keys.take().try_into().unwrap(), + pool_max_idle_per_host: args.pool_max_idle_per_host, + pool_idle_timeout: args.pool_idle_timeout.and_then( + |timeout| match timeout { + serde_json::Value::Bool(true) => None, + serde_json::Value::Bool(false) => Some(None), + serde_json::Value::Number(specify) => { + Some(Some(specify.as_u64().unwrap_or_default())) + } + _ => Some(None), + }, + ), + http1: args.http1, + http2: args.http2, + client_builder_hook: options.client_builder_hook, + }, + )?; + + let rid = state + .resource_table + .add(HttpClientResource::new(client, args.allow_host)); + Ok(rid) +} + +#[derive(Debug, Clone)] +pub struct CreateHttpClientOptions { + pub root_cert_store: Option, + pub ca_certs: Vec>, + pub proxy: Option, + pub dns_resolver: dns::Resolver, + pub unsafely_ignore_certificate_errors: Option>, + pub client_cert_chain_and_key: Option, + pub pool_max_idle_per_host: Option, + pub pool_idle_timeout: Option>, + pub http1: bool, + pub http2: bool, + pub client_builder_hook: Option HyperClientBuilder>, +} + +impl Default for CreateHttpClientOptions { + fn default() -> Self { + CreateHttpClientOptions { + root_cert_store: None, + ca_certs: vec![], + proxy: None, + dns_resolver: dns::Resolver::default(), + unsafely_ignore_certificate_errors: None, + client_cert_chain_and_key: None, + pool_max_idle_per_host: 
None, + pool_idle_timeout: None, + http1: true, + http2: true, + client_builder_hook: None, + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum HttpClientCreateError { + #[error(transparent)] + Tls(deno_tls::TlsError), + #[error("Illegal characters in User-Agent: received {0}")] + InvalidUserAgent(String), + #[error("invalid proxy url")] + InvalidProxyUrl, + #[error("Cannot create Http Client: either `http1` or `http2` needs to be set to true")] + HttpVersionSelectionInvalid, + #[error(transparent)] + RootCertStore(deno_core::error::AnyError), +} + +/// Create new instance of async Client. This client supports +/// proxies and doesn't follow redirects. +pub fn create_http_client( + user_agent: &str, + options: CreateHttpClientOptions, +) -> Result { + let mut tls_config = deno_tls::create_client_config( + options.root_cert_store, + options.ca_certs, + options.unsafely_ignore_certificate_errors, + options.client_cert_chain_and_key.into(), + deno_tls::SocketUse::Http, + ) + .map_err(HttpClientCreateError::Tls)?; + + // Proxy TLS should not send ALPN + tls_config.alpn_protocols.clear(); + let proxy_tls_config = Arc::from(tls_config.clone()); + + let mut alpn_protocols = vec![]; + if options.http2 { + alpn_protocols.push("h2".into()); + } + if options.http1 { + alpn_protocols.push("http/1.1".into()); + } + tls_config.alpn_protocols = alpn_protocols; + let tls_config = Arc::from(tls_config); + + let mut http_connector = + HttpConnector::new_with_resolver(options.dns_resolver.clone()); + http_connector.enforce_http(false); + + let user_agent = user_agent.parse::().map_err(|_| { + HttpClientCreateError::InvalidUserAgent(user_agent.to_string()) + })?; + + let mut builder = HyperClientBuilder::new(TokioExecutor::new()); + builder.timer(TokioTimer::new()); + builder.pool_timer(TokioTimer::new()); + + if let Some(client_builder_hook) = options.client_builder_hook { + builder = client_builder_hook(builder); + } + + let mut proxies = proxy::from_env(); + if let 
Some(proxy) = options.proxy { + let mut intercept = proxy::Intercept::all(&proxy.url) + .ok_or_else(|| HttpClientCreateError::InvalidProxyUrl)?; + if let Some(basic_auth) = &proxy.basic_auth { + intercept.set_auth(&basic_auth.username, &basic_auth.password); + } + proxies.prepend(intercept); + } + let proxies = Arc::new(proxies); + let connector = proxy::ProxyConnector { + http: http_connector, + proxies: proxies.clone(), + tls: tls_config, + tls_proxy: proxy_tls_config, + user_agent: Some(user_agent.clone()), + }; + + if let Some(pool_max_idle_per_host) = options.pool_max_idle_per_host { + builder.pool_max_idle_per_host(pool_max_idle_per_host); + } + + if let Some(pool_idle_timeout) = options.pool_idle_timeout { + builder.pool_idle_timeout( + pool_idle_timeout.map(std::time::Duration::from_millis), + ); + } + + match (options.http1, options.http2) { + (true, false) => {} // noop, handled by ALPN above + (false, true) => { + builder.http2_only(true); + } + (true, true) => {} + (false, false) => { + return Err(HttpClientCreateError::HttpVersionSelectionInvalid) + } + } + + let pooled_client = builder.build(connector); + let decompress = Decompression::new(pooled_client).gzip(true).br(true); + + Ok(Client { + inner: decompress, + proxies, + user_agent, + }) +} + +#[op2] +#[serde] +pub fn op_utf8_to_byte_string(#[string] input: String) -> ByteString { + input.into() +} + +#[derive(Clone, Debug)] +pub struct Client { + inner: Decompression>, + // Used to check whether to include a proxy-authorization header + proxies: Arc, + user_agent: HeaderValue, +} + +type Connector = proxy::ProxyConnector>; + +// clippy is wrong here +#[allow(clippy::declare_interior_mutable_const)] +const STAR_STAR: HeaderValue = HeaderValue::from_static("*/*"); + +#[derive(Debug)] +pub struct ClientSendError { + uri: Uri, + pub source: hyper_util::client::legacy::Error, +} + +impl ClientSendError { + pub fn is_connect_error(&self) -> bool { + self.source.is_connect() + } + + fn http_info(&self) 
-> Option { + let mut exts = Extensions::new(); + self.source.connect_info()?.get_extras(&mut exts); + exts.remove::() + } +} + +impl std::fmt::Display for ClientSendError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + // NOTE: we can use `std::error::Report` instead once it's stabilized. + let detail = error_reporter::Report::new(&self.source); + + match self.http_info() { + Some(http_info) => { + write!( + f, + "error sending request from {src} for {uri} ({dst}): {detail}", + src = http_info.local_addr(), + uri = self.uri, + dst = http_info.remote_addr(), + detail = detail, + ) + } + None => { + write!( + f, + "error sending request for url ({uri}): {detail}", + uri = self.uri, + detail = detail, + ) + } + } + } +} + +impl std::error::Error for ClientSendError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.source) + } +} + +impl Client { + pub async fn send( + self, + mut req: http::Request, + ) -> Result, ClientSendError> { + req + .headers_mut() + .entry(USER_AGENT) + .or_insert_with(|| self.user_agent.clone()); + + req.headers_mut().entry(ACCEPT).or_insert(STAR_STAR); + + if let Some(auth) = self.proxies.http_forward_auth(req.uri()) { + req.headers_mut().insert(PROXY_AUTHORIZATION, auth.clone()); + } + + let uri = req.uri().clone(); + + let resp = self + .inner + .oneshot(req) + .await + .map_err(|e| ClientSendError { uri, source: e })?; + Ok(resp.map(|b| b.map_err(|e| deno_core::anyhow::anyhow!(e)).boxed())) + } +} + +pub type ReqBody = + http_body_util::combinators::BoxBody; +pub type ResBody = + http_body_util::combinators::BoxBody; + +/// Copied from https://github.com/seanmonstar/reqwest/blob/b9d62a0323d96f11672a61a17bf8849baec00275/src/async_impl/request.rs#L572 +/// Check the request URL for a "username:password" type authority, and if +/// found, remove it from the URL and return it. 
+pub fn extract_authority(url: &mut Url) -> Option<(String, Option)> { + use percent_encoding::percent_decode; + + if url.has_authority() { + let username: String = percent_decode(url.username().as_bytes()) + .decode_utf8() + .ok()? + .into(); + let password = url.password().and_then(|pass| { + percent_decode(pass.as_bytes()) + .decode_utf8() + .ok() + .map(String::from) + }); + if !username.is_empty() || password.is_some() { + url + .set_username("") + .expect("has_authority means set_username shouldn't fail"); + url + .set_password(None) + .expect("has_authority means set_password shouldn't fail"); + return Some((username, password)); + } + } + + None +} + +#[op2(fast)] +fn op_fetch_promise_is_settled(promise: v8::Local) -> bool { + promise.state() != v8::PromiseState::Pending +} diff --git a/vendor/deno_fetch/proxy.rs b/vendor/deno_fetch/proxy.rs new file mode 100644 index 00000000..88fc211e --- /dev/null +++ b/vendor/deno_fetch/proxy.rs @@ -0,0 +1,884 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +//! Parts of this module should be able to be replaced with other crates +//! eventually, once generic versions appear in hyper-util, et al. 
+ +use std::env; +use std::future::Future; +use std::net::IpAddr; +use std::pin::Pin; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +use deno_core::futures::TryFutureExt; +use deno_tls::rustls::ClientConfig as TlsConfig; + +use http::header::HeaderValue; +use http::uri::Scheme; +use http::Uri; +use hyper_rustls::HttpsConnector; +use hyper_rustls::MaybeHttpsStream; +use hyper_util::client::legacy::connect::Connected; +use hyper_util::client::legacy::connect::Connection; +use hyper_util::rt::TokioIo; +use ipnet::IpNet; +use percent_encoding::percent_decode_str; +use tokio::net::TcpStream; +use tokio_rustls::client::TlsStream; +use tokio_rustls::TlsConnector; +use tokio_socks::tcp::Socks5Stream; +use tower_service::Service; + +#[derive(Debug, Clone)] +pub(crate) struct ProxyConnector { + pub(crate) http: C, + pub(crate) proxies: Arc, + /// TLS config when destination is not a proxy + pub(crate) tls: Arc, + /// TLS config when destination is a proxy + /// Notably, does not include ALPN + pub(crate) tls_proxy: Arc, + pub(crate) user_agent: Option, +} + +#[derive(Debug)] +pub(crate) struct Proxies { + no: Option, + intercepts: Vec, +} + +#[derive(Clone)] +pub(crate) struct Intercept { + filter: Filter, + target: Target, +} + +#[derive(Clone)] +enum Target { + Http { + dst: Uri, + auth: Option, + }, + Https { + dst: Uri, + auth: Option, + }, + Socks { + dst: Uri, + auth: Option<(String, String)>, + }, +} + +#[derive(Debug, Clone, Copy)] +enum Filter { + Http, + Https, + All, +} + +pub(crate) fn from_env() -> Proxies { + let mut intercepts = Vec::new(); + + if let Some(proxy) = parse_env_var("ALL_PROXY", Filter::All) { + intercepts.push(proxy); + } else if let Some(proxy) = parse_env_var("all_proxy", Filter::All) { + intercepts.push(proxy); + } + + if let Some(proxy) = parse_env_var("HTTPS_PROXY", Filter::Https) { + intercepts.push(proxy); + } else if let Some(proxy) = parse_env_var("https_proxy", Filter::Https) { + intercepts.push(proxy); + } + + 
// In a CGI context, headers become environment variables. So, "Proxy:" becomes HTTP_PROXY. + // To prevent an attacker from injecting a proxy, check if we are in CGI. + if env::var_os("REQUEST_METHOD").is_none() { + if let Some(proxy) = parse_env_var("HTTP_PROXY", Filter::Http) { + intercepts.push(proxy); + } else if let Some(proxy) = parse_env_var("http_proxy", Filter::Http) { + intercepts.push(proxy); + } + } + + let no = NoProxy::from_env(); + + Proxies { intercepts, no } +} + +pub fn basic_auth(user: &str, pass: Option<&str>) -> HeaderValue { + use base64::prelude::BASE64_STANDARD; + use base64::write::EncoderWriter; + use std::io::Write; + + let mut buf = b"Basic ".to_vec(); + { + let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD); + let _ = write!(encoder, "{user}:"); + if let Some(password) = pass { + let _ = write!(encoder, "{password}"); + } + } + let mut header = + HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue"); + header.set_sensitive(true); + header +} + +fn parse_env_var(name: &str, filter: Filter) -> Option { + let val = env::var(name).ok()?; + let target = Target::parse(&val)?; + Some(Intercept { filter, target }) +} + +impl Intercept { + pub(crate) fn all(s: &str) -> Option { + let target = Target::parse(s)?; + Some(Intercept { + filter: Filter::All, + target, + }) + } + + pub(crate) fn set_auth(&mut self, user: &str, pass: &str) { + match self.target { + Target::Http { ref mut auth, .. } => { + *auth = Some(basic_auth(user, Some(pass))); + } + Target::Https { ref mut auth, .. } => { + *auth = Some(basic_auth(user, Some(pass))); + } + Target::Socks { ref mut auth, .. 
} => { + *auth = Some((user.into(), pass.into())); + } + } + } +} + +impl std::fmt::Debug for Intercept { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Intercept") + .field("filter", &self.filter) + .finish() + } +} + +impl Target { + fn parse(val: &str) -> Option { + let uri = val.parse::().ok()?; + + let mut builder = Uri::builder(); + let mut is_socks = false; + let mut http_auth = None; + let mut socks_auth = None; + + builder = builder.scheme(match uri.scheme() { + Some(s) => { + if s == &Scheme::HTTP || s == &Scheme::HTTPS { + s.clone() + } else if s.as_str() == "socks5" || s.as_str() == "socks5h" { + is_socks = true; + s.clone() + } else { + // can't use this proxy scheme + return None; + } + } + // if no scheme provided, assume they meant 'http' + None => Scheme::HTTP, + }); + + let authority = uri.authority()?; + + if let Some((userinfo, host_port)) = authority.as_str().split_once('@') { + let (user, pass) = userinfo.split_once(':')?; + let user = percent_decode_str(user).decode_utf8_lossy(); + let pass = percent_decode_str(pass).decode_utf8_lossy(); + if is_socks { + socks_auth = Some((user.into(), pass.into())); + } else { + http_auth = Some(basic_auth(&user, Some(&pass))); + } + builder = builder.authority(host_port); + } else { + builder = builder.authority(authority.clone()); + } + + // removing any path, but we MUST specify one or the builder errors + builder = builder.path_and_query("/"); + + let dst = builder.build().ok()?; + + let target = match dst.scheme().unwrap().as_str() { + "https" => Target::Https { + dst, + auth: http_auth, + }, + "http" => Target::Http { + dst, + auth: http_auth, + }, + "socks5" | "socks5h" => Target::Socks { + dst, + auth: socks_auth, + }, + // shouldn't happen + _ => return None, + }; + + Some(target) + } +} + +#[derive(Debug)] +struct NoProxy { + domains: DomainMatcher, + ips: IpMatcher, +} + +/// Represents a possible matching entry for an IP address +#[derive(Clone, Debug)] 
+enum Ip { + Address(IpAddr), + Network(IpNet), +} + +/// A wrapper around a list of IP cidr blocks or addresses with a [IpMatcher::contains] method for +/// checking if an IP address is contained within the matcher +#[derive(Clone, Debug, Default)] +struct IpMatcher(Vec); + +/// A wrapper around a list of domains with a [DomainMatcher::contains] method for checking if a +/// domain is contained within the matcher +#[derive(Clone, Debug, Default)] +struct DomainMatcher(Vec); + +impl NoProxy { + /// Returns a new no-proxy configuration based on environment variables (or `None` if no variables are set) + /// see [self::NoProxy::from_string()] for the string format + fn from_env() -> Option { + let raw = env::var("NO_PROXY") + .or_else(|_| env::var("no_proxy")) + .unwrap_or_default(); + + Self::from_string(&raw) + } + + /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables + /// are set) + /// The rules are as follows: + /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked + /// * If neither environment variable is set, `None` is returned + /// * Entries are expected to be comma-separated (whitespace between entries is ignored) + /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size, + /// for example "`192.168.1.0/24`"). + /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed) + /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com` + /// and `.google.com` are equivalent) and would match both that domain AND all subdomains. + /// + /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match + /// (and therefore would bypass the proxy): + /// * `http://google.com/` + /// * `http://www.google.com/` + /// * `http://192.168.1.42/` + /// + /// The URL `http://notgoogle.com/` would not match. 
+ fn from_string(no_proxy_list: &str) -> Option { + if no_proxy_list.is_empty() { + return None; + } + let mut ips = Vec::new(); + let mut domains = Vec::new(); + let parts = no_proxy_list.split(',').map(str::trim); + for part in parts { + match part.parse::() { + // If we can parse an IP net or address, then use it, otherwise, assume it is a domain + Ok(ip) => ips.push(Ip::Network(ip)), + Err(_) => match part.parse::() { + Ok(addr) => ips.push(Ip::Address(addr)), + Err(_) => domains.push(part.to_owned()), + }, + } + } + Some(NoProxy { + ips: IpMatcher(ips), + domains: DomainMatcher(domains), + }) + } + + fn contains(&self, host: &str) -> bool { + // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off + // the end in order to parse correctly + let host = if host.starts_with('[') { + let x: &[_] = &['[', ']']; + host.trim_matches(x) + } else { + host + }; + match host.parse::() { + // If we can parse an IP addr, then use it, otherwise, assume it is a domain + Ok(ip) => self.ips.contains(ip), + Err(_) => self.domains.contains(host), + } + } +} + +impl IpMatcher { + fn contains(&self, addr: IpAddr) -> bool { + for ip in &self.0 { + match ip { + Ip::Address(address) => { + if &addr == address { + return true; + } + } + Ip::Network(net) => { + if net.contains(&addr) { + return true; + } + } + } + } + false + } +} + +impl DomainMatcher { + // The following links may be useful to understand the origin of these rules: + // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html + // * https://github.com/curl/curl/issues/1208 + fn contains(&self, domain: &str) -> bool { + let domain_len = domain.len(); + for d in &self.0 { + if d == domain || d.strip_prefix('.') == Some(domain) { + return true; + } else if domain.ends_with(d) { + if d.starts_with('.') { + // If the first character of d is a dot, that means the first character of domain + // must also be a dot, so we are looking at a subdomain of d and that matches + return true; + } else if 
domain.as_bytes().get(domain_len - d.len() - 1) == Some(&b'.') + { + // Given that d is a prefix of domain, if the prior character in domain is a dot + // then that means we must be matching a subdomain of d, and that matches + return true; + } + } else if d == "*" { + return true; + } + } + false + } +} + +impl ProxyConnector { + fn intercept(&self, dst: &Uri) -> Option<&Intercept> { + self.proxies.intercept(dst) + } +} + +impl Proxies { + pub(crate) fn prepend(&mut self, intercept: Intercept) { + self.intercepts.insert(0, intercept); + } + + pub(crate) fn http_forward_auth(&self, dst: &Uri) -> Option<&HeaderValue> { + let intercept = self.intercept(dst)?; + match intercept.target { + // Only if the proxy target is http + Target::Http { ref auth, .. } => auth.as_ref(), + _ => None, + } + } + + fn intercept(&self, dst: &Uri) -> Option<&Intercept> { + if let Some(no_proxy) = self.no.as_ref() { + if no_proxy.contains(dst.host()?) { + return None; + } + } + + for intercept in &self.intercepts { + return match ( + intercept.filter, + dst.scheme().map(Scheme::as_str).unwrap_or(""), + ) { + (Filter::All, _) => Some(intercept), + (Filter::Https, "https") => Some(intercept), + (Filter::Http, "http") => Some(intercept), + _ => continue, + }; + } + None + } +} + +type BoxFuture = Pin + Send>>; +type BoxError = Box; + +// These variatns are not to be inspected. 
+pub enum Proxied { + /// Not proxied + PassThrough(T), + /// An HTTP forwarding proxy needed absolute-form + HttpForward(T), + /// Tunneled through HTTP CONNECT + HttpTunneled(Box>>>), + /// Tunneled through SOCKS + Socks(TokioIo), + /// Tunneled through SOCKS and TLS + SocksTls(TokioIo>>>), +} + +impl Service for ProxyConnector +where + C: Service + Clone, + C::Response: + hyper::rt::Read + hyper::rt::Write + Connection + Unpin + Send + 'static, + C::Future: Send + 'static, + C::Error: Into + 'static, +{ + type Response = Proxied>; + type Error = BoxError; + type Future = BoxFuture>; + + fn poll_ready( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { + self.http.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, orig_dst: Uri) -> Self::Future { + if let Some(intercept) = self.intercept(&orig_dst).cloned() { + let is_https = orig_dst.scheme() == Some(&Scheme::HTTPS); + let user_agent = self.user_agent.clone(); + return match intercept.target { + Target::Http { + dst: proxy_dst, + auth, + } + | Target::Https { + dst: proxy_dst, + auth, + } => { + let mut connector = + HttpsConnector::from((self.http.clone(), self.tls_proxy.clone())); + let connecting = connector.call(proxy_dst); + let tls = TlsConnector::from(self.tls.clone()); + Box::pin(async move { + let mut io = connecting.await.map_err(Into::::into)?; + + if is_https { + tunnel(&mut io, &orig_dst, user_agent, auth).await?; + let tokio_io = TokioIo::new(io); + let io = tls + .connect( + TryFrom::try_from(orig_dst.host().unwrap().to_owned())?, + tokio_io, + ) + .await?; + Ok(Proxied::HttpTunneled(Box::new(TokioIo::new(io)))) + } else { + Ok(Proxied::HttpForward(io)) + } + }) + } + Target::Socks { + dst: proxy_dst, + auth, + } => { + let tls = TlsConnector::from(self.tls.clone()); + Box::pin(async move { + let socks_addr = ( + proxy_dst.host().unwrap(), + proxy_dst.port().map(|p| p.as_u16()).unwrap_or(1080), + ); + let host = orig_dst.host().ok_or("no host in url")?; + let port = match 
orig_dst.port() { + Some(p) => p.as_u16(), + None if is_https => 443, + _ => 80, + }; + let io = if let Some((user, pass)) = auth { + Socks5Stream::connect_with_password( + socks_addr, + (host, port), + &user, + &pass, + ) + .await? + } else { + Socks5Stream::connect(socks_addr, (host, port)).await? + }; + let io = TokioIo::new(io.into_inner()); + + if is_https { + let tokio_io = TokioIo::new(io); + let io = tls + .connect(TryFrom::try_from(host.to_owned())?, tokio_io) + .await?; + Ok(Proxied::SocksTls(TokioIo::new(io))) + } else { + Ok(Proxied::Socks(io)) + } + }) + } + }; + } + + let mut connector = + HttpsConnector::from((self.http.clone(), self.tls.clone())); + Box::pin( + connector + .call(orig_dst) + .map_ok(Proxied::PassThrough) + .map_err(Into::into), + ) + } +} + +async fn tunnel( + io: &mut T, + dst: &Uri, + user_agent: Option, + auth: Option, +) -> Result<(), BoxError> +where + T: hyper::rt::Read + hyper::rt::Write + Unpin, +{ + use tokio::io::AsyncReadExt; + use tokio::io::AsyncWriteExt; + + let host = dst.host().expect("proxy dst has host"); + let port = match dst.port() { + Some(p) => p.as_u16(), + None => match dst.scheme().map(Scheme::as_str).unwrap_or("") { + "https" => 443, + "http" => 80, + _ => return Err("proxy dst unexpected scheme".into()), + }, + }; + + let mut buf = format!( + "\ + CONNECT {host}:{port} HTTP/1.1\r\n\ + Host: {host}:{port}\r\n\ + " + ) + .into_bytes(); + + // user-agent + if let Some(user_agent) = user_agent { + buf.extend_from_slice(b"User-Agent: "); + buf.extend_from_slice(user_agent.as_bytes()); + buf.extend_from_slice(b"\r\n"); + } + + // proxy-authorization + if let Some(value) = auth { + buf.extend_from_slice(b"Proxy-Authorization: "); + buf.extend_from_slice(value.as_bytes()); + buf.extend_from_slice(b"\r\n"); + } + + // headers end + buf.extend_from_slice(b"\r\n"); + + let mut tokio_conn = TokioIo::new(io); + + tokio_conn.write_all(&buf).await?; + + let mut buf = [0; 8192]; + let mut pos = 0; + + loop { + let n = 
tokio_conn.read(&mut buf[pos..]).await?; + + if n == 0 { + return Err("unexpected eof while tunneling".into()); + } + pos += n; + + let recvd = &buf[..pos]; + if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200") + { + if recvd.ends_with(b"\r\n\r\n") { + return Ok(()); + } + if pos == buf.len() { + return Err("proxy headers too long for tunnel".into()); + } + // else read more + } else if recvd.starts_with(b"HTTP/1.1 407") { + return Err("proxy authentication required".into()); + } else { + return Err("unsuccessful tunnel".into()); + } + } +} + +impl hyper::rt::Read for Proxied +where + T: hyper::rt::Read + hyper::rt::Write + Unpin, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: hyper::rt::ReadBufCursor<'_>, + ) -> Poll> { + match *self { + Proxied::PassThrough(ref mut p) => Pin::new(p).poll_read(cx, buf), + Proxied::HttpForward(ref mut p) => Pin::new(p).poll_read(cx, buf), + Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_read(cx, buf), + Proxied::Socks(ref mut p) => Pin::new(p).poll_read(cx, buf), + Proxied::SocksTls(ref mut p) => Pin::new(p).poll_read(cx, buf), + } + } +} + +impl hyper::rt::Write for Proxied +where + T: hyper::rt::Read + hyper::rt::Write + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match *self { + Proxied::PassThrough(ref mut p) => Pin::new(p).poll_write(cx, buf), + Proxied::HttpForward(ref mut p) => Pin::new(p).poll_write(cx, buf), + Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_write(cx, buf), + Proxied::Socks(ref mut p) => Pin::new(p).poll_write(cx, buf), + Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write(cx, buf), + } + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match *self { + Proxied::PassThrough(ref mut p) => Pin::new(p).poll_flush(cx), + Proxied::HttpForward(ref mut p) => Pin::new(p).poll_flush(cx), + Proxied::HttpTunneled(ref mut p) => 
Pin::new(p).poll_flush(cx), + Proxied::Socks(ref mut p) => Pin::new(p).poll_flush(cx), + Proxied::SocksTls(ref mut p) => Pin::new(p).poll_flush(cx), + } + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match *self { + Proxied::PassThrough(ref mut p) => Pin::new(p).poll_shutdown(cx), + Proxied::HttpForward(ref mut p) => Pin::new(p).poll_shutdown(cx), + Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_shutdown(cx), + Proxied::Socks(ref mut p) => Pin::new(p).poll_shutdown(cx), + Proxied::SocksTls(ref mut p) => Pin::new(p).poll_shutdown(cx), + } + } + + fn is_write_vectored(&self) -> bool { + match *self { + Proxied::PassThrough(ref p) => p.is_write_vectored(), + Proxied::HttpForward(ref p) => p.is_write_vectored(), + Proxied::HttpTunneled(ref p) => p.is_write_vectored(), + Proxied::Socks(ref p) => p.is_write_vectored(), + Proxied::SocksTls(ref p) => p.is_write_vectored(), + } + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + match *self { + Proxied::PassThrough(ref mut p) => { + Pin::new(p).poll_write_vectored(cx, bufs) + } + Proxied::HttpForward(ref mut p) => { + Pin::new(p).poll_write_vectored(cx, bufs) + } + Proxied::HttpTunneled(ref mut p) => { + Pin::new(p).poll_write_vectored(cx, bufs) + } + Proxied::Socks(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs), + Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs), + } + } +} + +impl Connection for Proxied +where + T: Connection, +{ + fn connected(&self) -> Connected { + match self { + Proxied::PassThrough(ref p) => p.connected(), + Proxied::HttpForward(ref p) => p.connected().proxy(true), + Proxied::HttpTunneled(ref p) => { + let tunneled_tls = p.inner().get_ref(); + if tunneled_tls.1.alpn_protocol() == Some(b"h2") { + tunneled_tls.0.connected().negotiated_h2() + } else { + tunneled_tls.0.connected() + } + } + Proxied::Socks(ref p) => p.connected(), + 
Proxied::SocksTls(ref p) => { + let tunneled_tls = p.inner().get_ref(); + if tunneled_tls.1.alpn_protocol() == Some(b"h2") { + tunneled_tls.0.connected().negotiated_h2() + } else { + tunneled_tls.0.connected() + } + } + } + } +} + +#[test] +fn test_proxy_parse_from_env() { + fn parse(s: &str) -> Target { + Target::parse(s).unwrap() + } + + // normal + match parse("http://127.0.0.1:6666") { + Target::Http { dst, auth } => { + assert_eq!(dst, "http://127.0.0.1:6666"); + assert!(auth.is_none()); + } + _ => panic!("bad target"), + } + + // without scheme + match parse("127.0.0.1:6666") { + Target::Http { dst, auth } => { + assert_eq!(dst, "http://127.0.0.1:6666"); + assert!(auth.is_none()); + } + _ => panic!("bad target"), + } + + // with userinfo + match parse("user:pass@127.0.0.1:6666") { + Target::Http { dst, auth } => { + assert_eq!(dst, "http://127.0.0.1:6666"); + assert!(auth.is_some()); + assert!(auth.unwrap().is_sensitive()); + } + _ => panic!("bad target"), + } + + // percent encoded user info + match parse("us%2Fer:p%2Fass@127.0.0.1:6666") { + Target::Http { dst, auth } => { + assert_eq!(dst, "http://127.0.0.1:6666"); + let auth = auth.unwrap(); + assert_eq!(auth.to_str().unwrap(), "Basic dXMvZXI6cC9hc3M="); + } + _ => panic!("bad target"), + } + + // socks + match parse("socks5://user:pass@127.0.0.1:6666") { + Target::Socks { dst, auth } => { + assert_eq!(dst, "socks5://127.0.0.1:6666"); + assert!(auth.is_some()); + } + _ => panic!("bad target"), + } + + // socks5h + match parse("socks5h://localhost:6666") { + Target::Socks { dst, auth } => { + assert_eq!(dst, "socks5h://localhost:6666"); + assert!(auth.is_none()); + } + _ => panic!("bad target"), + } +} + +#[test] +fn test_domain_matcher() { + let domains = vec![".foo.bar".into(), "bar.foo".into()]; + let matcher = DomainMatcher(domains); + + // domains match with leading `.` + assert!(matcher.contains("foo.bar")); + // subdomains match with leading `.` + assert!(matcher.contains("www.foo.bar")); + + // 
domains match with no leading `.` + assert!(matcher.contains("bar.foo")); + // subdomains match with no leading `.` + assert!(matcher.contains("www.bar.foo")); + + // non-subdomain string prefixes don't match + assert!(!matcher.contains("notfoo.bar")); + assert!(!matcher.contains("notbar.foo")); +} + +#[test] +fn test_no_proxy_wildcard() { + let no_proxy = NoProxy::from_string("*").unwrap(); + assert!(no_proxy.contains("any.where")); +} + +#[test] +fn test_no_proxy_ip_ranges() { + let no_proxy = NoProxy::from_string( + ".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17", + ) + .unwrap(); + + let should_not_match = [ + // random url, not in no_proxy + "deno.com", + // make sure that random non-subdomain string prefixes don't match + "notfoo.bar", + // make sure that random non-subdomain string prefixes don't match + "notbar.baz", + // ipv4 address out of range + "10.43.1.1", + // ipv4 address out of range + "10.124.7.7", + // ipv6 address out of range + "[ffff:db8:a0b:12f0::1]", + // ipv6 address out of range + "[2005:db8:a0b:12f0::1]", + ]; + + for host in &should_not_match { + assert!(!no_proxy.contains(host), "should not contain {:?}", host); + } + + let should_match = [ + // make sure subdomains (with leading .) match + "hello.foo.bar", + // make sure exact matches (without leading .) match (also makes sure spaces between entries work) + "bar.baz", + // make sure subdomains (without leading . in no_proxy) match + "foo.bar.baz", + // make sure subdomains (without leading . 
in no_proxy) match - this differs from cURL + "foo.bar", + // ipv4 address match within range + "10.42.1.100", + // ipv6 address exact match + "[::1]", + // ipv6 address match within range + "[2001:db8:a0b:12f0::1]", + // ipv4 address exact match + "10.124.7.8", + ]; + + for host in &should_match { + assert!(no_proxy.contains(host), "should contain {:?}", host); + } +} diff --git a/vendor/deno_fetch/tests.rs b/vendor/deno_fetch/tests.rs new file mode 100644 index 00000000..3da29f8a --- /dev/null +++ b/vendor/deno_fetch/tests.rs @@ -0,0 +1,300 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use std::net::SocketAddr; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; + +use bytes::Bytes; +use fast_socks5::server::Config as Socks5Config; +use fast_socks5::server::Socks5Socket; +use http_body_util::BodyExt; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncWriteExt; + +use crate::dns; + +use super::create_http_client; +use super::CreateHttpClientOptions; + +static EXAMPLE_CRT: &[u8] = include_bytes!("../tls/testdata/example1_cert.der"); +static EXAMPLE_KEY: &[u8] = + include_bytes!("../tls/testdata/example1_prikey.der"); + +#[test] +fn test_userspace_resolver() { + let thread_counter = Arc::new(AtomicUsize::new(0)); + + let thread_counter_ref = thread_counter.clone(); + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .on_thread_start(move || { + thread_counter_ref.fetch_add(1, SeqCst); + }) + .build() + .unwrap(); + + rt.block_on(async move { + assert_eq!(thread_counter.load(SeqCst), 0); + let src_addr = create_https_server(true).await; + assert_eq!(src_addr.ip().to_string(), "127.0.0.1"); + // use `localhost` to ensure dns step happens. 
+ let addr = format!("localhost:{}", src_addr.port()); + + let hickory = hickory_resolver::Resolver::tokio( + Default::default(), + Default::default(), + ); + + assert_eq!(thread_counter.load(SeqCst), 0); + rust_test_client_with_resolver( + None, + addr.clone(), + "https", + http::Version::HTTP_2, + dns::Resolver::hickory_from_resolver(hickory), + ) + .await; + assert_eq!(thread_counter.load(SeqCst), 0, "userspace resolver shouldn't spawn new threads."); + rust_test_client_with_resolver( + None, + addr.clone(), + "https", + http::Version::HTTP_2, + dns::Resolver::gai(), + ) + .await; + assert_eq!(thread_counter.load(SeqCst), 1, "getaddrinfo is called inside spawn_blocking, so tokio spawn a new worker thread for it."); + }); +} + +#[tokio::test] +async fn test_https_proxy_http11() { + let src_addr = create_https_server(false).await; + let prx_addr = create_http_proxy(src_addr).await; + run_test_client(prx_addr, src_addr, "http", http::Version::HTTP_11).await; +} + +#[tokio::test] +async fn test_https_proxy_h2() { + let src_addr = create_https_server(true).await; + let prx_addr = create_http_proxy(src_addr).await; + run_test_client(prx_addr, src_addr, "http", http::Version::HTTP_2).await; +} + +#[tokio::test] +async fn test_https_proxy_https_h2() { + let src_addr = create_https_server(true).await; + let prx_addr = create_https_proxy(src_addr).await; + run_test_client(prx_addr, src_addr, "https", http::Version::HTTP_2).await; +} + +#[tokio::test] +async fn test_socks_proxy_http11() { + let src_addr = create_https_server(false).await; + let prx_addr = create_socks_proxy(src_addr).await; + run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_11).await; +} + +#[tokio::test] +async fn test_socks_proxy_h2() { + let src_addr = create_https_server(true).await; + let prx_addr = create_socks_proxy(src_addr).await; + run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_2).await; +} + +async fn rust_test_client_with_resolver( + prx_addr: Option, + 
src_addr: String, + proto: &str, + ver: http::Version, + resolver: dns::Resolver, +) { + let client = create_http_client( + "fetch/test", + CreateHttpClientOptions { + root_cert_store: None, + ca_certs: vec![], + proxy: prx_addr.map(|p| deno_tls::Proxy { + url: format!("{}://{}", proto, p), + basic_auth: None, + }), + unsafely_ignore_certificate_errors: Some(vec![]), + client_cert_chain_and_key: None, + pool_max_idle_per_host: None, + pool_idle_timeout: None, + dns_resolver: resolver, + http1: true, + http2: true, + client_builder_hook: None, + }, + ) + .unwrap(); + + let req = http::Request::builder() + .uri(format!("https://{}/foo", src_addr)) + .body( + http_body_util::Empty::new() + .map_err(|err| match err {}) + .boxed(), + ) + .unwrap(); + let resp = client.send(req).await.unwrap(); + assert_eq!(resp.status(), http::StatusCode::OK); + assert_eq!(resp.version(), ver); + let hello = resp.collect().await.unwrap().to_bytes(); + assert_eq!(hello, "hello from server"); +} + +async fn run_test_client( + prx_addr: SocketAddr, + src_addr: SocketAddr, + proto: &str, + ver: http::Version, +) { + rust_test_client_with_resolver( + Some(prx_addr), + src_addr.to_string(), + proto, + ver, + Default::default(), + ) + .await +} + +async fn create_https_server(allow_h2: bool) -> SocketAddr { + let mut tls_config = deno_tls::rustls::server::ServerConfig::builder() + .with_no_client_auth() + .with_single_cert( + vec![EXAMPLE_CRT.into()], + webpki::types::PrivateKeyDer::try_from(EXAMPLE_KEY).unwrap(), + ) + .unwrap(); + if allow_h2 { + tls_config.alpn_protocols.push("h2".into()); + } + tls_config.alpn_protocols.push("http/1.1".into()); + let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::from(tls_config)); + let src_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let src_addr = src_tcp.local_addr().unwrap(); + + tokio::spawn(async move { + while let Ok((sock, _)) = src_tcp.accept().await { + let conn = tls_acceptor.accept(sock).await.unwrap(); + if 
conn.get_ref().1.alpn_protocol() == Some(b"h2") { + let fut = hyper::server::conn::http2::Builder::new( + hyper_util::rt::TokioExecutor::new(), + ) + .serve_connection( + hyper_util::rt::TokioIo::new(conn), + hyper::service::service_fn(|_req| async { + Ok::<_, std::convert::Infallible>(http::Response::new( + http_body_util::Full::::new("hello from server".into()), + )) + }), + ); + tokio::spawn(fut); + } else { + let fut = hyper::server::conn::http1::Builder::new().serve_connection( + hyper_util::rt::TokioIo::new(conn), + hyper::service::service_fn(|_req| async { + Ok::<_, std::convert::Infallible>(http::Response::new( + http_body_util::Full::::new("hello from server".into()), + )) + }), + ); + tokio::spawn(fut); + } + } + }); + + src_addr +} + +async fn create_http_proxy(src_addr: SocketAddr) -> SocketAddr { + let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let prx_addr = prx_tcp.local_addr().unwrap(); + + tokio::spawn(async move { + while let Ok((mut sock, _)) = prx_tcp.accept().await { + let fut = async move { + let mut buf = [0u8; 4096]; + let _n = sock.read(&mut buf).await.unwrap(); + assert_eq!(&buf[..7], b"CONNECT"); + let mut dst_tcp = + tokio::net::TcpStream::connect(src_addr).await.unwrap(); + sock.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap(); + tokio::io::copy_bidirectional(&mut sock, &mut dst_tcp) + .await + .unwrap(); + }; + tokio::spawn(fut); + } + }); + + prx_addr +} + +async fn create_https_proxy(src_addr: SocketAddr) -> SocketAddr { + let mut tls_config = deno_tls::rustls::server::ServerConfig::builder() + .with_no_client_auth() + .with_single_cert( + vec![EXAMPLE_CRT.into()], + webpki::types::PrivateKeyDer::try_from(EXAMPLE_KEY).unwrap(), + ) + .unwrap(); + // Set ALPN, to check our proxy connector. But we shouldn't receive anything. 
+ tls_config.alpn_protocols.push("h2".into()); + tls_config.alpn_protocols.push("http/1.1".into()); + let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::from(tls_config)); + let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let prx_addr = prx_tcp.local_addr().unwrap(); + + tokio::spawn(async move { + while let Ok((sock, _)) = prx_tcp.accept().await { + let mut sock = tls_acceptor.accept(sock).await.unwrap(); + assert_eq!(sock.get_ref().1.alpn_protocol(), None); + + let fut = async move { + let mut buf = [0u8; 4096]; + let _n = sock.read(&mut buf).await.unwrap(); + assert_eq!(&buf[..7], b"CONNECT"); + let mut dst_tcp = + tokio::net::TcpStream::connect(src_addr).await.unwrap(); + sock.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap(); + tokio::io::copy_bidirectional(&mut sock, &mut dst_tcp) + .await + .unwrap(); + }; + tokio::spawn(fut); + } + }); + + prx_addr +} + +async fn create_socks_proxy(src_addr: SocketAddr) -> SocketAddr { + let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let prx_addr = prx_tcp.local_addr().unwrap(); + + tokio::spawn(async move { + while let Ok((sock, _)) = prx_tcp.accept().await { + let cfg: Socks5Config = Default::default(); + let mut socks_conn = Socks5Socket::new(sock, cfg.into()) + .upgrade_to_socks5() + .await + .unwrap(); + + let fut = async move { + let mut dst_tcp = + tokio::net::TcpStream::connect(src_addr).await.unwrap(); + tokio::io::copy_bidirectional(&mut socks_conn, &mut dst_tcp) + .await + .unwrap(); + }; + tokio::spawn(fut); + } + }); + + prx_addr +} diff --git a/vendor/deno_http/00_serve.ts b/vendor/deno_http/00_serve.ts new file mode 100644 index 00000000..f913f493 --- /dev/null +++ b/vendor/deno_http/00_serve.ts @@ -0,0 +1,981 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +import { core, internals, primordials } from "ext:core/mod.js"; +const { + BadResourcePrototype, + InterruptedPrototype, + Interrupted, + internalRidSymbol, +} = core; +import { + op_http_cancel, + op_http_close, + op_http_close_after_finish, + op_http_get_request_headers, + op_http_get_request_method_and_url, + op_http_read_request_body, + op_http_request_on_cancel, + op_http_serve, + op_http_serve_on, + op_http_set_promise_complete, + op_http_set_response_body_bytes, + op_http_set_response_body_resource, + op_http_set_response_body_text, + op_http_set_response_header, + op_http_set_response_headers, + op_http_set_response_trailers, + op_http_try_wait, + op_http_upgrade_raw, + op_http_upgrade_websocket_next, + op_http_wait, +} from "ext:core/ops"; +const { + ArrayPrototypePush, + ObjectHasOwn, + ObjectPrototypeIsPrototypeOf, + PromisePrototypeCatch, + SafePromisePrototypeFinally, + PromisePrototypeThen, + StringPrototypeIncludes, + Symbol, + TypeError, + TypedArrayPrototypeGetSymbolToStringTag, + Uint8Array, + Promise, +} = primordials; +const { + getAsyncContext, + setAsyncContext, +} = core; + +import { InnerBody } from "ext:deno_fetch/22_body.js"; +import { Event } from "ext:deno_web/02_event.js"; +import { + fromInnerResponse, + newInnerResponse, + ResponsePrototype, + toInnerResponse, +} from "ext:deno_fetch/23_response.js"; +import { + abortRequest, + fromInnerRequest, + toInnerRequest, +} from "ext:deno_fetch/23_request.js"; +import { AbortController } from "ext:deno_web/03_abort_signal.js"; +import { + _eventLoop, + _idleTimeoutDuration, + _idleTimeoutTimeout, + _protocol, + _readyState, + _rid, + _role, + _server, + _serverHandleIdleTimeout, + SERVER, + WebSocket, +} from "ext:deno_websocket/01_websocket.js"; +import { + Deferred, + getReadableStreamResourceBacking, + readableStreamForRid, + ReadableStreamPrototype, + resourceForReadableStream, +} from "ext:deno_web/06_streams.js"; +import { + listen, + listenOptionApiName, + UpgradedConn, +} from 
"ext:deno_net/01_net.js"; +import { hasTlsKeyPairOptions, listenTls } from "ext:deno_net/02_tls.js"; +import { SymbolAsyncDispose } from "ext:deno_web/00_infra.js"; +import { + builtinTracer, + enterSpan, + TRACING_ENABLED, +} from "ext:deno_telemetry/telemetry.ts"; +import { + updateSpanFromRequest, + updateSpanFromResponse, +} from "ext:deno_telemetry/util.ts"; + +const _upgraded = Symbol("_upgraded"); + +function internalServerError() { + // "Internal Server Error" + return new Response( + new Uint8Array([ + 73, + 110, + 116, + 101, + 114, + 110, + 97, + 108, + 32, + 83, + 101, + 114, + 118, + 101, + 114, + 32, + 69, + 114, + 114, + 111, + 114, + ]), + { status: 500 }, + ); +} + +// Used to ensure that user returns a valid response (but not a different response) from handlers that are upgraded. +const UPGRADE_RESPONSE_SENTINEL = fromInnerResponse( + newInnerResponse(101), + "immutable", +); + +function upgradeHttpRaw(req, conn) { + const inner = toInnerRequest(req); + if (inner._wantsUpgrade) { + return inner._wantsUpgrade("upgradeHttpRaw", conn); + } + throw new TypeError("'upgradeHttpRaw' may only be used with Deno.serve"); +} + +function addTrailers(resp, headerList) { + const inner = toInnerResponse(resp); + op_http_set_response_trailers(inner.external, headerList); +} + +class InnerRequest { + #external; + #context; + #methodAndUri; + #streamRid; + #body; + #upgraded; + #urlValue; + #completed; + request; + + constructor(external, context) { + this.#external = external; + this.#context = context; + this.#upgraded = false; + this.#completed = undefined; + } + + close(success = true) { + // The completion signal fires only if someone cares + if (this.#completed) { + if (success) { + this.#completed.resolve(undefined); + } else { + this.#completed.reject( + new Interrupted("HTTP response was not sent successfully"), + ); + } + } + abortRequest(this.request); + this.#external = null; + } + + get [_upgraded]() { + return this.#upgraded; + } + + 
_wantsUpgrade(upgradeType, ...originalArgs) { + if (this.#upgraded) { + throw new Deno.errors.Http("Already upgraded"); + } + if (this.#external === null) { + throw new Deno.errors.Http("Already closed"); + } + + // upgradeHttpRaw is sync + if (upgradeType == "upgradeHttpRaw") { + const external = this.#external; + const underlyingConn = originalArgs[0]; + + this.url(); + this.headerList; + this.close(); + + this.#upgraded = () => {}; + + const upgradeRid = op_http_upgrade_raw(external); + + const conn = new UpgradedConn( + upgradeRid, + underlyingConn?.remoteAddr, + underlyingConn?.localAddr, + ); + + return { response: UPGRADE_RESPONSE_SENTINEL, conn }; + } + + // upgradeWebSocket is sync + if (upgradeType == "upgradeWebSocket") { + const response = originalArgs[0]; + const ws = originalArgs[1]; + + const external = this.#external; + + this.url(); + this.headerList; + this.close(); + + const goAhead = new Deferred(); + this.#upgraded = () => { + goAhead.resolve(); + }; + const wsPromise = op_http_upgrade_websocket_next( + external, + response.headerList, + ); + + // Start the upgrade in the background. 
+ (async () => { + try { + // Returns the upgraded websocket connection + const wsRid = await wsPromise; + + // We have to wait for the go-ahead signal + await goAhead.promise; + + ws[_rid] = wsRid; + ws[_readyState] = WebSocket.OPEN; + ws[_role] = SERVER; + const event = new Event("open"); + ws.dispatchEvent(event); + + ws[_eventLoop](); + if (ws[_idleTimeoutDuration]) { + ws.addEventListener( + "close", + () => clearTimeout(ws[_idleTimeoutTimeout]), + ); + } + ws[_serverHandleIdleTimeout](); + } catch (error) { + const event = new ErrorEvent("error", { error }); + ws.dispatchEvent(event); + } + })(); + return { response: UPGRADE_RESPONSE_SENTINEL, socket: ws }; + } + } + + url() { + if (this.#urlValue !== undefined) { + return this.#urlValue; + } + + if (this.#methodAndUri === undefined) { + if (this.#external === null) { + throw new TypeError("Request closed"); + } + // TODO(mmastrac): This is quite slow as we're serializing a large number of values. We may want to consider + // splitting this up into multiple ops. 
+ this.#methodAndUri = op_http_get_request_method_and_url(this.#external); + } + + const path = this.#methodAndUri[2]; + + // * is valid for OPTIONS + if (path === "*") { + return this.#urlValue = "*"; + } + + // If the path is empty, return the authority (valid for CONNECT) + if (path == "") { + return this.#urlValue = this.#methodAndUri[1]; + } + + // CONNECT requires an authority + if (this.#methodAndUri[0] == "CONNECT") { + return this.#urlValue = this.#methodAndUri[1]; + } + + const hostname = this.#methodAndUri[1]; + if (hostname) { + // Construct a URL from the scheme, the hostname, and the path + return this.#urlValue = this.#context.scheme + hostname + path; + } + + // Construct a URL from the scheme, the fallback hostname, and the path + return this.#urlValue = this.#context.scheme + this.#context.fallbackHost + + path; + } + + get completed() { + if (!this.#completed) { + // NOTE: this is faster than Promise.withResolvers() + let resolve, reject; + const promise = new Promise((r1, r2) => { + resolve = r1; + reject = r2; + }); + this.#completed = { promise, resolve, reject }; + } + return this.#completed.promise; + } + + get remoteAddr() { + const transport = this.#context.listener?.addr.transport; + if (transport === "unix" || transport === "unixpacket") { + return { + transport, + path: this.#context.listener.addr.path, + }; + } + if (this.#methodAndUri === undefined) { + if (this.#external === null) { + throw new TypeError("Request closed"); + } + this.#methodAndUri = op_http_get_request_method_and_url(this.#external); + } + return { + transport: "tcp", + hostname: this.#methodAndUri[3], + port: this.#methodAndUri[4], + }; + } + + get method() { + if (this.#methodAndUri === undefined) { + if (this.#external === null) { + throw new TypeError("Request closed"); + } + this.#methodAndUri = op_http_get_request_method_and_url(this.#external); + } + return this.#methodAndUri[0]; + } + + get body() { + if (this.#external === null) { + throw new 
TypeError("Request closed"); + } + if (this.#body !== undefined) { + return this.#body; + } + // If the method is GET or HEAD, we do not want to include a body here, even if the Rust + // side of the code is willing to provide it to us. + if (this.method == "GET" || this.method == "HEAD") { + this.#body = null; + return null; + } + this.#streamRid = op_http_read_request_body(this.#external); + this.#body = new InnerBody(readableStreamForRid(this.#streamRid, false)); + return this.#body; + } + + get headerList() { + if (this.#external === null) { + throw new TypeError("Request closed"); + } + const headers = []; + const reqHeaders = op_http_get_request_headers(this.#external); + for (let i = 0; i < reqHeaders.length; i += 2) { + ArrayPrototypePush(headers, [reqHeaders[i], reqHeaders[i + 1]]); + } + return headers; + } + + get external() { + return this.#external; + } + + onCancel(callback) { + if (this.#external === null) { + callback(); + return; + } + + PromisePrototypeThen( + op_http_request_on_cancel(this.#external), + callback, + ); + } +} + +class CallbackContext { + abortController; + scheme; + fallbackHost; + serverRid; + closed; + /** @type {Promise | undefined} */ + closing; + listener; + asyncContext; + + constructor(signal, args, listener) { + this.asyncContext = getAsyncContext(); + // The abort signal triggers a non-graceful shutdown + signal?.addEventListener( + "abort", + () => { + op_http_cancel(this.serverRid, false); + }, + { once: true }, + ); + this.abortController = new AbortController(); + this.serverRid = args[0]; + this.scheme = args[1]; + this.fallbackHost = args[2]; + this.closed = false; + this.listener = listener; + } + + close() { + try { + this.closed = true; + core.tryClose(this.serverRid); + } catch { + // Pass + } + } +} + +class ServeHandlerInfo { + #inner: InnerRequest; + constructor(inner: InnerRequest) { + this.#inner = inner; + } + get remoteAddr() { + return this.#inner.remoteAddr; + } + get completed() { + return 
this.#inner.completed; + } +} + +function fastSyncResponseOrStream( + req, + respBody, + status, + innerRequest: InnerRequest, +) { + if (respBody === null || respBody === undefined) { + // Don't set the body + innerRequest?.close(); + op_http_set_promise_complete(req, status); + return; + } + + const stream = respBody.streamOrStatic; + const body = stream.body; + if (body !== undefined) { + // We ensure the response has not been consumed yet in the caller of this + // function. + stream.consumed = true; + } + + if (TypedArrayPrototypeGetSymbolToStringTag(body) === "Uint8Array") { + innerRequest?.close(); + op_http_set_response_body_bytes(req, body, status); + return; + } + + if (typeof body === "string") { + innerRequest?.close(); + op_http_set_response_body_text(req, body, status); + return; + } + + // At this point in the response it needs to be a stream + if (!ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, stream)) { + innerRequest?.close(); + throw new TypeError("Invalid response"); + } + const resourceBacking = getReadableStreamResourceBacking(stream); + let rid, autoClose; + if (resourceBacking) { + rid = resourceBacking.rid; + autoClose = resourceBacking.autoClose; + } else { + rid = resourceForReadableStream(stream); + autoClose = true; + } + PromisePrototypeThen( + op_http_set_response_body_resource( + req, + rid, + autoClose, + status, + ), + (success) => { + innerRequest?.close(success); + op_http_close_after_finish(req); + }, + ); +} + +/** + * Maps the incoming request slab ID to a fully-fledged Request object, passes it to the user-provided + * callback, then extracts the response that was returned from that callback. The response is then pulled + * apart and handled on the Rust side. + * + * This function returns a promise that will only reject in the case of abnormal exit. + */ +function mapToCallback(context, callback, onError) { + let mapped = async function (req, span) { + // Get the response from the user-provided callback. 
If that fails, use onError. If that fails, return a fallback + // 500 error. + let innerRequest; + let response; + try { + innerRequest = new InnerRequest(req, context); + const request = fromInnerRequest(innerRequest, "immutable"); + innerRequest.request = request; + + if (span) { + updateSpanFromRequest(span, request); + } + + response = await callback( + request, + new ServeHandlerInfo(innerRequest), + ); + + // Throwing Error if the handler return value is not a Response class + if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) { + throw new TypeError( + "Return value from serve handler must be a response or a promise resolving to a response", + ); + } + + if (response.type === "error") { + throw new TypeError( + "Return value from serve handler must not be an error response (like Response.error())", + ); + } + + if (response.bodyUsed) { + throw new TypeError( + "The body of the Response returned from the serve handler has already been consumed", + ); + } + } catch (error) { + try { + response = await onError(error); + if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) { + throw new TypeError( + "Return value from onError handler must be a response or a promise resolving to a response", + ); + } + } catch (error) { + // deno-lint-ignore no-console + console.error("Exception in onError while handling exception", error); + response = internalServerError(); + } + } + + if (span) { + updateSpanFromResponse(span, response); + } + + const inner = toInnerResponse(response); + if (innerRequest?.[_upgraded]) { + // We're done here as the connection has been upgraded during the callback and no longer requires servicing. + if (response !== UPGRADE_RESPONSE_SENTINEL) { + // deno-lint-ignore no-console + console.error("Upgrade response was not returned from callback"); + context.close(); + } + innerRequest?.[_upgraded](); + return; + } + + // Did everything shut down while we were waiting? 
+ if (context.closed) { + // We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate + innerRequest?.close(); + op_http_set_promise_complete(req, 503); + return; + } + + const status = inner.status; + const headers = inner.headerList; + if (headers && headers.length > 0) { + if (headers.length == 1) { + op_http_set_response_header(req, headers[0][0], headers[0][1]); + } else { + op_http_set_response_headers(req, headers); + } + } + + fastSyncResponseOrStream(req, inner.body, status, innerRequest); + }; + + if (TRACING_ENABLED) { + const origMapped = mapped; + mapped = function (req, _span) { + const oldCtx = getAsyncContext(); + setAsyncContext(context.asyncContext); + const span = builtinTracer().startSpan("deno.serve", { kind: 1 }); + try { + enterSpan(span); + return SafePromisePrototypeFinally( + origMapped(req, span), + () => span.end(), + ); + } finally { + // equiv to exitSpan. + setAsyncContext(oldCtx); + } + }; + } else { + const origMapped = mapped; + mapped = function (req, span) { + const oldCtx = getAsyncContext(); + setAsyncContext(context.asyncContext); + try { + return origMapped(req, span); + } finally { + setAsyncContext(oldCtx); + } + }; + } + + return mapped; +} + +type RawHandler = ( + request: Request, + info: ServeHandlerInfo, +) => Response | Promise; + +type RawServeOptions = { + port?: number; + hostname?: string; + signal?: AbortSignal; + reusePort?: boolean; + key?: string; + cert?: string; + onError?: (error: unknown) => Response | Promise; + onListen?: (params: { hostname: string; port: number }) => void; + handler?: RawHandler; +}; + +const kLoadBalanced = Symbol("kLoadBalanced"); + +function formatHostName(hostname: string): string { + // If the hostname is "0.0.0.0", we display "localhost" in console + // because browsers in Windows don't resolve "0.0.0.0". 
+ // See the discussion in https://github.com/denoland/deno_std/issues/1165 + if ( + (Deno.build.os === "windows") && + (hostname == "0.0.0.0" || hostname == "::") + ) { + return "localhost"; + } + + // Add brackets around ipv6 hostname + return StringPrototypeIncludes(hostname, ":") ? `[${hostname}]` : hostname; +} + +function serve(arg1, arg2) { + let options: RawServeOptions | undefined; + let handler: RawHandler | undefined; + + if (typeof arg1 === "function") { + handler = arg1; + } else if (typeof arg2 === "function") { + handler = arg2; + options = arg1; + } else { + options = arg1; + } + if (handler === undefined) { + if (options === undefined) { + throw new TypeError( + "Cannot serve HTTP requests: either a `handler` or `options` must be specified", + ); + } + handler = options.handler; + } + if (typeof handler !== "function") { + throw new TypeError( + `Cannot serve HTTP requests: handler must be a function, received ${typeof handler}`, + ); + } + if (options === undefined) { + options = { __proto__: null }; + } + + const wantsHttps = hasTlsKeyPairOptions(options); + const wantsUnix = ObjectHasOwn(options, "path"); + const signal = options.signal; + const onError = options.onError ?? function (error) { + // deno-lint-ignore no-console + console.error(error); + return internalServerError(); + }; + + if (wantsUnix) { + const listener = listen({ + transport: "unix", + path: options.path, + [listenOptionApiName]: "Deno.serve", + }); + const path = listener.addr.path; + return serveHttpOnListener(listener, signal, handler, onError, () => { + if (options.onListen) { + options.onListen(listener.addr); + } else { + // deno-lint-ignore no-console + console.error(`Listening on ${path}`); + } + }); + } + + const listenOpts = { + hostname: options.hostname ?? "0.0.0.0", + port: options.port ?? 8000, + reusePort: options.reusePort ?? false, + loadBalanced: options[kLoadBalanced] ?? 
false, + }; + + if (options.certFile || options.keyFile) { + throw new TypeError( + "Unsupported 'certFile' / 'keyFile' options provided: use 'cert' / 'key' instead.", + ); + } + if (options.alpnProtocols) { + throw new TypeError( + "Unsupported 'alpnProtocols' option provided. 'h2' and 'http/1.1' are automatically supported.", + ); + } + + let listener; + if (wantsHttps) { + if (!options.cert || !options.key) { + throw new TypeError( + "Both 'cert' and 'key' must be provided to enable HTTPS", + ); + } + listenOpts.cert = options.cert; + listenOpts.key = options.key; + listenOpts.alpnProtocols = ["h2", "http/1.1"]; + listener = listenTls(listenOpts); + listenOpts.port = listener.addr.port; + } else { + listener = listen(listenOpts); + listenOpts.port = listener.addr.port; + } + + const addr = listener.addr; + + const onListen = (scheme) => { + if (options.onListen) { + options.onListen(addr); + } else { + const host = formatHostName(addr.hostname); + + // deno-lint-ignore no-console + console.error(`Listening on ${scheme}${host}:${addr.port}/`); + } + }; + + return serveHttpOnListener(listener, signal, handler, onError, onListen); +} + +/** + * Serve HTTP/1.1 and/or HTTP/2 on an arbitrary listener. + */ +function serveHttpOnListener(listener, signal, handler, onError, onListen) { + const context = new CallbackContext( + signal, + op_http_serve(listener[internalRidSymbol]), + listener, + ); + const callback = mapToCallback(context, handler, onError); + + onListen(context.scheme); + + return serveHttpOn(context, listener.addr, callback); +} + +/** + * Serve HTTP/1.1 and/or HTTP/2 on an arbitrary connection. 
+ */ +function serveHttpOnConnection(connection, signal, handler, onError, onListen) { + const context = new CallbackContext( + signal, + op_http_serve_on(connection[internalRidSymbol]), + null, + ); + const callback = mapToCallback(context, handler, onError); + + onListen(context.scheme); + + return serveHttpOn(context, connection.localAddr, callback); +} + +function serveHttpOn(context, addr, callback) { + let ref = true; + let currentPromise = null; + + const promiseErrorHandler = (error) => { + // Abnormal exit + // deno-lint-ignore no-console + console.error( + "Terminating Deno.serve loop due to unexpected error", + error, + ); + context.close(); + }; + + // Run the server + const finished = (async () => { + const rid = context.serverRid; + while (true) { + let req; + try { + // Attempt to pull as many requests out of the queue as possible before awaiting. This API is + // a synchronous, non-blocking API that returns u32::MAX if anything goes wrong. + while ((req = op_http_try_wait(rid)) !== null) { + PromisePrototypeCatch(callback(req, undefined), promiseErrorHandler); + } + currentPromise = op_http_wait(rid); + if (!ref) { + core.unrefOpPromise(currentPromise); + } + req = await currentPromise; + currentPromise = null; + } catch (error) { + if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) { + break; + } + if (ObjectPrototypeIsPrototypeOf(InterruptedPrototype, error)) { + break; + } + throw new Deno.errors.Http(error); + } + if (req === null) { + break; + } + PromisePrototypeCatch(callback(req, undefined), promiseErrorHandler); + } + + try { + if (!context.closing && !context.closed) { + context.closing = await op_http_close(rid, false); + context.close(); + } + + await context.closing; + } catch (error) { + if (ObjectPrototypeIsPrototypeOf(InterruptedPrototype, error)) { + return; + } + if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) { + return; + } + + throw error; + } finally { + context.close(); + context.closed = true; + } + 
})(); + + return { + addr, + finished, + async shutdown() { + try { + if (!context.closing && !context.closed) { + // Shut this HTTP server down gracefully + context.closing = op_http_close(context.serverRid, true); + } + + await context.closing; + } catch (error) { + // The server was interrupted + if (ObjectPrototypeIsPrototypeOf(InterruptedPrototype, error)) { + return; + } + if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) { + return; + } + + throw error; + } finally { + context.closed = true; + } + }, + ref() { + ref = true; + if (currentPromise) { + core.refOpPromise(currentPromise); + } + }, + unref() { + ref = false; + if (currentPromise) { + core.unrefOpPromise(currentPromise); + } + }, + [SymbolAsyncDispose]() { + return this.shutdown(); + }, + }; +} + +internals.addTrailers = addTrailers; +internals.upgradeHttpRaw = upgradeHttpRaw; +internals.serveHttpOnListener = serveHttpOnListener; +internals.serveHttpOnConnection = serveHttpOnConnection; + +function registerDeclarativeServer(exports) { + if (ObjectHasOwn(exports, "fetch")) { + if (typeof exports.fetch !== "function") { + throw new TypeError( + "Invalid type for fetch: must be a function with a single or no parameter", + ); + } + return ({ servePort, serveHost, serveIsMain, serveWorkerCount }) => { + Deno.serve({ + port: servePort, + hostname: serveHost, + [kLoadBalanced]: (serveIsMain && serveWorkerCount > 1) || + (serveWorkerCount !== null), + onListen: ({ port, hostname }) => { + if (serveIsMain) { + const nThreads = serveWorkerCount > 1 + ? 
` with ${serveWorkerCount} threads` + : ""; + const host = formatHostName(hostname); + + // deno-lint-ignore no-console + console.error( + `%cdeno serve%c: Listening on %chttp://${host}:${port}/%c${nThreads}`, + "color: green", + "color: inherit", + "color: yellow", + "color: inherit", + ); + } + }, + handler: (req, connInfo) => { + return exports.fetch(req, connInfo); + }, + }); + }; + } +} + +export { + addTrailers, + registerDeclarativeServer, + serve, + serveHttpOnConnection, + serveHttpOnListener, + upgradeHttpRaw, +}; diff --git a/vendor/deno_http/01_http.js b/vendor/deno_http/01_http.js new file mode 100644 index 00000000..9302bd8a --- /dev/null +++ b/vendor/deno_http/01_http.js @@ -0,0 +1,403 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +import { core, primordials } from "ext:core/mod.js"; +const { + BadResourcePrototype, + InterruptedPrototype, + internalRidSymbol, +} = core; +import { + op_http_accept, + op_http_headers, + op_http_shutdown, + op_http_start, + op_http_upgrade_websocket, + op_http_write, + op_http_write_headers, + op_http_write_resource, +} from "ext:core/ops"; +const { + ObjectPrototypeIsPrototypeOf, + SafeSet, + SafeSetIterator, + SetPrototypeAdd, + SetPrototypeDelete, + StringPrototypeIncludes, + Symbol, + SymbolAsyncIterator, + TypeError, + TypedArrayPrototypeGetSymbolToStringTag, + Uint8Array, +} = primordials; +import { _ws } from "ext:deno_http/02_websocket.ts"; +import { InnerBody } from "ext:deno_fetch/22_body.js"; +import { Event } from "ext:deno_web/02_event.js"; +import { BlobPrototype } from "ext:deno_web/09_file.js"; +import { + ResponsePrototype, + toInnerResponse, +} from "ext:deno_fetch/23_response.js"; +import { + abortRequest, + fromInnerRequest, + newInnerRequest, +} from "ext:deno_fetch/23_request.js"; +import { + _eventLoop, + _idleTimeoutDuration, + _idleTimeoutTimeout, + _protocol, + _readyState, + _rid, + _role, + _server, + _serverHandleIdleTimeout, + SERVER, + WebSocket, +} from 
"ext:deno_websocket/01_websocket.js"; +import { + getReadableStreamResourceBacking, + readableStreamClose, + readableStreamForRid, + ReadableStreamPrototype, +} from "ext:deno_web/06_streams.js"; +import { SymbolDispose } from "ext:deno_web/00_infra.js"; + +const connErrorSymbol = Symbol("connError"); + +/** @type {(self: HttpConn, rid: number) => boolean} */ +let deleteManagedResource; + +class HttpConn { + #rid = 0; + #closed = false; + #remoteAddr; + #localAddr; + + // This set holds resource ids of resources + // that were created during lifecycle of this request. + // When the connection is closed these resources should be closed + // as well. + #managedResources = new SafeSet(); + + static { + deleteManagedResource = (self, rid) => + SetPrototypeDelete(self.#managedResources, rid); + } + + constructor(rid, remoteAddr, localAddr) { + this.#rid = rid; + this.#remoteAddr = remoteAddr; + this.#localAddr = localAddr; + } + + /** @returns {number} */ + get rid() { + return this.#rid; + } + + /** @returns {Promise} */ + async nextRequest() { + let nextRequest; + try { + nextRequest = await op_http_accept(this.#rid); + } catch (error) { + this.close(); + // A connection error seen here would cause disrupted responses to throw + // a generic `BadResource` error. Instead store this error and replace + // those with it. + this[connErrorSymbol] = error; + if ( + ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error) || + ObjectPrototypeIsPrototypeOf(InterruptedPrototype, error) || + StringPrototypeIncludes(error.message, "connection closed") + ) { + return null; + } + throw error; + } + if (nextRequest == null) { + // Work-around for servers (deno_std/http in particular) that call + // `nextRequest()` before upgrading a previous request which has a + // `connection: upgrade` header. 
+ await null; + + this.close(); + return null; + } + + const { 0: readStreamRid, 1: writeStreamRid, 2: method, 3: url } = + nextRequest; + SetPrototypeAdd(this.#managedResources, readStreamRid); + SetPrototypeAdd(this.#managedResources, writeStreamRid); + + /** @type {ReadableStream | undefined} */ + let body = null; + // There might be a body, but we don't expose it for GET/HEAD requests. + // It will be closed automatically once the request has been handled and + // the response has been sent. + if (method !== "GET" && method !== "HEAD") { + body = readableStreamForRid(readStreamRid, false); + } + + const innerRequest = newInnerRequest( + method, + url, + () => op_http_headers(readStreamRid), + body !== null ? new InnerBody(body) : null, + false, + ); + const request = fromInnerRequest( + innerRequest, + "immutable", + false, + ); + + const respondWith = createRespondWith( + this, + request, + readStreamRid, + writeStreamRid, + ); + + return { request, respondWith }; + } + + /** @returns {void} */ + close() { + if (!this.#closed) { + this.#closed = true; + core.tryClose(this.#rid); + for (const rid of new SafeSetIterator(this.#managedResources)) { + SetPrototypeDelete(this.#managedResources, rid); + core.tryClose(rid); + } + } + } + + [SymbolDispose]() { + core.tryClose(this.#rid); + for (const rid of new SafeSetIterator(this.#managedResources)) { + SetPrototypeDelete(this.#managedResources, rid); + core.tryClose(rid); + } + } + + [SymbolAsyncIterator]() { + // deno-lint-ignore no-this-alias + const httpConn = this; + return { + async next() { + const reqEvt = await httpConn.nextRequest(); + // Change with caution, current form avoids a v8 deopt + return { value: reqEvt ?? 
/**
 * Builds the `respondWith` callback that is handed out together with each
 * request of a `HttpConn`. The returned async function serializes the
 * `Response`, streams its body when its length is not known up front,
 * performs an optional websocket upgrade, and always releases the
 * per-request stream resources in its `finally` block.
 */
function createRespondWith(
  httpConn,
  request,
  readStreamRid,
  writeStreamRid,
) {
  return async function respondWith(resp) {
    // Swaps a BadResource error for the recorded connection error, so the
    // caller sees why the connection actually failed instead of a generic
    // "resource closed" message.
    const refineError = (error) => {
      const connError = httpConn[connErrorSymbol];
      if (
        ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error) &&
        connError != null
      ) {
        return new connError.constructor(connError.message);
      }
      return error;
    };

    try {
      resp = await resp;
      if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, resp)) {
        throw new TypeError(
          "First argument to 'respondWith' must be a Response or a promise resolving to a Response",
        );
      }

      const innerResp = toInnerResponse(resp);

      // When the body length is known it is sent synchronously in a single
      // op; otherwise a "response body" resource is created and the body is
      // streamed through it.
      /** @type {ReadableStream | Uint8Array | null} */
      let respBody = null;
      if (innerResp.body === null) {
        respBody = new Uint8Array(0);
      } else {
        if (innerResp.body.unusable()) {
          throw new TypeError("Body is unusable");
        }
        const { streamOrStatic } = innerResp.body;
        if (
          !ObjectPrototypeIsPrototypeOf(
            ReadableStreamPrototype,
            streamOrStatic,
          )
        ) {
          // Static body: mark it consumed and send it as-is.
          streamOrStatic.consumed = true;
          respBody = streamOrStatic.body;
        } else if (
          innerResp.body.length === null ||
          ObjectPrototypeIsPrototypeOf(BlobPrototype, innerResp.body.source)
        ) {
          respBody = innerResp.body.stream;
        } else {
          // Known-length stream: drain its single chunk so it can be written
          // in one op instead of going through the streaming path.
          const reader = innerResp.body.stream.getReader();
          const first = await reader.read();
          if (first.done) {
            respBody = new Uint8Array(0);
          } else {
            respBody = first.value;
            const second = await reader.read();
            if (!second.done) throw new TypeError("Unreachable");
          }
        }
      }

      const isStreamingResponseBody = !(
        typeof respBody === "string" ||
        TypedArrayPrototypeGetSymbolToStringTag(respBody) === "Uint8Array"
      );

      try {
        // Sends status + headers, and the complete body when it is static.
        await op_http_write_headers(
          writeStreamRid,
          innerResp.status ?? 200,
          innerResp.headerList,
          isStreamingResponseBody ? null : respBody,
        );
      } catch (rawError) {
        const error = refineError(rawError);
        if (
          respBody !== null &&
          ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, respBody)
        ) {
          await respBody.cancel(error);
        }
        throw error;
      }

      if (isStreamingResponseBody) {
        if (
          respBody === null ||
          !ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, respBody)
        ) {
          throw new TypeError("Unreachable");
        }
        let success = false;
        let reader;
        const resourceBacking = getReadableStreamResourceBacking(respBody);
        if (resourceBacking) {
          // Resource-backed stream: hand the resource straight to the op.
          if (respBody.locked) {
            throw new TypeError("ReadableStream is locked");
          }
          reader = respBody.getReader(); // Acquire JS lock.
          try {
            await op_http_write_resource(
              writeStreamRid,
              resourceBacking.rid,
            );
            if (resourceBacking.autoClose) core.tryClose(resourceBacking.rid);
            readableStreamClose(respBody); // Release JS lock.
            success = true;
          } catch (rawError) {
            const error = refineError(rawError);
            await reader.cancel(error);
            throw error;
          }
        } else {
          // Plain JS stream: pump chunks through the write op one by one.
          reader = respBody.getReader();
          while (true) {
            const { value, done } = await reader.read();
            if (done) break;
            if (
              TypedArrayPrototypeGetSymbolToStringTag(value) !== "Uint8Array"
            ) {
              await reader.cancel(new TypeError("Value not a Uint8Array"));
              break;
            }
            try {
              await op_http_write(writeStreamRid, value);
            } catch (rawError) {
              const error = refineError(rawError);
              await reader.cancel(error);
              throw error;
            }
          }
          success = true;
        }

        if (success) {
          try {
            await op_http_shutdown(writeStreamRid);
          } catch (error) {
            await reader.cancel(error);
            throw error;
          }
        }
      }

      const ws = resp[_ws];
      if (ws) {
        // The response carries a websocket: switch the connection over.
        const wsRid = await op_http_upgrade_websocket(
          readStreamRid,
        );
        ws[_rid] = wsRid;
        ws[_protocol] = resp.headers.get("sec-websocket-protocol");

        httpConn.close();

        ws[_readyState] = WebSocket.OPEN;
        ws[_role] = SERVER;
        const event = new Event("open");
        ws.dispatchEvent(event);

        ws[_eventLoop]();
        if (ws[_idleTimeoutDuration]) {
          ws.addEventListener(
            "close",
            () => clearTimeout(ws[_idleTimeoutTimeout]),
          );
        }
        ws[_serverHandleIdleTimeout]();
      }
    } catch (error) {
      abortRequest(request);
      throw error;
    } finally {
      // Always release the per-request stream resources we registered.
      if (deleteManagedResource(httpConn, readStreamRid)) {
        core.tryClose(readStreamRid);
      }
      if (deleteManagedResource(httpConn, writeStreamRid)) {
        core.tryClose(writeStreamRid);
      }
    }
  };
}
op_http_start(conn[internalRidSymbol]); + return new HttpConn(rid, conn.remoteAddr, conn.localAddr); +} + +export { HttpConn, serveHttp }; diff --git a/vendor/deno_http/02_websocket.ts b/vendor/deno_http/02_websocket.ts new file mode 100644 index 00000000..96af4d48 --- /dev/null +++ b/vendor/deno_http/02_websocket.ts @@ -0,0 +1,186 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +import { internals, primordials } from "ext:core/mod.js"; +import { op_http_websocket_accept_header } from "ext:core/ops"; +const { + ArrayPrototypeIncludes, + ArrayPrototypeMap, + ArrayPrototypePush, + StringPrototypeCharCodeAt, + StringPrototypeSplit, + StringPrototypeToLowerCase, + StringPrototypeToUpperCase, + TypeError, + Symbol, +} = primordials; +import { toInnerRequest } from "ext:deno_fetch/23_request.js"; +import { + fromInnerResponse, + newInnerResponse, +} from "ext:deno_fetch/23_response.js"; +import { setEventTargetData } from "ext:deno_web/02_event.js"; +import { + _eventLoop, + _idleTimeoutDuration, + _idleTimeoutTimeout, + _protocol, + _readyState, + _rid, + _role, + _server, + _serverHandleIdleTimeout, + createWebSocketBranded, + WebSocket, +} from "ext:deno_websocket/01_websocket.js"; + +const _ws = Symbol("[[associated_ws]]"); + +const websocketCvf = buildCaseInsensitiveCommaValueFinder("websocket"); +const upgradeCvf = buildCaseInsensitiveCommaValueFinder("upgrade"); + +function upgradeWebSocket(request, options = { __proto__: null }) { + const inner = toInnerRequest(request); + const upgrade = request.headers.get("upgrade"); + const upgradeHasWebSocketOption = upgrade !== null && + websocketCvf(upgrade); + if (!upgradeHasWebSocketOption) { + throw new TypeError( + "Invalid Header: 'upgrade' header must contain 'websocket'", + ); + } + + const connection = request.headers.get("connection"); + const connectionHasUpgradeOption = connection !== null && + upgradeCvf(connection); + if (!connectionHasUpgradeOption) { + throw new TypeError( + 
"Invalid Header: 'connection' header must contain 'Upgrade'", + ); + } + + const websocketKey = request.headers.get("sec-websocket-key"); + if (websocketKey === null) { + throw new TypeError( + "Invalid Header: 'sec-websocket-key' header must be set", + ); + } + + const accept = op_http_websocket_accept_header(websocketKey); + + const r = newInnerResponse(101); + r.headerList = [ + ["upgrade", "websocket"], + ["connection", "Upgrade"], + ["sec-websocket-accept", accept], + ]; + + const protocolsStr = request.headers.get("sec-websocket-protocol") || ""; + const protocols = StringPrototypeSplit(protocolsStr, ", "); + if (protocols && options.protocol) { + if (ArrayPrototypeIncludes(protocols, options.protocol)) { + ArrayPrototypePush(r.headerList, [ + "sec-websocket-protocol", + options.protocol, + ]); + } else { + throw new TypeError( + `Protocol '${options.protocol}' not in the request's protocol list (non negotiable)`, + ); + } + } + + const socket = createWebSocketBranded(WebSocket); + setEventTargetData(socket); + socket[_server] = true; + // Nginx timeout is 60s, so default to a lower number: https://github.com/denoland/deno/pull/23985 + socket[_idleTimeoutDuration] = options.idleTimeout ?? 30; + socket[_idleTimeoutTimeout] = null; + + if (inner._wantsUpgrade) { + return inner._wantsUpgrade("upgradeWebSocket", r, socket); + } + + const response = fromInnerResponse(r, "immutable"); + + response[_ws] = socket; + + return { response, socket }; +} + +const spaceCharCode = StringPrototypeCharCodeAt(" ", 0); +const tabCharCode = StringPrototypeCharCodeAt("\t", 0); +const commaCharCode = StringPrototypeCharCodeAt(",", 0); + +/** Builds a case function that can be used to find a case insensitive + * value in some text that's separated by commas. + * + * This is done because it doesn't require any allocations. + * @param checkText {string} - The text to find. (ex. 
"websocket") + */ +function buildCaseInsensitiveCommaValueFinder(checkText) { + const charCodes = ArrayPrototypeMap( + StringPrototypeSplit( + StringPrototypeToLowerCase(checkText), + "", + ), + (c) => [ + StringPrototypeCharCodeAt(c, 0), + StringPrototypeCharCodeAt(StringPrototypeToUpperCase(c), 0), + ], + ); + /** @type {number} */ + let i; + /** @type {number} */ + let char; + + /** @param {string} value */ + return function (value) { + for (i = 0; i < value.length; i++) { + char = StringPrototypeCharCodeAt(value, i); + skipWhitespace(value); + + if (hasWord(value)) { + skipWhitespace(value); + if (i === value.length || char === commaCharCode) { + return true; + } + } else { + skipUntilComma(value); + } + } + + return false; + }; + + /** @param value {string} */ + function hasWord(value) { + for (let j = 0; j < charCodes.length; ++j) { + const { 0: cLower, 1: cUpper } = charCodes[j]; + if (cLower === char || cUpper === char) { + char = StringPrototypeCharCodeAt(value, ++i); + } else { + return false; + } + } + return true; + } + + /** @param value {string} */ + function skipWhitespace(value) { + while (char === spaceCharCode || char === tabCharCode) { + char = StringPrototypeCharCodeAt(value, ++i); + } + } + + /** @param value {string} */ + function skipUntilComma(value) { + while (char !== commaCharCode && i < value.length) { + char = StringPrototypeCharCodeAt(value, ++i); + } + } +} + +// Expose this function for unit tests +internals.buildCaseInsensitiveCommaValueFinder = + buildCaseInsensitiveCommaValueFinder; + +export { _ws, upgradeWebSocket }; diff --git a/vendor/deno_http/Cargo.toml b/vendor/deno_http/Cargo.toml new file mode 100644 index 00000000..27a91ca6 --- /dev/null +++ b/vendor/deno_http/Cargo.toml @@ -0,0 +1,58 @@ +# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +[package] +name = "deno_http" +version = "0.180.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +readme = "README.md" +repository.workspace = true +description = "HTTP server implementation for Deno" + +[features] +"__http_tracing" = [] + +[lib] +path = "lib.rs" + +[[bench]] +name = "compressible" +harness = false + +[dependencies] +async-compression = { version = "0.4", features = ["tokio", "brotli", "gzip"] } +async-trait.workspace = true +base64.workspace = true +brotli.workspace = true +bytes.workspace = true +cache_control.workspace = true +deno_core.workspace = true +deno_net.workspace = true +deno_websocket.workspace = true +flate2.workspace = true +http.workspace = true +http_v02.workspace = true +httparse.workspace = true +hyper.workspace = true +hyper-util.workspace = true +hyper_v014 = { workspace = true, features = ["server", "stream", "http1", "http2", "runtime"] } +itertools = "0.10" +memmem.workspace = true +mime = "0.3.16" +once_cell.workspace = true +percent-encoding.workspace = true +phf.workspace = true +pin-project.workspace = true +ring.workspace = true +scopeguard.workspace = true +serde.workspace = true +smallvec.workspace = true +thiserror.workspace = true +tokio.workspace = true +tokio-util = { workspace = true, features = ["io"] } + +[dev-dependencies] +bencher.workspace = true +http-body-util.workspace = true +rand.workspace = true diff --git a/vendor/deno_http/README.md b/vendor/deno_http/README.md new file mode 100644 index 00000000..ab557017 --- /dev/null +++ b/vendor/deno_http/README.md @@ -0,0 +1,4 @@ +# deno_http + +This crate implements server-side HTTP based on primitives from the +[Fetch API](https://fetch.spec.whatwg.org/). diff --git a/vendor/deno_http/benches/compressible.rs b/vendor/deno_http/benches/compressible.rs new file mode 100644 index 00000000..5ac09cb8 --- /dev/null +++ b/vendor/deno_http/benches/compressible.rs @@ -0,0 +1,39 @@ +// Copyright 2018-2024 the Deno authors. 
All rights reserved. MIT license. +use bencher::benchmark_group; +use bencher::benchmark_main; +use bencher::Bencher; +use deno_http::compressible::is_content_compressible; + +fn compressible_simple_hit(b: &mut Bencher) { + b.iter(|| { + is_content_compressible("text/plain"); + }) +} + +fn compressible_complex_hit(b: &mut Bencher) { + b.iter(|| { + is_content_compressible("text/PlAIn; charset=utf-8"); + }) +} + +fn compressible_simple_miss(b: &mut Bencher) { + b.iter(|| { + is_content_compressible("text/fake"); + }) +} + +fn compressible_complex_miss(b: &mut Bencher) { + b.iter(|| { + is_content_compressible("text/fake;charset=utf-8"); + }) +} + +benchmark_group!( + benches, + compressible_simple_hit, + compressible_complex_hit, + compressible_simple_miss, + compressible_complex_miss, +); + +benchmark_main!(benches); diff --git a/vendor/deno_http/compressible.rs b/vendor/deno_http/compressible.rs new file mode 100644 index 00000000..6e96582e --- /dev/null +++ b/vendor/deno_http/compressible.rs @@ -0,0 +1,656 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use phf::phf_set; +use std::str::FromStr; + +// Data obtained from https://github.com/jshttp/mime-db/blob/fa5e4ef3cc8907ec3c5ec5b85af0c63d7059a5cd/db.json +// Important! Keep this list sorted alphabetically. +static CONTENT_TYPES: phf::Set<&'static [u8]> = phf_set! 
{ + b"application/3gpdash-qoe-report+xml", + b"application/3gpp-ims+xml", + b"application/3gpphal+json", + b"application/3gpphalforms+json", + b"application/activity+json", + b"application/alto-costmap+json", + b"application/alto-costmapfilter+json", + b"application/alto-directory+json", + b"application/alto-endpointcost+json", + b"application/alto-endpointcostparams+json", + b"application/alto-endpointprop+json", + b"application/alto-endpointpropparams+json", + b"application/alto-error+json", + b"application/alto-networkmap+json", + b"application/alto-networkmapfilter+json", + b"application/alto-updatestreamcontrol+json", + b"application/alto-updatestreamparams+json", + b"application/atom+xml", + b"application/atomcat+xml", + b"application/atomdeleted+xml", + b"application/atomsvc+xml", + b"application/atsc-dwd+xml", + b"application/atsc-held+xml", + b"application/atsc-rdt+json", + b"application/atsc-rsat+xml", + b"application/auth-policy+xml", + b"application/beep+xml", + b"application/calendar+json", + b"application/calendar+xml", + b"application/captive+json", + b"application/ccmp+xml", + b"application/ccxml+xml", + b"application/cdfx+xml", + b"application/cea-2018+xml", + b"application/cellml+xml", + b"application/clue+xml", + b"application/clue_info+xml", + b"application/cnrp+xml", + b"application/coap-group+json", + b"application/conference-info+xml", + b"application/cpl+xml", + b"application/csta+xml", + b"application/cstadata+xml", + b"application/csvm+json", + b"application/dart", + b"application/dash+xml", + b"application/davmount+xml", + b"application/dialog-info+xml", + b"application/dicom+json", + b"application/dicom+xml", + b"application/dns+json", + b"application/docbook+xml", + b"application/dskpp+xml", + b"application/dssc+xml", + b"application/ecmascript", + b"application/elm+json", + b"application/elm+xml", + b"application/emergencycalldata.cap+xml", + b"application/emergencycalldata.comment+xml", + b"application/emergencycalldata.control+xml", 
+ b"application/emergencycalldata.deviceinfo+xml", + b"application/emergencycalldata.providerinfo+xml", + b"application/emergencycalldata.serviceinfo+xml", + b"application/emergencycalldata.subscriberinfo+xml", + b"application/emergencycalldata.veds+xml", + b"application/emma+xml", + b"application/emotionml+xml", + b"application/epp+xml", + b"application/expect-ct-report+json", + b"application/fdt+xml", + b"application/fhir+json", + b"application/fhir+xml", + b"application/fido.trusted-apps+json", + b"application/framework-attributes+xml", + b"application/geo+json", + b"application/geoxacml+xml", + b"application/gml+xml", + b"application/gpx+xml", + b"application/held+xml", + b"application/ibe-key-request+xml", + b"application/ibe-pkg-reply+xml", + b"application/im-iscomposing+xml", + b"application/inkml+xml", + b"application/its+xml", + b"application/javascript", + b"application/jf2feed+json", + b"application/jose+json", + b"application/jrd+json", + b"application/jscalendar+json", + b"application/json", + b"application/json-patch+json", + b"application/jsonml+json", + b"application/jwk+json", + b"application/jwk-set+json", + b"application/kpml-request+xml", + b"application/kpml-response+xml", + b"application/ld+json", + b"application/lgr+xml", + b"application/load-control+xml", + b"application/lost+xml", + b"application/lostsync+xml", + b"application/mads+xml", + b"application/manifest+json", + b"application/marcxml+xml", + b"application/mathml+xml", + b"application/mathml-content+xml", + b"application/mathml-presentation+xml", + b"application/mbms-associated-procedure-description+xml", + b"application/mbms-deregister+xml", + b"application/mbms-envelope+xml", + b"application/mbms-msk+xml", + b"application/mbms-msk-response+xml", + b"application/mbms-protection-description+xml", + b"application/mbms-reception-report+xml", + b"application/mbms-register+xml", + b"application/mbms-register-response+xml", + b"application/mbms-schedule+xml", + 
b"application/mbms-user-service-description+xml", + b"application/media-policy-dataset+xml", + b"application/media_control+xml", + b"application/mediaservercontrol+xml", + b"application/merge-patch+json", + b"application/metalink+xml", + b"application/metalink4+xml", + b"application/mets+xml", + b"application/mmt-aei+xml", + b"application/mmt-usd+xml", + b"application/mods+xml", + b"application/mrb-consumer+xml", + b"application/mrb-publish+xml", + b"application/msc-ivr+xml", + b"application/msc-mixer+xml", + b"application/mud+json", + b"application/nlsml+xml", + b"application/odm+xml", + b"application/oebps-package+xml", + b"application/omdoc+xml", + b"application/opc-nodeset+xml", + b"application/p2p-overlay+xml", + b"application/patch-ops-error+xml", + b"application/pidf+xml", + b"application/pidf-diff+xml", + b"application/pls+xml", + b"application/poc-settings+xml", + b"application/postscript", + b"application/ppsp-tracker+json", + b"application/problem+json", + b"application/problem+xml", + b"application/provenance+xml", + b"application/prs.xsf+xml", + b"application/pskc+xml", + b"application/pvd+json", + b"application/raml+yaml", + b"application/rdap+json", + b"application/rdf+xml", + b"application/reginfo+xml", + b"application/reputon+json", + b"application/resource-lists+xml", + b"application/resource-lists-diff+xml", + b"application/rfc+xml", + b"application/rlmi+xml", + b"application/rls-services+xml", + b"application/route-apd+xml", + b"application/route-s-tsid+xml", + b"application/route-usd+xml", + b"application/rsd+xml", + b"application/rss+xml", + b"application/rtf", + b"application/samlassertion+xml", + b"application/samlmetadata+xml", + b"application/sarif+json", + b"application/sarif-external-properties+json", + b"application/sbml+xml", + b"application/scaip+xml", + b"application/scim+json", + b"application/senml+json", + b"application/senml+xml", + b"application/senml-etch+json", + b"application/sensml+json", + b"application/sensml+xml", + 
b"application/sep+xml", + b"application/shf+xml", + b"application/simple-filter+xml", + b"application/smil+xml", + b"application/soap+xml", + b"application/sparql-results+xml", + b"application/spirits-event+xml", + b"application/srgs+xml", + b"application/sru+xml", + b"application/ssdl+xml", + b"application/ssml+xml", + b"application/stix+json", + b"application/swid+xml", + b"application/tar", + b"application/taxii+json", + b"application/td+json", + b"application/tei+xml", + b"application/thraud+xml", + b"application/tlsrpt+json", + b"application/toml", + b"application/ttml+xml", + b"application/urc-grpsheet+xml", + b"application/urc-ressheet+xml", + b"application/urc-targetdesc+xml", + b"application/urc-uisocketdesc+xml", + b"application/vcard+json", + b"application/vcard+xml", + b"application/vnd.1000minds.decision-model+xml", + b"application/vnd.3gpp-prose+xml", + b"application/vnd.3gpp-prose-pc3ch+xml", + b"application/vnd.3gpp.access-transfer-events+xml", + b"application/vnd.3gpp.bsf+xml", + b"application/vnd.3gpp.gmop+xml", + b"application/vnd.3gpp.mcdata-affiliation-command+xml", + b"application/vnd.3gpp.mcdata-info+xml", + b"application/vnd.3gpp.mcdata-service-config+xml", + b"application/vnd.3gpp.mcdata-ue-config+xml", + b"application/vnd.3gpp.mcdata-user-profile+xml", + b"application/vnd.3gpp.mcptt-affiliation-command+xml", + b"application/vnd.3gpp.mcptt-floor-request+xml", + b"application/vnd.3gpp.mcptt-info+xml", + b"application/vnd.3gpp.mcptt-location-info+xml", + b"application/vnd.3gpp.mcptt-mbms-usage-info+xml", + b"application/vnd.3gpp.mcptt-service-config+xml", + b"application/vnd.3gpp.mcptt-signed+xml", + b"application/vnd.3gpp.mcptt-ue-config+xml", + b"application/vnd.3gpp.mcptt-ue-init-config+xml", + b"application/vnd.3gpp.mcptt-user-profile+xml", + b"application/vnd.3gpp.mcvideo-affiliation-command+xml", + b"application/vnd.3gpp.mcvideo-affiliation-info+xml", + b"application/vnd.3gpp.mcvideo-info+xml", + 
b"application/vnd.3gpp.mcvideo-location-info+xml", + b"application/vnd.3gpp.mcvideo-mbms-usage-info+xml", + b"application/vnd.3gpp.mcvideo-service-config+xml", + b"application/vnd.3gpp.mcvideo-transmission-request+xml", + b"application/vnd.3gpp.mcvideo-ue-config+xml", + b"application/vnd.3gpp.mcvideo-user-profile+xml", + b"application/vnd.3gpp.mid-call+xml", + b"application/vnd.3gpp.sms+xml", + b"application/vnd.3gpp.srvcc-ext+xml", + b"application/vnd.3gpp.srvcc-info+xml", + b"application/vnd.3gpp.state-and-event-info+xml", + b"application/vnd.3gpp.ussd+xml", + b"application/vnd.3gpp2.bcmcsinfo+xml", + b"application/vnd.adobe.xdp+xml", + b"application/vnd.amadeus+json", + b"application/vnd.amundsen.maze+xml", + b"application/vnd.api+json", + b"application/vnd.aplextor.warrp+json", + b"application/vnd.apothekende.reservation+json", + b"application/vnd.apple.installer+xml", + b"application/vnd.artisan+json", + b"application/vnd.avalon+json", + b"application/vnd.avistar+xml", + b"application/vnd.balsamiq.bmml+xml", + b"application/vnd.bbf.usp.msg+json", + b"application/vnd.bekitzur-stech+json", + b"application/vnd.biopax.rdf+xml", + b"application/vnd.byu.uapi+json", + b"application/vnd.capasystems-pg+json", + b"application/vnd.chemdraw+xml", + b"application/vnd.citationstyles.style+xml", + b"application/vnd.collection+json", + b"application/vnd.collection.doc+json", + b"application/vnd.collection.next+json", + b"application/vnd.coreos.ignition+json", + b"application/vnd.criticaltools.wbs+xml", + b"application/vnd.cryptii.pipe+json", + b"application/vnd.ctct.ws+xml", + b"application/vnd.cyan.dean.root+xml", + b"application/vnd.cyclonedx+json", + b"application/vnd.cyclonedx+xml", + b"application/vnd.dart", + b"application/vnd.datapackage+json", + b"application/vnd.dataresource+json", + b"application/vnd.dece.ttml+xml", + b"application/vnd.dm.delegation+xml", + b"application/vnd.document+json", + b"application/vnd.drive+json", + b"application/vnd.dvb.dvbisl+xml", + 
b"application/vnd.dvb.notif-aggregate-root+xml", + b"application/vnd.dvb.notif-container+xml", + b"application/vnd.dvb.notif-generic+xml", + b"application/vnd.dvb.notif-ia-msglist+xml", + b"application/vnd.dvb.notif-ia-registration-request+xml", + b"application/vnd.dvb.notif-ia-registration-response+xml", + b"application/vnd.dvb.notif-init+xml", + b"application/vnd.emclient.accessrequest+xml", + b"application/vnd.eprints.data+xml", + b"application/vnd.eszigno3+xml", + b"application/vnd.etsi.aoc+xml", + b"application/vnd.etsi.cug+xml", + b"application/vnd.etsi.iptvcommand+xml", + b"application/vnd.etsi.iptvdiscovery+xml", + b"application/vnd.etsi.iptvprofile+xml", + b"application/vnd.etsi.iptvsad-bc+xml", + b"application/vnd.etsi.iptvsad-cod+xml", + b"application/vnd.etsi.iptvsad-npvr+xml", + b"application/vnd.etsi.iptvservice+xml", + b"application/vnd.etsi.iptvsync+xml", + b"application/vnd.etsi.iptvueprofile+xml", + b"application/vnd.etsi.mcid+xml", + b"application/vnd.etsi.overload-control-policy-dataset+xml", + b"application/vnd.etsi.pstn+xml", + b"application/vnd.etsi.sci+xml", + b"application/vnd.etsi.simservs+xml", + b"application/vnd.etsi.tsl+xml", + b"application/vnd.fujifilm.fb.jfi+xml", + b"application/vnd.futoin+json", + b"application/vnd.gentics.grd+json", + b"application/vnd.geo+json", + b"application/vnd.geocube+xml", + b"application/vnd.google-earth.kml+xml", + b"application/vnd.gov.sk.e-form+xml", + b"application/vnd.gov.sk.xmldatacontainer+xml", + b"application/vnd.hal+json", + b"application/vnd.hal+xml", + b"application/vnd.handheld-entertainment+xml", + b"application/vnd.hc+json", + b"application/vnd.heroku+json", + b"application/vnd.hyper+json", + b"application/vnd.hyper-item+json", + b"application/vnd.hyperdrive+json", + b"application/vnd.ims.lis.v2.result+json", + b"application/vnd.ims.lti.v2.toolconsumerprofile+json", + b"application/vnd.ims.lti.v2.toolproxy+json", + b"application/vnd.ims.lti.v2.toolproxy.id+json", + 
b"application/vnd.ims.lti.v2.toolsettings+json", + b"application/vnd.ims.lti.v2.toolsettings.simple+json", + b"application/vnd.informedcontrol.rms+xml", + b"application/vnd.infotech.project+xml", + b"application/vnd.iptc.g2.catalogitem+xml", + b"application/vnd.iptc.g2.conceptitem+xml", + b"application/vnd.iptc.g2.knowledgeitem+xml", + b"application/vnd.iptc.g2.newsitem+xml", + b"application/vnd.iptc.g2.newsmessage+xml", + b"application/vnd.iptc.g2.packageitem+xml", + b"application/vnd.iptc.g2.planningitem+xml", + b"application/vnd.irepository.package+xml", + b"application/vnd.las.las+json", + b"application/vnd.las.las+xml", + b"application/vnd.leap+json", + b"application/vnd.liberty-request+xml", + b"application/vnd.llamagraphics.life-balance.exchange+xml", + b"application/vnd.marlin.drm.actiontoken+xml", + b"application/vnd.marlin.drm.conftoken+xml", + b"application/vnd.marlin.drm.license+xml", + b"application/vnd.mason+json", + b"application/vnd.micro+json", + b"application/vnd.miele+json", + b"application/vnd.mozilla.xul+xml", + b"application/vnd.ms-fontobject", + b"application/vnd.ms-office.activex+xml", + b"application/vnd.ms-opentype", + b"application/vnd.ms-playready.initiator+xml", + b"application/vnd.ms-printdevicecapabilities+xml", + b"application/vnd.ms-printing.printticket+xml", + b"application/vnd.ms-printschematicket+xml", + b"application/vnd.nearst.inv+json", + b"application/vnd.nokia.conml+xml", + b"application/vnd.nokia.iptv.config+xml", + b"application/vnd.nokia.landmark+xml", + b"application/vnd.nokia.landmarkcollection+xml", + b"application/vnd.nokia.n-gage.ac+xml", + b"application/vnd.nokia.pcd+xml", + b"application/vnd.oci.image.manifest.v1+json", + b"application/vnd.oftn.l10n+json", + b"application/vnd.oipf.contentaccessdownload+xml", + b"application/vnd.oipf.contentaccessstreaming+xml", + b"application/vnd.oipf.dae.svg+xml", + b"application/vnd.oipf.dae.xhtml+xml", + b"application/vnd.oipf.mippvcontrolmessage+xml", + 
b"application/vnd.oipf.spdiscovery+xml", + b"application/vnd.oipf.spdlist+xml", + b"application/vnd.oipf.ueprofile+xml", + b"application/vnd.oipf.userprofile+xml", + b"application/vnd.oma.bcast.associated-procedure-parameter+xml", + b"application/vnd.oma.bcast.drm-trigger+xml", + b"application/vnd.oma.bcast.imd+xml", + b"application/vnd.oma.bcast.notification+xml", + b"application/vnd.oma.bcast.sgdd+xml", + b"application/vnd.oma.bcast.smartcard-trigger+xml", + b"application/vnd.oma.bcast.sprov+xml", + b"application/vnd.oma.cab-address-book+xml", + b"application/vnd.oma.cab-feature-handler+xml", + b"application/vnd.oma.cab-pcc+xml", + b"application/vnd.oma.cab-subs-invite+xml", + b"application/vnd.oma.cab-user-prefs+xml", + b"application/vnd.oma.dd2+xml", + b"application/vnd.oma.drm.risd+xml", + b"application/vnd.oma.group-usage-list+xml", + b"application/vnd.oma.lwm2m+json", + b"application/vnd.oma.pal+xml", + b"application/vnd.oma.poc.detailed-progress-report+xml", + b"application/vnd.oma.poc.final-report+xml", + b"application/vnd.oma.poc.groups+xml", + b"application/vnd.oma.poc.invocation-descriptor+xml", + b"application/vnd.oma.poc.optimized-progress-report+xml", + b"application/vnd.oma.scidm.messages+xml", + b"application/vnd.oma.xcap-directory+xml", + b"application/vnd.omads-email+xml", + b"application/vnd.omads-file+xml", + b"application/vnd.omads-folder+xml", + b"application/vnd.openblox.game+xml", + b"application/vnd.openstreetmap.data+xml", + b"application/vnd.openxmlformats-officedocument.custom-properties+xml", + b"application/vnd.openxmlformats-officedocument.customxmlproperties+xml", + b"application/vnd.openxmlformats-officedocument.drawing+xml", + b"application/vnd.openxmlformats-officedocument.drawingml.chart+xml", + b"application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml", + b"application/vnd.openxmlformats-officedocument.drawingml.diagramcolors+xml", + b"application/vnd.openxmlformats-officedocument.drawingml.diagramdata+xml", + 
b"application/vnd.openxmlformats-officedocument.drawingml.diagramlayout+xml", + b"application/vnd.openxmlformats-officedocument.drawingml.diagramstyle+xml", + b"application/vnd.openxmlformats-officedocument.extended-properties+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.commentauthors+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.comments+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.handoutmaster+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.notesmaster+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.notesslide+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.presprops+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.slide+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.slidelayout+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.slidemaster+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.slideupdateinfo+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.tablestyles+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.tags+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.template.main+xml", + b"application/vnd.openxmlformats-officedocument.presentationml.viewprops+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.calcchain+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml", + 
b"application/vnd.openxmlformats-officedocument.spreadsheetml.externallink+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcachedefinition+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcacherecords+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivottable+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.querytable+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.revisionheaders+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.revisionlog+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.sharedstrings+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.sheetmetadata+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.tablesinglecells+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.usernames+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.volatiledependencies+xml", + b"application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml", + b"application/vnd.openxmlformats-officedocument.theme+xml", + b"application/vnd.openxmlformats-officedocument.themeoverride+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.fonttable+xml", + 
b"application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml", + b"application/vnd.openxmlformats-officedocument.wordprocessingml.websettings+xml", + b"application/vnd.openxmlformats-package.core-properties+xml", + b"application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml", + b"application/vnd.openxmlformats-package.relationships+xml", + b"application/vnd.oracle.resource+json", + b"application/vnd.otps.ct-kip+xml", + b"application/vnd.pagerduty+json", + b"application/vnd.poc.group-advertisement+xml", + b"application/vnd.pwg-xhtml-print+xml", + b"application/vnd.radisys.moml+xml", + b"application/vnd.radisys.msml+xml", + b"application/vnd.radisys.msml-audit+xml", + b"application/vnd.radisys.msml-audit-conf+xml", + b"application/vnd.radisys.msml-audit-conn+xml", + b"application/vnd.radisys.msml-audit-dialog+xml", + b"application/vnd.radisys.msml-audit-stream+xml", + b"application/vnd.radisys.msml-conf+xml", + b"application/vnd.radisys.msml-dialog+xml", + b"application/vnd.radisys.msml-dialog-base+xml", + b"application/vnd.radisys.msml-dialog-fax-detect+xml", + b"application/vnd.radisys.msml-dialog-fax-sendrecv+xml", + b"application/vnd.radisys.msml-dialog-group+xml", + b"application/vnd.radisys.msml-dialog-speech+xml", + b"application/vnd.radisys.msml-dialog-transform+xml", + b"application/vnd.recordare.musicxml+xml", + b"application/vnd.restful+json", + b"application/vnd.route66.link66+xml", + b"application/vnd.seis+json", + b"application/vnd.shootproof+json", + b"application/vnd.shopkick+json", + 
b"application/vnd.siren+json", + b"application/vnd.software602.filler.form+xml", + b"application/vnd.solent.sdkm+xml", + b"application/vnd.sun.wadl+xml", + b"application/vnd.sycle+xml", + b"application/vnd.syncml+xml", + b"application/vnd.syncml.dm+xml", + b"application/vnd.syncml.dmddf+xml", + b"application/vnd.syncml.dmtnds+xml", + b"application/vnd.tableschema+json", + b"application/vnd.think-cell.ppttc+json", + b"application/vnd.tmd.mediaflex.api+xml", + b"application/vnd.uoml+xml", + b"application/vnd.vel+json", + b"application/vnd.wv.csp+xml", + b"application/vnd.wv.ssp+xml", + b"application/vnd.xacml+json", + b"application/vnd.xmi+xml", + b"application/vnd.yamaha.openscoreformat.osfpvg+xml", + b"application/vnd.zzazz.deck+xml", + b"application/voicexml+xml", + b"application/voucher-cms+json", + b"application/wasm", + b"application/watcherinfo+xml", + b"application/webpush-options+json", + b"application/wsdl+xml", + b"application/wspolicy+xml", + b"application/x-dtbncx+xml", + b"application/x-dtbook+xml", + b"application/x-dtbresource+xml", + b"application/x-httpd-php", + b"application/x-javascript", + b"application/x-ns-proxy-autoconfig", + b"application/x-sh", + b"application/x-tar", + b"application/x-virtualbox-hdd", + b"application/x-virtualbox-ova", + b"application/x-virtualbox-ovf", + b"application/x-virtualbox-vbox", + b"application/x-virtualbox-vdi", + b"application/x-virtualbox-vhd", + b"application/x-virtualbox-vmdk", + b"application/x-web-app-manifest+json", + b"application/x-www-form-urlencoded", + b"application/x-xliff+xml", + b"application/xacml+xml", + b"application/xaml+xml", + b"application/xcap-att+xml", + b"application/xcap-caps+xml", + b"application/xcap-diff+xml", + b"application/xcap-el+xml", + b"application/xcap-error+xml", + b"application/xcap-ns+xml", + b"application/xcon-conference-info+xml", + b"application/xcon-conference-info-diff+xml", + b"application/xenc+xml", + b"application/xhtml+xml", + b"application/xhtml-voice+xml", + 
b"application/xliff+xml", + b"application/xml", + b"application/xml-dtd", + b"application/xml-patch+xml", + b"application/xmpp+xml", + b"application/xop+xml", + b"application/xproc+xml", + b"application/xslt+xml", + b"application/xspf+xml", + b"application/xv+xml", + b"application/yang-data+json", + b"application/yang-data+xml", + b"application/yang-patch+json", + b"application/yang-patch+xml", + b"application/yin+xml", + b"font/otf", + b"font/ttf", + b"image/bmp", + b"image/svg+xml", + b"image/vnd.adobe.photoshop", + b"image/x-icon", + b"image/x-ms-bmp", + b"message/imdn+xml", + b"message/rfc822", + b"model/gltf+json", + b"model/gltf-binary", + b"model/vnd.collada+xml", + b"model/vnd.moml+xml", + b"model/x3d+xml", + b"text/cache-manifest", + b"text/calender", + b"text/cmd", + b"text/css", + b"text/csv", + b"text/html", + b"text/javascript", + b"text/jsx", + b"text/less", + b"text/markdown", + b"text/mdx", + b"text/n3", + b"text/plain", + b"text/richtext", + b"text/rtf", + b"text/tab-separated-values", + b"text/uri-list", + b"text/vcard", + b"text/vtt", + b"text/x-gwt-rpc", + b"text/x-jquery-tmpl", + b"text/x-markdown", + b"text/x-org", + b"text/x-processing", + b"text/x-suse-ymp", + b"text/xml", + b"text/yaml", + b"x-shader/x-fragment", + b"x-shader/x-vertex", +}; + +fn known_compressible(ct: &[u8]) -> bool { + CONTENT_TYPES.contains(ct) +} + +fn known_mime(ct: &[u8]) -> Option { + let s = std::str::from_utf8(ct).ok()?; + let m = mime::Mime::from_str(s).ok()?; + Some(known_compressible(m.essence_str().as_bytes())) +} + +/// Determine if the supplied content type is considered compressible +pub fn is_content_compressible(ct: impl AsRef<[u8]>) -> bool { + let ct = ct.as_ref(); + let prefix = ct.split(|c| *c == b';').next().unwrap(); + known_compressible(prefix) || known_mime(prefix).unwrap_or_default() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn non_compressible_content_type() { + assert!(!is_content_compressible("application/vnd.deno+json")); + 
assert!(!is_content_compressible("text/fake")); + } + + #[test] + fn compressible_content_type() { + assert!(is_content_compressible("application/json")); + assert!(is_content_compressible("text/plain;charset=UTF-8")); + assert!(is_content_compressible("text/PlAIn; charset=utf-8")); + } +} diff --git a/vendor/deno_http/fly_accept_encoding.rs b/vendor/deno_http/fly_accept_encoding.rs new file mode 100644 index 00000000..4d6fd223 --- /dev/null +++ b/vendor/deno_http/fly_accept_encoding.rs @@ -0,0 +1,237 @@ +// Copyright 2018 Yoshua Wuyts. All rights reserved. MIT license. +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Forked from https://github.com/superfly/accept-encoding/blob/1cded757ec7ff3916e5bfe7441db76cdc48170dc/ +// Forked to support both http 0.3 and http 1.0 crates. + +use itertools::Itertools; + +/// A list enumerating the categories of errors in this crate. +/// +/// This list is intended to grow over time and it is not recommended to +/// exhaustively match against it. +/// +/// It is used with the [`Error`] struct. +/// +/// [`Error`]: std.struct.Error.html +#[derive(Debug, thiserror::Error)] +pub enum EncodingError { + /// Invalid header encoding. + #[error("Invalid header encoding.")] + InvalidEncoding, + /// The encoding scheme is unknown. + #[error("Unknown encoding scheme.")] + UnknownEncoding, +} + +/// Encodings to use. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +pub enum Encoding { + /// The Gzip encoding. + Gzip, + /// The Deflate encoding. + Deflate, + /// The Brotli encoding. + Brotli, + /// The Zstd encoding. + Zstd, + /// No encoding. + Identity, +} + +impl Encoding { + /// Parses a given string into its corresponding encoding. 
+ fn parse(s: &str) -> Result, EncodingError> { + match s { + "gzip" => Ok(Some(Encoding::Gzip)), + "deflate" => Ok(Some(Encoding::Deflate)), + "br" => Ok(Some(Encoding::Brotli)), + "zstd" => Ok(Some(Encoding::Zstd)), + "identity" => Ok(Some(Encoding::Identity)), + "*" => Ok(None), + _ => Err(EncodingError::UnknownEncoding), + } + } +} + +/// Select the encoding with the largest qval or the first with qval ~= 1 +pub fn preferred( + encodings: impl Iterator, f32), EncodingError>>, +) -> Result, EncodingError> { + let mut preferred_encoding = None; + let mut max_qval = 0.0; + + for r in encodings { + let (encoding, qval) = r?; + if (qval - 1.0f32).abs() < 0.01 { + return Ok(encoding); + } else if qval > max_qval { + preferred_encoding = encoding; + max_qval = qval; + } + } + + Ok(preferred_encoding) +} + +/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values. +/// +/// Compatible with `http` crate for version 0.2.x. +pub fn encodings_iter_http_02( + headers: &http_v02::HeaderMap, +) -> impl Iterator, f32), EncodingError>> + '_ { + let iter = headers + .get_all(http_v02::header::ACCEPT_ENCODING) + .iter() + .map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding)); + encodings_iter_inner(iter) +} + +/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values. +/// +/// Compatible with `http` crate for version 1.x. +pub fn encodings_iter_http_1( + headers: &http::HeaderMap, +) -> impl Iterator, f32), EncodingError>> + '_ { + let iter = headers + .get_all(http::header::ACCEPT_ENCODING) + .iter() + .map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding)); + encodings_iter_inner(iter) +} + +/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values. 
+fn encodings_iter_inner<'s>( + headers: impl Iterator> + 's, +) -> impl Iterator, f32), EncodingError>> + 's { + headers + .map_ok(|s| s.split(',').map(str::trim)) + .flatten_ok() + .filter_map_ok(|v| { + let (e, q) = match v.split_once(";q=") { + Some((e, q)) => (e, q), + None => return Some(Ok((Encoding::parse(v).ok()?, 1.0f32))), + }; + let encoding = Encoding::parse(e).ok()?; // ignore unknown encodings + let qval = match q.parse() { + Ok(f) if f > 1.0 => return Some(Err(EncodingError::InvalidEncoding)), // q-values over 1 are unacceptable, + Ok(f) => f, + Err(_) => return Some(Err(EncodingError::InvalidEncoding)), + }; + Some(Ok((encoding, qval))) + }) + .flatten() +} + +#[cfg(test)] +mod tests { + use super::*; + use http_v02::header::ACCEPT_ENCODING; + use http_v02::HeaderMap; + use http_v02::HeaderValue; + + fn encodings( + headers: &HeaderMap, + ) -> Result, f32)>, EncodingError> { + encodings_iter_http_02(headers).collect() + } + + fn parse(headers: &HeaderMap) -> Result, EncodingError> { + preferred(encodings_iter_http_02(headers)) + } + + #[test] + fn single_encoding() { + let mut headers = HeaderMap::new(); + headers.insert(ACCEPT_ENCODING, HeaderValue::from_str("gzip").unwrap()); + + let encoding = parse(&headers).unwrap().unwrap(); + assert_eq!(encoding, Encoding::Gzip); + } + + #[test] + fn multiple_encodings() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("gzip, deflate, br").unwrap(), + ); + + let encoding = parse(&headers).unwrap().unwrap(); + assert_eq!(encoding, Encoding::Gzip); + } + + #[test] + fn single_encoding_with_qval() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("deflate;q=1.0").unwrap(), + ); + + let encoding = parse(&headers).unwrap().unwrap(); + assert_eq!(encoding, Encoding::Deflate); + } + + #[test] + fn multiple_encodings_with_qval_1() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + 
HeaderValue::from_str("deflate, gzip;q=1.0, *;q=0.5").unwrap(), + ); + + let encoding = parse(&headers).unwrap().unwrap(); + assert_eq!(encoding, Encoding::Deflate); + } + + #[test] + fn multiple_encodings_with_qval_2() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("gzip;q=0.5, deflate;q=1.0, *;q=0.5").unwrap(), + ); + + let encoding = parse(&headers).unwrap().unwrap(); + assert_eq!(encoding, Encoding::Deflate); + } + + #[test] + fn multiple_encodings_with_qval_3() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("gzip;q=0.5, deflate;q=0.75, *;q=1.0").unwrap(), + ); + + let encoding = parse(&headers).unwrap(); + assert!(encoding.is_none()); + } + + #[test] + fn list_encodings() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("zstd;q=1.0, deflate;q=0.8, br;q=0.9").unwrap(), + ); + + let encodings = encodings(&headers).unwrap(); + assert_eq!(encodings[0], (Some(Encoding::Zstd), 1.0)); + assert_eq!(encodings[1], (Some(Encoding::Deflate), 0.8)); + assert_eq!(encodings[2], (Some(Encoding::Brotli), 0.9)); + } + + #[test] + fn list_encodings_ignore_unknown() { + let mut headers = HeaderMap::new(); + headers.insert( + ACCEPT_ENCODING, + HeaderValue::from_str("zstd;q=1.0, unknown;q=0.8, br;q=0.9").unwrap(), + ); + + let encodings = encodings(&headers).unwrap(); + assert_eq!(encodings[0], (Some(Encoding::Zstd), 1.0)); + assert_eq!(encodings[1], (Some(Encoding::Brotli), 0.9)); + } +} diff --git a/vendor/deno_http/http_next.rs b/vendor/deno_http/http_next.rs new file mode 100644 index 00000000..7dbac602 --- /dev/null +++ b/vendor/deno_http/http_next.rs @@ -0,0 +1,1400 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+use crate::compressible::is_content_compressible; +use crate::extract_network_stream; +use crate::network_buffered_stream::NetworkStreamPrefixCheck; +use crate::request_body::HttpRequestBody; +use crate::request_properties::HttpConnectionProperties; +use crate::request_properties::HttpListenProperties; +use crate::request_properties::HttpPropertyExtractor; +use crate::response_body::Compression; +use crate::response_body::ResponseBytesInner; +use crate::service::handle_request; +use crate::service::http_general_trace; +use crate::service::http_trace; +use crate::service::HttpRecord; +use crate::service::HttpRecordResponse; +use crate::service::HttpRequestBodyAutocloser; +use crate::service::HttpServerState; +use crate::service::SignallingRc; +use crate::websocket_upgrade::WebSocketUpgrade; +use crate::LocalExecutor; +use crate::Options; +use cache_control::CacheControl; +use deno_core::external; +use deno_core::futures::future::poll_fn; +use deno_core::futures::TryFutureExt; +use deno_core::op2; +use deno_core::serde_v8::from_v8; +use deno_core::unsync::spawn; +use deno_core::unsync::JoinHandle; +use deno_core::v8; +use deno_core::AsyncRefCell; +use deno_core::AsyncResult; +use deno_core::BufView; +use deno_core::ByteString; +use deno_core::CancelFuture; +use deno_core::CancelHandle; +use deno_core::CancelTryFuture; +use deno_core::ExternalPointer; +use deno_core::JsBuffer; +use deno_core::OpState; +use deno_core::RcRef; +use deno_core::Resource; +use deno_core::ResourceId; +use deno_net::ops_tls::TlsStream; +use deno_net::raw::NetworkStream; +use deno_websocket::ws_create_server_stream; +use hyper::body::Incoming; +use hyper::header::HeaderMap; +use hyper::header::ACCEPT_ENCODING; +use hyper::header::CACHE_CONTROL; +use hyper::header::CONTENT_ENCODING; +use hyper::header::CONTENT_LENGTH; +use hyper::header::CONTENT_RANGE; +use hyper::header::CONTENT_TYPE; +use hyper::header::COOKIE; +use hyper::http::HeaderName; +use hyper::http::HeaderValue; +use 
hyper::server::conn::http1; +use hyper::server::conn::http2; +use hyper::service::service_fn; +use hyper::service::HttpService; +use hyper::StatusCode; +use hyper_util::rt::TokioIo; +use once_cell::sync::Lazy; +use smallvec::SmallVec; +use std::borrow::Cow; +use std::cell::RefCell; +use std::ffi::c_void; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::ptr::null; +use std::rc::Rc; + +use super::fly_accept_encoding; +use fly_accept_encoding::Encoding; + +use tokio::io::AsyncReadExt; +use tokio::io::AsyncWriteExt; + +type Request = hyper::Request; + +static USE_WRITEV: Lazy = Lazy::new(|| { + let enable = std::env::var("DENO_USE_WRITEV").ok(); + + if let Some(val) = enable { + return !val.is_empty(); + } + + false +}); + +/// All HTTP/2 connections start with this byte string. +/// +/// In HTTP/2, each endpoint is required to send a connection preface as a final confirmation +/// of the protocol in use and to establish the initial settings for the HTTP/2 connection. The +/// client and server each send a different connection preface. +/// +/// The client connection preface starts with a sequence of 24 octets, which in hex notation is: +/// +/// 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a +/// +/// That is, the connection preface starts with the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence +/// MUST be followed by a SETTINGS frame (Section 6.5), which MAY be empty. +const HTTP2_PREFIX: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/// ALPN negotiation for "h2" +const TLS_ALPN_HTTP_2: &[u8] = b"h2"; + +/// ALPN negotiation for "http/1.1" +const TLS_ALPN_HTTP_11: &[u8] = b"http/1.1"; + +/// Name a trait for streams we can serve HTTP over. 
+trait HttpServeStream: + tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static +{ +} +impl< + S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static, + > HttpServeStream for S +{ +} + +#[repr(transparent)] +struct RcHttpRecord(Rc); + +// Register the [`HttpRecord`] as an external. +external!(RcHttpRecord, "http record"); + +/// Construct Rc from raw external pointer, consuming +/// refcount. You must make sure the external is deleted on the JS side. +macro_rules! take_external { + ($external:expr, $args:tt) => {{ + let ptr = ExternalPointer::::from_raw($external); + let record = ptr.unsafely_take().0; + http_trace!(record, $args); + record + }}; +} + +/// Clone Rc from raw external pointer. +macro_rules! clone_external { + ($external:expr, $args:tt) => {{ + let ptr = ExternalPointer::::from_raw($external); + ptr.unsafely_deref().0.clone() + }}; +} + +#[derive(Debug, thiserror::Error)] +pub enum HttpNextError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("{0}")] + Io(#[from] io::Error), + #[error(transparent)] + WebSocketUpgrade(crate::websocket_upgrade::WebSocketUpgradeError), + #[error("{0}")] + Hyper(#[from] hyper::Error), + #[error(transparent)] + JoinError(#[from] tokio::task::JoinError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error(transparent)] + HttpPropertyExtractor(deno_core::error::AnyError), + #[error(transparent)] + UpgradeUnavailable(#[from] crate::service::UpgradeUnavailableError), +} + +#[op2(fast)] +#[smi] +pub fn op_http_upgrade_raw( + state: &mut OpState, + external: *const c_void, +) -> Result { + // SAFETY: external is deleted before calling this op. 
+ let http = unsafe { take_external!(external, "op_http_upgrade_raw") }; + + // Stage 1: extract the upgrade future + let upgrade = http.upgrade()?; + let (read, write) = tokio::io::duplex(1024); + let (read_rx, write_tx) = tokio::io::split(read); + let (mut write_rx, mut read_tx) = tokio::io::split(write); + spawn(async move { + let mut upgrade_stream = WebSocketUpgrade::<()>::default(); + + // Stage 2: Extract the Upgraded connection + let mut buf = [0; 1024]; + let upgraded = loop { + let read = Pin::new(&mut write_rx).read(&mut buf).await?; + match upgrade_stream.write(&buf[..read]) { + Ok(None) => continue, + Ok(Some((response, bytes))) => { + let (response_parts, _) = response.into_parts(); + *http.response_parts() = response_parts; + http.complete(); + let mut upgraded = TokioIo::new(upgrade.await?); + upgraded.write_all(&bytes).await?; + break upgraded; + } + Err(err) => return Err(HttpNextError::WebSocketUpgrade(err)), + } + }; + + // Stage 3: Pump the data + let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded); + + spawn(async move { + let mut buf = [0; 1024]; + loop { + let read = upgraded_rx.read(&mut buf).await?; + if read == 0 { + break; + } + read_tx.write_all(&buf[..read]).await?; + } + Ok::<_, HttpNextError>(()) + }); + spawn(async move { + let mut buf = [0; 1024]; + loop { + let read = write_rx.read(&mut buf).await?; + if read == 0 { + break; + } + upgraded_tx.write_all(&buf[..read]).await?; + } + Ok::<_, HttpNextError>(()) + }); + + Ok(()) + }); + + Ok( + state + .resource_table + .add(UpgradeStream::new(read_rx, write_tx)), + ) +} + +#[op2(async)] +#[smi] +pub async fn op_http_upgrade_websocket_next( + state: Rc>, + external: *const c_void, + #[serde] headers: Vec<(ByteString, ByteString)>, +) -> Result { + let http = + // SAFETY: external is deleted before calling this op. 
+ unsafe { take_external!(external, "op_http_upgrade_websocket_next") }; + // Stage 1: set the response to 101 Switching Protocols and send it + let upgrade = http.upgrade()?; + { + let mut response_parts = http.response_parts(); + response_parts.status = StatusCode::SWITCHING_PROTOCOLS; + for (name, value) in headers { + response_parts.headers.append( + HeaderName::from_bytes(&name).unwrap(), + HeaderValue::from_bytes(&value).unwrap(), + ); + } + } + http.complete(); + + // Stage 2: wait for the request to finish upgrading + let upgraded = upgrade.await?; + + // Stage 3: take the extracted raw network stream and upgrade it to a websocket, then return it + let (stream, bytes) = extract_network_stream(upgraded); + Ok(ws_create_server_stream( + &mut state.borrow_mut(), + stream, + bytes, + )) +} + +#[op2(fast)] +pub fn op_http_set_promise_complete(external: *const c_void, status: u16) { + let http = + // SAFETY: external is deleted before calling this op. + unsafe { take_external!(external, "op_http_set_promise_complete") }; + set_promise_complete(http, status); +} + +fn set_promise_complete(http: Rc, status: u16) { + // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we + // will quietly ignore invalid values. + if let Ok(code) = StatusCode::from_u16(status) { + http.response_parts().status = code; + } + http.complete(); +} + +#[op2] +pub fn op_http_get_request_method_and_url<'scope, HTTP>( + scope: &mut v8::HandleScope<'scope>, + external: *const c_void, +) -> v8::Local<'scope, v8::Array> +where + HTTP: HttpPropertyExtractor, +{ + let http = + // SAFETY: op is called with external. 
+ unsafe { clone_external!(external, "op_http_get_request_method_and_url") }; + let request_info = http.request_info(); + let request_parts = http.request_parts(); + let request_properties = HTTP::request_properties( + &request_info, + &request_parts.uri, + &request_parts.headers, + ); + + let method: v8::Local = v8::String::new_from_utf8( + scope, + request_parts.method.as_str().as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(); + + let authority: v8::Local = match request_properties.authority { + Some(authority) => v8::String::new_from_utf8( + scope, + authority.as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(), + None => v8::undefined(scope).into(), + }; + + // Only extract the path part - we handle authority elsewhere + let path = match request_parts.uri.path_and_query() { + Some(path_and_query) => { + let path = path_and_query.as_str(); + if matches!(path.as_bytes().first(), Some(b'/' | b'*')) { + Cow::Borrowed(path) + } else { + Cow::Owned(format!("/{}", path)) + } + } + None => Cow::Borrowed(""), + }; + + let path: v8::Local = v8::String::new_from_utf8( + scope, + path.as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(); + + let peer_address: v8::Local = v8::String::new_from_utf8( + scope, + request_info.peer_address.as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(); + + let port: v8::Local = match request_info.peer_port { + Some(port) => v8::Integer::new(scope, port.into()).into(), + None => v8::undefined(scope).into(), + }; + + let vec = [method, authority, path, peer_address, port]; + v8::Array::new_with_elements(scope, vec.as_slice()) +} + +#[op2] +#[serde] +pub fn op_http_get_request_header( + external: *const c_void, + #[string] name: String, +) -> Option { + let http = + // SAFETY: op is called with external. 
+ unsafe { clone_external!(external, "op_http_get_request_header") }; + let request_parts = http.request_parts(); + let value = request_parts.headers.get(name); + value.map(|value| value.as_bytes().into()) +} + +#[op2] +pub fn op_http_get_request_headers<'scope>( + scope: &mut v8::HandleScope<'scope>, + external: *const c_void, +) -> v8::Local<'scope, v8::Array> { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_get_request_headers") }; + let headers = &http.request_parts().headers; + // Two slots for each header key/value pair + let mut vec: SmallVec<[v8::Local; 32]> = + SmallVec::with_capacity(headers.len() * 2); + + let mut cookies: Option> = None; + for (name, value) in headers { + if name == COOKIE { + if let Some(ref mut cookies) = cookies { + cookies.push(value.as_bytes()); + } else { + cookies = Some(vec![value.as_bytes()]); + } + } else { + vec.push( + v8::String::new_from_one_byte( + scope, + name.as_ref(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(), + ); + vec.push( + v8::String::new_from_one_byte( + scope, + value.as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(), + ); + } + } + + // We treat cookies specially, because we don't want them to get them + // mangled by the `Headers` object in JS. What we do is take all cookie + // headers and concat them into a single cookie header, separated by + // semicolons. 
+ // TODO(mmastrac): This should probably happen on the JS side on-demand + if let Some(cookies) = cookies { + let cookie_sep = "; ".as_bytes(); + + vec.push( + v8::String::new_external_onebyte_static(scope, COOKIE.as_ref()) + .unwrap() + .into(), + ); + vec.push( + v8::String::new_from_one_byte( + scope, + cookies.join(cookie_sep).as_ref(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(), + ); + } + + v8::Array::new_with_elements(scope, vec.as_slice()) +} + +#[op2(fast)] +#[smi] +pub fn op_http_read_request_body( + state: Rc>, + external: *const c_void, +) -> ResourceId { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_read_request_body") }; + let rid = if let Some(incoming) = http.take_request_body() { + let body_resource = Rc::new(HttpRequestBody::new(incoming)); + state.borrow_mut().resource_table.add_rc(body_resource) + } else { + // This should not be possible, but rather than panicking we'll return an invalid + // resource value to JavaScript. + ResourceId::MAX + }; + http.put_resource(HttpRequestBodyAutocloser::new(rid, state.clone())); + rid +} + +#[op2(fast)] +pub fn op_http_set_response_header( + external: *const c_void, + #[string(onebyte)] name: Cow<[u8]>, + #[string(onebyte)] value: Cow<[u8]>, +) { + let http = + // SAFETY: op is called with external. 
+ unsafe { clone_external!(external, "op_http_set_response_header") }; + let mut response_parts = http.response_parts(); + // These are valid latin-1 strings + let name = HeaderName::from_bytes(&name).unwrap(); + let value = match value { + Cow::Borrowed(bytes) => HeaderValue::from_bytes(bytes).unwrap(), + // SAFETY: These are valid latin-1 strings + Cow::Owned(bytes_vec) => unsafe { + HeaderValue::from_maybe_shared_unchecked(bytes::Bytes::from(bytes_vec)) + }, + }; + response_parts.headers.append(name, value); +} + +#[op2(fast)] +pub fn op_http_set_response_headers( + scope: &mut v8::HandleScope, + external: *const c_void, + headers: v8::Local, +) { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_set_response_headers") }; + // TODO(mmastrac): Invalid headers should be handled? + let mut response_parts = http.response_parts(); + + let len = headers.length(); + let header_len = len * 2; + response_parts + .headers + .reserve(header_len.try_into().unwrap()); + + for i in 0..len { + let item = headers.get_index(scope, i).unwrap(); + let pair = v8::Local::::try_from(item).unwrap(); + let name = pair.get_index(scope, 0).unwrap(); + let value = pair.get_index(scope, 1).unwrap(); + + let v8_name: ByteString = from_v8(scope, name).unwrap(); + let v8_value: ByteString = from_v8(scope, value).unwrap(); + let header_name = HeaderName::from_bytes(&v8_name).unwrap(); + let header_value = + // SAFETY: These are valid latin-1 strings + unsafe { HeaderValue::from_maybe_shared_unchecked(v8_value) }; + response_parts.headers.append(header_name, header_value); + } +} + +#[op2] +pub fn op_http_set_response_trailers( + external: *const c_void, + #[serde] trailers: Vec<(ByteString, ByteString)>, +) { + let http = + // SAFETY: op is called with external. 
+ unsafe { clone_external!(external, "op_http_set_response_trailers") }; + let mut trailer_map: HeaderMap = HeaderMap::with_capacity(trailers.len()); + for (name, value) in trailers { + // These are valid latin-1 strings + let name = HeaderName::from_bytes(&name).unwrap(); + // SAFETY: These are valid latin-1 strings + let value = unsafe { HeaderValue::from_maybe_shared_unchecked(value) }; + trailer_map.append(name, value); + } + *http.trailers() = Some(trailer_map); +} + +fn is_request_compressible( + length: Option, + headers: &HeaderMap, +) -> Compression { + if let Some(length) = length { + // By the time we add compression headers and Accept-Encoding, it probably doesn't make sense + // to compress stuff that's smaller than this. + if length < 64 { + return Compression::None; + } + } + + let Some(accept_encoding) = headers.get(ACCEPT_ENCODING) else { + return Compression::None; + }; + + match accept_encoding.to_str() { + // Firefox and Chrome send this -- no need to parse + Ok("gzip, deflate, br") => return Compression::Brotli, + Ok("gzip, deflate, br, zstd") => return Compression::Brotli, + Ok("gzip") => return Compression::GZip, + Ok("br") => return Compression::Brotli, + _ => (), + } + + // Fall back to the expensive parser + let accepted = + fly_accept_encoding::encodings_iter_http_1(headers).filter(|r| { + matches!( + r, + Ok(( + Some(Encoding::Identity | Encoding::Gzip | Encoding::Brotli), + _ + )) + ) + }); + match fly_accept_encoding::preferred(accepted) { + Ok(Some(fly_accept_encoding::Encoding::Gzip)) => Compression::GZip, + Ok(Some(fly_accept_encoding::Encoding::Brotli)) => Compression::Brotli, + _ => Compression::None, + } +} + +fn is_response_compressible(headers: &HeaderMap) -> bool { + if let Some(content_type) = headers.get(CONTENT_TYPE) { + if !is_content_compressible(content_type) { + return false; + } + } else { + return false; + } + if headers.contains_key(CONTENT_ENCODING) { + return false; + } + if headers.contains_key(CONTENT_RANGE) { + 
return false; + } + if let Some(cache_control) = headers.get(CACHE_CONTROL) { + if let Ok(s) = std::str::from_utf8(cache_control.as_bytes()) { + if let Some(cache_control) = CacheControl::from_value(s) { + if cache_control.no_transform { + return false; + } + } + } + } + true +} + +fn modify_compressibility_from_response( + compression: Compression, + headers: &mut HeaderMap, +) -> Compression { + ensure_vary_accept_encoding(headers); + if compression == Compression::None { + return Compression::None; + } + if !is_response_compressible(headers) { + return Compression::None; + } + let encoding = match compression { + Compression::Brotli => "br", + Compression::GZip => "gzip", + _ => unreachable!(), + }; + weaken_etag(headers); + headers.remove(CONTENT_LENGTH); + headers.insert(CONTENT_ENCODING, HeaderValue::from_static(encoding)); + compression +} + +/// If the user provided a ETag header for uncompressed data, we need to ensure it is a +/// weak Etag header ("W/"). +fn weaken_etag(hmap: &mut HeaderMap) { + if let Some(etag) = hmap.get_mut(hyper::header::ETAG) { + if !etag.as_bytes().starts_with(b"W/") { + let mut v = Vec::with_capacity(etag.as_bytes().len() + 2); + v.extend(b"W/"); + v.extend(etag.as_bytes()); + *etag = v.try_into().unwrap(); + } + } +} + +// Set Vary: Accept-Encoding header for direct body response. +// Note: we set the header irrespective of whether or not we compress the data +// to make sure cache services do not serve uncompressed data to clients that +// support compression. +fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) { + if let Some(v) = hmap.get_mut(hyper::header::VARY) { + if let Ok(s) = v.to_str() { + if !s.to_lowercase().contains("accept-encoding") { + *v = format!("Accept-Encoding, {s}").try_into().unwrap() + } + return; + } + } + hmap.insert( + hyper::header::VARY, + HeaderValue::from_static("Accept-Encoding"), + ); +} + +/// Sets the appropriate response body. 
Use `force_instantiate_body` if you need +/// to ensure that the response is cleaned up correctly (eg: for resources). +fn set_response( + http: Rc, + length: Option, + status: u16, + force_instantiate_body: bool, + response_fn: impl FnOnce(Compression) -> ResponseBytesInner, +) { + // The request may have been cancelled by this point and if so, there's no need for us to + // do all of this work to send the response. + if !http.cancelled() { + let compression = + is_request_compressible(length, &http.request_parts().headers); + let mut response_headers = + std::cell::RefMut::map(http.response_parts(), |this| &mut this.headers); + let compression = + modify_compressibility_from_response(compression, &mut response_headers); + drop(response_headers); + http.set_response_body(response_fn(compression)); + + // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we + // will quietly ignore invalid values. + if let Ok(code) = StatusCode::from_u16(status) { + http.response_parts().status = code; + } + } else if force_instantiate_body { + response_fn(Compression::None).abort(); + } + + http.complete(); +} + +#[op2(fast)] +pub fn op_http_get_request_cancelled(external: *const c_void) -> bool { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_get_request_cancelled") }; + http.cancelled() +} + +#[op2(async)] +pub async fn op_http_request_on_cancel(external: *const c_void) { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_request_on_cancel") }; + let (tx, rx) = tokio::sync::oneshot::channel(); + + http.on_cancel(tx); + drop(http); + + rx.await.ok(); +} + +/// Returned promise resolves when body streaming finishes. +/// Call [`op_http_close_after_finish`] when done with the external. 
+#[op2(async)] +pub async fn op_http_set_response_body_resource( + state: Rc>, + external: *const c_void, + #[smi] stream_rid: ResourceId, + auto_close: bool, + status: u16, +) -> Result { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_set_response_body_resource") }; + + // IMPORTANT: We might end up requiring the OpState lock in set_response if we need to drop the request + // body resource so we _cannot_ hold the OpState lock longer than necessary. + + // If the stream is auto_close, we will hold the last ref to it until the response is complete. + // TODO(mmastrac): We should be using the same auto-close functionality rather than removing autoclose resources. + // It's possible things could fail elsewhere if code expects the rid to continue existing after the response has been + // returned. + let resource = { + let mut state = state.borrow_mut(); + if auto_close { + state + .resource_table + .take_any(stream_rid) + .map_err(HttpNextError::Resource)? + } else { + state + .resource_table + .get_any(stream_rid) + .map_err(HttpNextError::Resource)? + } + }; + + *http.needs_close_after_finish() = true; + + set_response( + http.clone(), + resource.size_hint().1.map(|s| s as usize), + status, + true, + move |compression| { + ResponseBytesInner::from_resource(compression, resource, auto_close) + }, + ); + + Ok(http.response_body_finished().await) +} + +#[op2(fast)] +pub fn op_http_close_after_finish(external: *const c_void) { + let http = + // SAFETY: external is deleted before calling this op. + unsafe { take_external!(external, "op_http_close_after_finish") }; + http.close_after_finish(); +} + +#[op2(fast)] +pub fn op_http_set_response_body_text( + external: *const c_void, + #[string] text: String, + status: u16, +) { + let http = + // SAFETY: external is deleted before calling this op. 
+ unsafe { take_external!(external, "op_http_set_response_body_text") }; + if !text.is_empty() { + set_response(http, Some(text.len()), status, false, |compression| { + ResponseBytesInner::from_vec(compression, text.into_bytes()) + }); + } else { + set_promise_complete(http, status); + } +} + +#[op2] +pub fn op_http_set_response_body_bytes( + external: *const c_void, + #[buffer] buffer: JsBuffer, + status: u16, +) { + let http = + // SAFETY: external is deleted before calling this op. + unsafe { take_external!(external, "op_http_set_response_body_bytes") }; + if !buffer.is_empty() { + set_response(http, Some(buffer.len()), status, false, |compression| { + ResponseBytesInner::from_bufview(compression, BufView::from(buffer)) + }); + } else { + set_promise_complete(http, status); + } +} + +fn serve_http11_unconditional( + io: impl HttpServeStream, + svc: impl HttpService + 'static, + cancel: Rc, + http1_builder_hook: Option http1::Builder>, +) -> impl Future> + 'static { + let mut builder = http1::Builder::new(); + builder.keep_alive(true).writev(*USE_WRITEV); + + if let Some(http1_builder_hook) = http1_builder_hook { + builder = http1_builder_hook(builder); + } + + let conn = builder + .serve_connection(TokioIo::new(io), svc) + .with_upgrades(); + + async { + match conn.or_abort(cancel).await { + Err(mut conn) => { + Pin::new(&mut conn).graceful_shutdown(); + conn.await + } + Ok(res) => res, + } + } +} + +fn serve_http2_unconditional( + io: impl HttpServeStream, + svc: impl HttpService + 'static, + cancel: Rc, + http2_builder_hook: Option< + fn(http2::Builder) -> http2::Builder, + >, +) -> impl Future> + 'static { + let mut builder = http2::Builder::new(LocalExecutor); + + if let Some(http2_builder_hook) = http2_builder_hook { + builder = http2_builder_hook(builder); + } + + let conn = builder.serve_connection(TokioIo::new(io), svc); + async { + match conn.or_abort(cancel).await { + Err(mut conn) => { + Pin::new(&mut conn).graceful_shutdown(); + conn.await + } + 
Ok(res) => res, + } + } +} + +async fn serve_http2_autodetect( + io: impl HttpServeStream, + svc: impl HttpService + 'static, + cancel: Rc, + options: Options, +) -> Result<(), HttpNextError> { + let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX); + let (matches, io) = prefix.match_prefix().await?; + if matches { + serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook) + .await + .map_err(HttpNextError::Hyper) + } else { + serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook) + .await + .map_err(HttpNextError::Hyper) + } +} + +fn serve_https( + mut io: TlsStream, + request_info: HttpConnectionProperties, + lifetime: HttpLifetime, + tx: tokio::sync::mpsc::Sender>, + options: Options, +) -> JoinHandle> { + let HttpLifetime { + server_state, + connection_cancel_handle, + listen_cancel_handle, + } = lifetime; + + let svc = service_fn(move |req: Request| { + handle_request(req, request_info.clone(), server_state.clone(), tx.clone()) + }); + spawn( + async move { + let handshake = io.handshake().await?; + // If the client specifically negotiates a protocol, we will use it. 
If not, we'll auto-detect + // based on the prefix bytes + let handshake = handshake.alpn; + if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() { + serve_http2_unconditional( + io, + svc, + listen_cancel_handle, + options.http2_builder_hook, + ) + .await + .map_err(HttpNextError::Hyper) + } else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() { + serve_http11_unconditional( + io, + svc, + listen_cancel_handle, + options.http1_builder_hook, + ) + .await + .map_err(HttpNextError::Hyper) + } else { + serve_http2_autodetect(io, svc, listen_cancel_handle, options).await + } + } + .try_or_cancel(connection_cancel_handle), + ) +} + +fn serve_http( + io: impl HttpServeStream, + request_info: HttpConnectionProperties, + lifetime: HttpLifetime, + tx: tokio::sync::mpsc::Sender>, + options: Options, +) -> JoinHandle> { + let HttpLifetime { + server_state, + connection_cancel_handle, + listen_cancel_handle, + } = lifetime; + + let svc = service_fn(move |req: Request| { + handle_request(req, request_info.clone(), server_state.clone(), tx.clone()) + }); + spawn( + serve_http2_autodetect(io, svc, listen_cancel_handle, options) + .try_or_cancel(connection_cancel_handle), + ) +} + +fn serve_http_on( + connection: HTTP::Connection, + listen_properties: &HttpListenProperties, + lifetime: HttpLifetime, + tx: tokio::sync::mpsc::Sender>, + options: Options, +) -> JoinHandle> +where + HTTP: HttpPropertyExtractor, +{ + let connection_properties: HttpConnectionProperties = + HTTP::connection_properties(listen_properties, &connection); + + let network_stream = HTTP::to_network_stream_from_connection(connection); + + match network_stream { + NetworkStream::Tcp(conn) => { + serve_http(conn, connection_properties, lifetime, tx, options) + } + NetworkStream::Tls(conn) => { + serve_https(conn, connection_properties, lifetime, tx, options) + } + #[cfg(unix)] + NetworkStream::Unix(conn) => { + serve_http(conn, connection_properties, lifetime, tx, options) + } + } +} + +#[derive(Clone)] +struct 
HttpLifetime { + connection_cancel_handle: Rc, + listen_cancel_handle: Rc, + server_state: SignallingRc, +} + +struct HttpJoinHandle { + join_handle: AsyncRefCell>>>, + connection_cancel_handle: Rc, + listen_cancel_handle: Rc, + rx: AsyncRefCell>>, + server_state: SignallingRc, +} + +impl HttpJoinHandle { + fn new(rx: tokio::sync::mpsc::Receiver>) -> Self { + Self { + join_handle: AsyncRefCell::new(None), + connection_cancel_handle: CancelHandle::new_rc(), + listen_cancel_handle: CancelHandle::new_rc(), + rx: AsyncRefCell::new(rx), + server_state: HttpServerState::new(), + } + } + + fn lifetime(self: &Rc) -> HttpLifetime { + HttpLifetime { + connection_cancel_handle: self.connection_cancel_handle.clone(), + listen_cancel_handle: self.listen_cancel_handle.clone(), + server_state: self.server_state.clone(), + } + } + + fn connection_cancel_handle(self: &Rc) -> Rc { + self.connection_cancel_handle.clone() + } + + fn listen_cancel_handle(self: &Rc) -> Rc { + self.listen_cancel_handle.clone() + } +} + +impl Resource for HttpJoinHandle { + fn name(&self) -> Cow { + "http".into() + } + + fn close(self: Rc) { + // During a close operation, we cancel everything + self.connection_cancel_handle.cancel(); + self.listen_cancel_handle.cancel(); + } +} + +impl Drop for HttpJoinHandle { + fn drop(&mut self) { + // In some cases we may be dropped without closing, so let's cancel everything on the way out + self.connection_cancel_handle.cancel(); + self.listen_cancel_handle.cancel(); + } +} + +#[op2] +#[serde] +pub fn op_http_serve( + state: Rc>, + #[smi] listener_rid: ResourceId, +) -> Result<(ResourceId, &'static str, String), HttpNextError> +where + HTTP: HttpPropertyExtractor, +{ + let listener = + HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid) + .map_err(HttpNextError::Resource)?; + + let listen_properties = HTTP::listen_properties_from_listener(&listener)?; + + let (tx, rx) = tokio::sync::mpsc::channel(10); + let resource: Rc = 
Rc::new(HttpJoinHandle::new(rx)); + let listen_cancel_clone = resource.listen_cancel_handle(); + + let lifetime = resource.lifetime(); + + let options = { + let state = state.borrow(); + *state.borrow::() + }; + + let listen_properties_clone: HttpListenProperties = listen_properties.clone(); + let handle = spawn(async move { + loop { + let conn = HTTP::accept_connection_from_listener(&listener) + .try_or_cancel(listen_cancel_clone.clone()) + .await + .map_err(HttpNextError::HttpPropertyExtractor)?; + serve_http_on::( + conn, + &listen_properties_clone, + lifetime.clone(), + tx.clone(), + options, + ); + } + #[allow(unreachable_code)] + Ok::<_, HttpNextError>(()) + }); + + // Set the handle after we start the future + *RcRef::map(&resource, |this| &this.join_handle) + .try_borrow_mut() + .unwrap() = Some(handle); + + Ok(( + state.borrow_mut().resource_table.add_rc(resource), + listen_properties.scheme, + listen_properties.fallback_host, + )) +} + +#[op2] +#[serde] +pub fn op_http_serve_on( + state: Rc>, + #[smi] connection_rid: ResourceId, +) -> Result<(ResourceId, &'static str, String), HttpNextError> +where + HTTP: HttpPropertyExtractor, +{ + let connection = + HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid) + .map_err(HttpNextError::Resource)?; + + let listen_properties = HTTP::listen_properties_from_connection(&connection)?; + + let (tx, rx) = tokio::sync::mpsc::channel(10); + let resource: Rc = Rc::new(HttpJoinHandle::new(rx)); + + let options = { + let state = state.borrow(); + *state.borrow::() + }; + + let handle = serve_http_on::( + connection, + &listen_properties, + resource.lifetime(), + tx, + options, + ); + + // Set the handle after we start the future + *RcRef::map(&resource, |this| &this.join_handle) + .try_borrow_mut() + .unwrap() = Some(handle); + + Ok(( + state.borrow_mut().resource_table.add_rc(resource), + listen_properties.scheme, + listen_properties.fallback_host, + )) +} + +/// Synchronous, non-blocking call to see if 
there are any further HTTP requests. If anything +/// goes wrong in this method we return null and let the async handler pick up the real error. +#[op2(fast)] +pub fn op_http_try_wait( + state: &mut OpState, + #[smi] rid: ResourceId, +) -> *const c_void { + // The resource needs to exist. + let Ok(join_handle) = state.resource_table.get::(rid) else { + return null(); + }; + + // If join handle is somehow locked, just abort. + let Some(mut handle) = + RcRef::map(&join_handle, |this| &this.rx).try_borrow_mut() + else { + return null(); + }; + + // See if there are any requests waiting on this channel. If not, return. + let Ok(record) = handle.try_recv() else { + return null(); + }; + + let ptr = ExternalPointer::new(RcHttpRecord(record)); + ptr.into_raw() +} + +#[op2(async)] +pub async fn op_http_wait( + state: Rc>, + #[smi] rid: ResourceId, +) -> Result<*const c_void, HttpNextError> { + // We will get the join handle initially, as we might be consuming requests still + let join_handle = state + .borrow_mut() + .resource_table + .get::(rid) + .map_err(HttpNextError::Resource)?; + + let cancel = join_handle.listen_cancel_handle(); + let next = async { + let mut recv = RcRef::map(&join_handle, |this| &this.rx).borrow_mut().await; + recv.recv().await + } + .or_cancel(cancel) + .unwrap_or_else(|_| None) + .await; + + // Do we have a request? + if let Some(record) = next { + let ptr = ExternalPointer::new(RcHttpRecord(record)); + return Ok(ptr.into_raw()); + } + + // No - we're shutting down + let res = RcRef::map(join_handle, |this| &this.join_handle) + .borrow_mut() + .await + .take() + .unwrap() + .await?; + + // Filter out shutdown (ENOTCONN) errors + if let Err(err) = res { + if let HttpNextError::Io(err) = &err { + if err.kind() == io::ErrorKind::NotConnected { + return Ok(null()); + } + } + + return Err(err); + } + + Ok(null()) +} + +/// Cancels the HTTP handle. 
+#[op2(fast)] +pub fn op_http_cancel( + state: &mut OpState, + #[smi] rid: ResourceId, + graceful: bool, +) -> Result<(), deno_core::error::AnyError> { + let join_handle = state.resource_table.get::(rid)?; + + if graceful { + // In a graceful shutdown, we close the listener and allow all the remaining connections to drain + join_handle.listen_cancel_handle().cancel(); + } else { + // In a forceful shutdown, we close everything + join_handle.listen_cancel_handle().cancel(); + join_handle.connection_cancel_handle().cancel(); + } + + Ok(()) +} + +#[op2(async)] +pub async fn op_http_close( + state: Rc>, + #[smi] rid: ResourceId, + graceful: bool, +) -> Result<(), HttpNextError> { + let join_handle = state + .borrow_mut() + .resource_table + .take::(rid) + .map_err(HttpNextError::Resource)?; + + if graceful { + http_general_trace!("graceful shutdown"); + // In a graceful shutdown, we close the listener and allow all the remaining connections to drain + join_handle.listen_cancel_handle().cancel(); + poll_fn(|cx| join_handle.server_state.poll_complete(cx)).await; + } else { + http_general_trace!("forceful shutdown"); + // In a forceful shutdown, we close everything + join_handle.listen_cancel_handle().cancel(); + join_handle.connection_cancel_handle().cancel(); + // Give streaming responses a tick to close + tokio::task::yield_now().await; + } + + http_general_trace!("awaiting shutdown"); + + let mut join_handle = RcRef::map(&join_handle, |this| &this.join_handle) + .borrow_mut() + .await; + if let Some(join_handle) = join_handle.take() { + join_handle.await??; + } + + Ok(()) +} + +struct UpgradeStream { + read: AsyncRefCell>, + write: AsyncRefCell>, + cancel_handle: CancelHandle, +} + +impl UpgradeStream { + pub fn new( + read: tokio::io::ReadHalf, + write: tokio::io::WriteHalf, + ) -> Self { + Self { + read: AsyncRefCell::new(read), + write: AsyncRefCell::new(write), + cancel_handle: CancelHandle::new(), + } + } + + async fn read( + self: Rc, + buf: &mut [u8], + ) -> 
Result { + let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle); + async { + let read = RcRef::map(self, |this| &this.read); + let mut read = read.borrow_mut().await; + Pin::new(&mut *read).read(buf).await + } + .try_or_cancel(cancel_handle) + .await + } + + async fn write(self: Rc, buf: &[u8]) -> Result { + let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle); + async { + let write = RcRef::map(self, |this| &this.write); + let mut write = write.borrow_mut().await; + Pin::new(&mut *write).write(buf).await + } + .try_or_cancel(cancel_handle) + .await + } + + async fn write_vectored( + self: Rc, + buf1: &[u8], + buf2: &[u8], + ) -> Result { + let mut wr = RcRef::map(self, |r| &r.write).borrow_mut().await; + + let total = buf1.len() + buf2.len(); + let mut bufs = [std::io::IoSlice::new(buf1), std::io::IoSlice::new(buf2)]; + let mut nwritten = wr.write_vectored(&bufs).await?; + if nwritten == total { + return Ok(nwritten); + } + + // Slightly more optimized than (unstable) write_all_vectored for 2 iovecs. + while nwritten <= buf1.len() { + bufs[0] = std::io::IoSlice::new(&buf1[nwritten..]); + nwritten += wr.write_vectored(&bufs).await?; + } + + // First buffer out of the way. 
+ if nwritten < total && nwritten > buf1.len() { + wr.write_all(&buf2[nwritten - buf1.len()..]).await?; + } + + Ok(total) + } +} + +impl Resource for UpgradeStream { + fn name(&self) -> Cow { + "httpRawUpgradeStream".into() + } + + deno_core::impl_readable_byob!(); + deno_core::impl_writable!(); + + fn close(self: Rc) { + self.cancel_handle.cancel(); + } +} + +#[op2(fast)] +pub fn op_can_write_vectored( + state: &mut OpState, + #[smi] rid: ResourceId, +) -> bool { + state.resource_table.get::(rid).is_ok() +} + +#[op2(async)] +#[number] +pub async fn op_raw_write_vectored( + state: Rc>, + #[smi] rid: ResourceId, + #[buffer] buf1: JsBuffer, + #[buffer] buf2: JsBuffer, +) -> Result { + let resource: Rc = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpNextError::Resource)?; + let nwritten = resource.write_vectored(&buf1, &buf2).await?; + Ok(nwritten) +} diff --git a/vendor/deno_http/lib.rs b/vendor/deno_http/lib.rs new file mode 100644 index 00000000..39b0bbc2 --- /dev/null +++ b/vendor/deno_http/lib.rs @@ -0,0 +1,1279 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +use async_compression::tokio::write::BrotliEncoder; +use async_compression::tokio::write::GzipEncoder; +use async_compression::Level; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use cache_control::CacheControl; +use deno_core::futures::channel::mpsc; +use deno_core::futures::channel::oneshot; +use deno_core::futures::future::pending; +use deno_core::futures::future::select; +use deno_core::futures::future::Either; +use deno_core::futures::future::Pending; +use deno_core::futures::future::RemoteHandle; +use deno_core::futures::future::Shared; +use deno_core::futures::never::Never; +use deno_core::futures::ready; +use deno_core::futures::stream::Peekable; +use deno_core::futures::FutureExt; +use deno_core::futures::StreamExt; +use deno_core::futures::TryFutureExt; +use deno_core::op2; +use deno_core::unsync::spawn; +use deno_core::AsyncRefCell; +use deno_core::AsyncResult; +use deno_core::BufView; +use deno_core::ByteString; +use deno_core::CancelFuture; +use deno_core::CancelHandle; +use deno_core::CancelTryFuture; +use deno_core::JsBuffer; +use deno_core::OpState; +use deno_core::RcRef; +use deno_core::Resource; +use deno_core::ResourceId; +use deno_core::StringOrBuffer; +use deno_net::raw::NetworkStream; +use deno_websocket::ws_create_server_stream; +use flate2::write::GzEncoder; +use flate2::Compression; +use hyper::server::conn::http1; +use hyper::server::conn::http2; +use hyper_util::rt::TokioIo; +use hyper_v014::body::Bytes; +use hyper_v014::body::HttpBody; +use hyper_v014::body::SizeHint; +use hyper_v014::header::HeaderName; +use hyper_v014::header::HeaderValue; +use hyper_v014::server::conn::Http; +use hyper_v014::service::Service; +use hyper_v014::Body; +use hyper_v014::HeaderMap; +use hyper_v014::Request; +use hyper_v014::Response; +use serde::Serialize; +use std::borrow::Cow; +use std::cell::RefCell; +use std::cmp::min; +use std::error::Error; +use std::future::Future; +use std::io; +use std::io::Write; +use std::mem::replace; +use 
std::mem::take; +use std::pin::pin; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::AsyncWriteExt; + +use crate::network_buffered_stream::NetworkBufferedStream; +use crate::reader_stream::ExternallyAbortableReaderStream; +use crate::reader_stream::ShutdownHandle; + +pub mod compressible; +mod fly_accept_encoding; +mod http_next; +mod network_buffered_stream; +mod reader_stream; +mod request_body; +mod request_properties; +mod response_body; +mod service; +mod websocket_upgrade; + +use fly_accept_encoding::Encoding; +pub use http_next::HttpNextError; +pub use request_properties::DefaultHttpPropertyExtractor; +pub use request_properties::HttpConnectionProperties; +pub use request_properties::HttpListenProperties; +pub use request_properties::HttpPropertyExtractor; +pub use request_properties::HttpRequestProperties; +pub use service::UpgradeUnavailableError; +pub use websocket_upgrade::WebSocketUpgradeError; + +#[derive(Debug, Default, Clone, Copy)] +pub struct Options { + /// By passing a hook function, the caller can customize various configuration + /// options for the HTTP/2 server. + /// See [`http2::Builder`] for what parameters can be customized. + /// + /// If `None`, the default configuration provided by hyper will be used. Note + /// that the default configuration is subject to change in future versions. + pub http2_builder_hook: + Option) -> http2::Builder>, + /// By passing a hook function, the caller can customize various configuration + /// options for the HTTP/1 server. + /// See [`http1::Builder`] for what parameters can be customized. + /// + /// If `None`, the default configuration provided by hyper will be used. Note + /// that the default configuration is subject to change in future versions. 
+ pub http1_builder_hook: Option http1::Builder>, +} + +deno_core::extension!( + deno_http, + deps = [deno_web, deno_net, deno_fetch, deno_websocket], + parameters = [ HTTP: HttpPropertyExtractor ], + ops = [ + op_http_accept, + op_http_headers, + op_http_shutdown, + op_http_upgrade_websocket, + op_http_websocket_accept_header, + op_http_write_headers, + op_http_write_resource, + op_http_write, + http_next::op_http_close_after_finish, + http_next::op_http_get_request_header, + http_next::op_http_get_request_headers, + http_next::op_http_request_on_cancel, + http_next::op_http_get_request_method_and_url, + http_next::op_http_get_request_cancelled, + http_next::op_http_read_request_body, + http_next::op_http_serve_on, + http_next::op_http_serve, + http_next::op_http_set_promise_complete, + http_next::op_http_set_response_body_bytes, + http_next::op_http_set_response_body_resource, + http_next::op_http_set_response_body_text, + http_next::op_http_set_response_header, + http_next::op_http_set_response_headers, + http_next::op_http_set_response_trailers, + http_next::op_http_upgrade_websocket_next, + http_next::op_http_upgrade_raw, + http_next::op_raw_write_vectored, + http_next::op_can_write_vectored, + http_next::op_http_try_wait, + http_next::op_http_wait, + http_next::op_http_close, + http_next::op_http_cancel, + ], + esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"], + options = { + options: Options, + }, + state = |state, options| { + state.put::(options.options); + } +); + +#[derive(Debug, thiserror::Error)] +pub enum HttpError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error("{0}")] + HyperV014(#[source] Arc), + #[error("{0}")] + InvalidHeaderName(#[from] hyper_v014::header::InvalidHeaderName), + #[error("{0}")] + InvalidHeaderValue(#[from] hyper_v014::header::InvalidHeaderValue), + #[error("{0}")] + Http(#[from] hyper_v014::http::Error), + #[error("response headers 
already sent")] + ResponseHeadersAlreadySent, + #[error("connection closed while sending response")] + ConnectionClosedWhileSendingResponse, + #[error("already in use")] + AlreadyInUse, + #[error("{0}")] + Io(#[from] std::io::Error), + #[error("no response headers")] + NoResponseHeaders, + #[error("response already completed")] + ResponseAlreadyCompleted, + #[error("cannot upgrade because request body was used")] + UpgradeBodyUsed, + #[error(transparent)] + Other(deno_core::error::AnyError), +} + +pub enum HttpSocketAddr { + IpSocket(std::net::SocketAddr), + #[cfg(unix)] + UnixSocket(tokio::net::unix::SocketAddr), +} + +impl From for HttpSocketAddr { + fn from(addr: std::net::SocketAddr) -> Self { + Self::IpSocket(addr) + } +} + +#[cfg(unix)] +impl From for HttpSocketAddr { + fn from(addr: tokio::net::unix::SocketAddr) -> Self { + Self::UnixSocket(addr) + } +} + +struct HttpConnResource { + addr: HttpSocketAddr, + scheme: &'static str, + acceptors_tx: mpsc::UnboundedSender, + closed_fut: Shared>>>, + cancel_handle: Rc, // Closes gracefully and cancels accept ops. +} + +impl HttpConnResource { + fn new(io: S, scheme: &'static str, addr: HttpSocketAddr) -> Self + where + S: AsyncRead + AsyncWrite + Unpin + Send + 'static, + { + let (acceptors_tx, acceptors_rx) = mpsc::unbounded::(); + let service = HttpService::new(acceptors_rx); + + let conn_fut = Http::new() + .with_executor(LocalExecutor) + .serve_connection(io, service) + .with_upgrades(); + + // When the cancel handle is used, the connection shuts down gracefully. + // No new HTTP streams will be accepted, but existing streams will be able + // to continue operating and eventually shut down cleanly. + let cancel_handle = CancelHandle::new_rc(); + let shutdown_fut = never().or_cancel(&cancel_handle).fuse(); + + // A local task that polls the hyper connection future to completion. 
+ let task_fut = async move { + let conn_fut = pin!(conn_fut); + let shutdown_fut = pin!(shutdown_fut); + let result = match select(conn_fut, shutdown_fut).await { + Either::Left((result, _)) => result, + Either::Right((_, mut conn_fut)) => { + conn_fut.as_mut().graceful_shutdown(); + conn_fut.await + } + }; + filter_enotconn(result).map_err(Arc::from) + }; + let (task_fut, closed_fut) = task_fut.remote_handle(); + let closed_fut = closed_fut.shared(); + spawn(task_fut); + + Self { + addr, + scheme, + acceptors_tx, + closed_fut, + cancel_handle, + } + } + + // Accepts a new incoming HTTP request. + async fn accept( + self: &Rc, + ) -> Result< + Option<( + HttpStreamReadResource, + HttpStreamWriteResource, + String, + String, + )>, + HttpError, + > { + let fut = async { + let (request_tx, request_rx) = oneshot::channel(); + let (response_tx, response_rx) = oneshot::channel(); + + let acceptor = HttpAcceptor::new(request_tx, response_rx); + self.acceptors_tx.unbounded_send(acceptor).ok()?; + + let request = request_rx.await.ok()?; + let accept_encoding = { + let encodings = + fly_accept_encoding::encodings_iter_http_02(request.headers()) + .filter(|r| { + matches!(r, Ok((Some(Encoding::Brotli | Encoding::Gzip), _))) + }); + + fly_accept_encoding::preferred(encodings) + .ok() + .flatten() + .unwrap_or(Encoding::Identity) + }; + + let method = request.method().to_string(); + let url = req_url(&request, self.scheme, &self.addr); + let read_stream = HttpStreamReadResource::new(self, request); + let write_stream = + HttpStreamWriteResource::new(self, response_tx, accept_encoding); + Some((read_stream, write_stream, method, url)) + }; + + async { + match fut.await { + Some(stream) => Ok(Some(stream)), + // Return the connection error, if any. + None => self.closed().map_ok(|_| None).await, + } + } + .try_or_cancel(&self.cancel_handle) + .await + } + + /// A future that completes when this HTTP connection is closed or errors. 
+ async fn closed(&self) -> Result<(), HttpError> { + self.closed_fut.clone().map_err(HttpError::HyperV014).await + } +} + +impl Resource for HttpConnResource { + fn name(&self) -> Cow { + "httpConn".into() + } + + fn close(self: Rc) { + self.cancel_handle.cancel(); + } +} + +/// Creates a new HttpConn resource which uses `io` as its transport. +pub fn http_create_conn_resource( + state: &mut OpState, + io: S, + addr: A, + scheme: &'static str, +) -> ResourceId +where + S: AsyncRead + AsyncWrite + Unpin + Send + 'static, + A: Into, +{ + let conn = HttpConnResource::new(io, scheme, addr.into()); + state.resource_table.add(conn) +} + +/// An object that implements the `hyper::Service` trait, through which Hyper +/// delivers incoming HTTP requests. +struct HttpService { + acceptors_rx: Peekable>, +} + +impl HttpService { + fn new(acceptors_rx: mpsc::UnboundedReceiver) -> Self { + let acceptors_rx = acceptors_rx.peekable(); + Self { acceptors_rx } + } +} + +impl Service> for HttpService { + type Response = Response; + type Error = oneshot::Canceled; + type Future = oneshot::Receiver>; + + fn poll_ready( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { + let acceptors_rx = Pin::new(&mut self.acceptors_rx); + let result = ready!(acceptors_rx.poll_peek(cx)) + .map(|_| ()) + .ok_or(oneshot::Canceled); + Poll::Ready(result) + } + + fn call(&mut self, request: Request) -> Self::Future { + let acceptor = self.acceptors_rx.next().now_or_never().flatten().unwrap(); + acceptor.call(request) + } +} + +/// A pair of one-shot channels which first transfer a HTTP request from the +/// Hyper service to the HttpConn resource, and then take the Response back to +/// the service. 
+struct HttpAcceptor { + request_tx: oneshot::Sender>, + response_rx: oneshot::Receiver>, +} + +impl HttpAcceptor { + fn new( + request_tx: oneshot::Sender>, + response_rx: oneshot::Receiver>, + ) -> Self { + Self { + request_tx, + response_rx, + } + } + + fn call(self, request: Request) -> oneshot::Receiver> { + let Self { + request_tx, + response_rx, + } = self; + request_tx + .send(request) + .map(|_| response_rx) + .unwrap_or_else(|_| oneshot::channel().1) // Make new canceled receiver. + } +} + +pub struct HttpStreamReadResource { + _conn: Rc, + pub rd: AsyncRefCell, + cancel_handle: CancelHandle, + size: SizeHint, +} + +pub struct HttpStreamWriteResource { + conn: Rc, + wr: AsyncRefCell, + accept_encoding: Encoding, +} + +impl HttpStreamReadResource { + fn new(conn: &Rc, request: Request) -> Self { + let size = request.body().size_hint(); + Self { + _conn: conn.clone(), + rd: HttpRequestReader::Headers(request).into(), + size, + cancel_handle: CancelHandle::new(), + } + } +} + +impl Resource for HttpStreamReadResource { + fn name(&self) -> Cow { + "httpReadStream".into() + } + + fn read(self: Rc, limit: usize) -> AsyncResult { + Box::pin(async move { + let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await; + + let body = loop { + match &mut *rd { + HttpRequestReader::Headers(_) => {} + HttpRequestReader::Body(_, body) => break body, + HttpRequestReader::Closed => return Ok(BufView::empty()), + } + match take(&mut *rd) { + HttpRequestReader::Headers(request) => { + let (parts, body) = request.into_parts(); + *rd = HttpRequestReader::Body(parts.headers, body.peekable()); + } + _ => unreachable!(), + }; + }; + + let fut = async { + let mut body = Pin::new(body); + loop { + match body.as_mut().peek_mut().await { + Some(Ok(chunk)) if !chunk.is_empty() => { + let len = min(limit, chunk.len()); + let buf = chunk.split_to(len); + let view = BufView::from(buf); + break Ok(view); + } + // This unwrap is safe because `peek_mut()` returned `Some`, and thus + // 
currently has a peeked value that can be synchronously returned + // from `next()`. + // + // The future returned from `next()` is always ready, so we can + // safely call `await` on it without creating a race condition. + Some(_) => match body.as_mut().next().await.unwrap() { + Ok(chunk) => assert!(chunk.is_empty()), + Err(err) => { + break Err(HttpError::HyperV014(Arc::new(err)).into()) + } + }, + None => break Ok(BufView::empty()), + } + } + }; + + let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle); + fut.try_or_cancel(cancel_handle).await + }) + } + + fn close(self: Rc) { + self.cancel_handle.cancel(); + } + + fn size_hint(&self) -> (u64, Option) { + (self.size.lower(), self.size.upper()) + } +} + +impl HttpStreamWriteResource { + fn new( + conn: &Rc, + response_tx: oneshot::Sender>, + accept_encoding: Encoding, + ) -> Self { + Self { + conn: conn.clone(), + wr: HttpResponseWriter::Headers(response_tx).into(), + accept_encoding, + } + } +} + +impl Resource for HttpStreamWriteResource { + fn name(&self) -> Cow { + "httpWriteStream".into() + } +} + +/// The read half of an HTTP stream. +pub enum HttpRequestReader { + Headers(Request), + Body(HeaderMap, Peekable), + Closed, +} + +impl Default for HttpRequestReader { + fn default() -> Self { + Self::Closed + } +} + +/// The write half of an HTTP stream. +enum HttpResponseWriter { + Headers(oneshot::Sender>), + Body { + writer: Pin>, + shutdown_handle: ShutdownHandle, + }, + BodyUncompressed(BodyUncompressedSender), + Closed, +} + +impl Default for HttpResponseWriter { + fn default() -> Self { + Self::Closed + } +} + +struct BodyUncompressedSender(Option); + +impl BodyUncompressedSender { + fn sender(&mut self) -> &mut hyper_v014::body::Sender { + // This is safe because we only ever take the sender out of the option + // inside of the shutdown method. 
+ self.0.as_mut().unwrap() + } + + fn shutdown(mut self) { + // take the sender out of self so that when self is dropped at the end of + // this block, it doesn't get aborted + self.0.take(); + } +} + +impl From for BodyUncompressedSender { + fn from(sender: hyper_v014::body::Sender) -> Self { + BodyUncompressedSender(Some(sender)) + } +} + +impl Drop for BodyUncompressedSender { + fn drop(&mut self) { + if let Some(sender) = self.0.take() { + sender.abort(); + } + } +} + +// We use a tuple instead of struct to avoid serialization overhead of the keys. +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct NextRequestResponse( + // read_stream_rid: + ResourceId, + // write_stream_rid: + ResourceId, + // method: + // This is a String rather than a ByteString because reqwest will only return + // the method as a str which is guaranteed to be ASCII-only. + String, + // url: + String, +); + +#[op2(async)] +#[serde] +async fn op_http_accept( + state: Rc>, + #[smi] rid: ResourceId, +) -> Result, HttpError> { + let conn = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + + match conn.accept().await { + Ok(Some((read_stream, write_stream, method, url))) => { + let read_stream_rid = state + .borrow_mut() + .resource_table + .add_rc(Rc::new(read_stream)); + let write_stream_rid = state + .borrow_mut() + .resource_table + .add_rc(Rc::new(write_stream)); + let r = + NextRequestResponse(read_stream_rid, write_stream_rid, method, url); + Ok(Some(r)) + } + Ok(None) => Ok(None), + Err(err) => Err(err), + } +} + +fn req_url( + req: &hyper_v014::Request, + scheme: &'static str, + addr: &HttpSocketAddr, +) -> String { + let host: Cow = match addr { + HttpSocketAddr::IpSocket(addr) => { + if let Some(auth) = req.uri().authority() { + match addr.port() { + 443 if scheme == "https" => Cow::Borrowed(auth.host()), + 80 if scheme == "http" => Cow::Borrowed(auth.host()), + _ => Cow::Borrowed(auth.as_str()), // Includes port number. 
+ } + } else if let Some(host) = req.uri().host() { + Cow::Borrowed(host) + } else if let Some(host) = req.headers().get("HOST") { + match host.to_str() { + Ok(host) => Cow::Borrowed(host), + Err(_) => Cow::Owned( + host + .as_bytes() + .iter() + .cloned() + .map(char::from) + .collect::(), + ), + } + } else { + Cow::Owned(addr.to_string()) + } + } + // There is no standard way for unix domain socket URLs + // nginx and nodejs request use http://unix:[socket_path]:/ but it is not a valid URL + // httpie uses http+unix://[percent_encoding_of_path]/ which we follow + #[cfg(unix)] + HttpSocketAddr::UnixSocket(addr) => Cow::Owned( + percent_encoding::percent_encode( + addr + .as_pathname() + .and_then(|x| x.to_str()) + .unwrap_or_default() + .as_bytes(), + percent_encoding::NON_ALPHANUMERIC, + ) + .to_string(), + ), + }; + let path = req + .uri() + .path_and_query() + .map(|p| p.as_str()) + .unwrap_or("/"); + [scheme, "://", &host, path].concat() +} + +fn req_headers( + header_map: &HeaderMap, +) -> Vec<(ByteString, ByteString)> { + // We treat cookies specially, because we don't want them to get them + // mangled by the `Headers` object in JS. What we do is take all cookie + // headers and concat them into a single cookie header, separated by + // semicolons. 
+ let cookie_sep = "; ".as_bytes(); + let mut cookies = vec![]; + + let mut headers = Vec::with_capacity(header_map.len()); + for (name, value) in header_map.iter() { + if name == hyper_v014::header::COOKIE { + cookies.push(value.as_bytes()); + } else { + let name: &[u8] = name.as_ref(); + let value = value.as_bytes(); + headers.push((name.into(), value.into())); + } + } + + if !cookies.is_empty() { + headers.push(("cookie".into(), cookies.join(cookie_sep).into())); + } + + headers +} + +#[op2(async)] +async fn op_http_write_headers( + state: Rc>, + #[smi] rid: u32, + #[smi] status: u16, + #[serde] headers: Vec<(ByteString, ByteString)>, + #[serde] data: Option, +) -> Result<(), HttpError> { + let stream = state + .borrow_mut() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + + // Track supported encoding + let encoding = stream.accept_encoding; + + let mut builder = Response::builder(); + // SAFETY: can not fail, since a fresh Builder is non-errored + let hmap = unsafe { builder.headers_mut().unwrap_unchecked() }; + + // Add headers + hmap.reserve(headers.len() + 2); + for (k, v) in headers.into_iter() { + let v: Vec = v.into(); + hmap.append( + HeaderName::try_from(k.as_slice())?, + HeaderValue::try_from(v)?, + ); + } + ensure_vary_accept_encoding(hmap); + + let accepts_compression = + matches!(encoding, Encoding::Brotli | Encoding::Gzip); + let compressing = accepts_compression + && (matches!(data, Some(ref data) if data.len() > 20) || data.is_none()) + && should_compress(hmap); + + if compressing { + weaken_etag(hmap); + // Drop 'content-length' header. Hyper will update it using compressed body. 
+ hmap.remove(hyper_v014::header::CONTENT_LENGTH); + // Content-Encoding header + hmap.insert( + hyper_v014::header::CONTENT_ENCODING, + HeaderValue::from_static(match encoding { + Encoding::Brotli => "br", + Encoding::Gzip => "gzip", + _ => unreachable!(), // Forbidden by accepts_compression + }), + ); + } + + let (new_wr, body) = http_response(data, compressing, encoding)?; + let body = builder.status(status).body(body)?; + + let mut old_wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; + let response_tx = match replace(&mut *old_wr, new_wr) { + HttpResponseWriter::Headers(response_tx) => response_tx, + _ => return Err(HttpError::ResponseHeadersAlreadySent), + }; + + match response_tx.send(body) { + Ok(_) => Ok(()), + Err(_) => { + stream.conn.closed().await?; + Err(HttpError::ConnectionClosedWhileSendingResponse) + } + } +} + +#[op2] +#[serde] +fn op_http_headers( + state: &mut OpState, + #[smi] rid: u32, +) -> Result, HttpError> { + let stream = state + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + let rd = RcRef::map(&stream, |r| &r.rd) + .try_borrow() + .ok_or(HttpError::AlreadyInUse)?; + match &*rd { + HttpRequestReader::Headers(request) => Ok(req_headers(request.headers())), + HttpRequestReader::Body(headers, _) => Ok(req_headers(headers)), + _ => unreachable!(), + } +} + +fn http_response( + data: Option, + compressing: bool, + encoding: Encoding, +) -> Result<(HttpResponseWriter, hyper_v014::Body), HttpError> { + // Gzip, after level 1, doesn't produce significant size difference. 
+ // This default matches nginx default gzip compression level (1): + // https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level + const GZIP_DEFAULT_COMPRESSION_LEVEL: u8 = 1; + + match data { + Some(data) if compressing => match encoding { + Encoding::Brotli => { + // quality level 6 is based on google's nginx default value for + // on-the-fly compression + // https://github.com/google/ngx_brotli#brotli_comp_level + // lgwin 22 is equivalent to brotli window size of (2**22)-16 bytes + // (~4MB) + let mut writer = brotli::CompressorWriter::new(Vec::new(), 4096, 6, 22); + writer.write_all(&data)?; + Ok((HttpResponseWriter::Closed, writer.into_inner().into())) + } + Encoding::Gzip => { + let mut writer = GzEncoder::new( + Vec::new(), + Compression::new(GZIP_DEFAULT_COMPRESSION_LEVEL.into()), + ); + writer.write_all(&data)?; + Ok((HttpResponseWriter::Closed, writer.finish()?.into())) + } + _ => unreachable!(), // forbidden by accepts_compression + }, + Some(data) => { + // If a buffer was passed, but isn't compressible, we use it to + // construct a response body. + Ok((HttpResponseWriter::Closed, data.to_vec().into())) + } + None if compressing => { + // Create a one way pipe that implements tokio's async io traits. To do + // this we create a [tokio::io::DuplexStream], but then throw away one + // of the directions to create a one way pipe. 
+ let (a, b) = tokio::io::duplex(64 * 1024); + let (reader, _) = tokio::io::split(a); + let (_, writer) = tokio::io::split(b); + let writer: Pin> = match encoding { + Encoding::Brotli => { + Box::pin(BrotliEncoder::with_quality(writer, Level::Fastest)) + } + Encoding::Gzip => Box::pin(GzipEncoder::with_quality( + writer, + Level::Precise(GZIP_DEFAULT_COMPRESSION_LEVEL.into()), + )), + _ => unreachable!(), // forbidden by accepts_compression + }; + let (stream, shutdown_handle) = + ExternallyAbortableReaderStream::new(reader); + Ok(( + HttpResponseWriter::Body { + writer, + shutdown_handle, + }, + Body::wrap_stream(stream), + )) + } + None => { + let (body_tx, body_rx) = Body::channel(); + Ok(( + HttpResponseWriter::BodyUncompressed(body_tx.into()), + body_rx, + )) + } + } +} + +// If user provided a ETag header for uncompressed data, we need to +// ensure it is a Weak Etag header ("W/"). +fn weaken_etag(hmap: &mut hyper_v014::HeaderMap) { + if let Some(etag) = hmap.get_mut(hyper_v014::header::ETAG) { + if !etag.as_bytes().starts_with(b"W/") { + let mut v = Vec::with_capacity(etag.as_bytes().len() + 2); + v.extend(b"W/"); + v.extend(etag.as_bytes()); + *etag = v.try_into().unwrap(); + } + } +} + +// Set Vary: Accept-Encoding header for direct body response. +// Note: we set the header irrespective of whether or not we compress the data +// to make sure cache services do not serve uncompressed data to clients that +// support compression. 
+fn ensure_vary_accept_encoding(hmap: &mut hyper_v014::HeaderMap) { + if let Some(v) = hmap.get_mut(hyper_v014::header::VARY) { + if let Ok(s) = v.to_str() { + if !s.to_lowercase().contains("accept-encoding") { + *v = format!("Accept-Encoding, {s}").try_into().unwrap() + } + return; + } + } + hmap.insert( + hyper_v014::header::VARY, + HeaderValue::from_static("Accept-Encoding"), + ); +} + +fn should_compress(headers: &hyper_v014::HeaderMap) -> bool { + // skip compression if the cache-control header value is set to "no-transform" or not utf8 + fn cache_control_no_transform( + headers: &hyper_v014::HeaderMap, + ) -> Option { + let v = headers.get(hyper_v014::header::CACHE_CONTROL)?; + let s = match std::str::from_utf8(v.as_bytes()) { + Ok(s) => s, + Err(_) => return Some(true), + }; + let c = CacheControl::from_value(s)?; + Some(c.no_transform) + } + // we skip compression if the `content-range` header value is set, as it + // indicates the contents of the body were negotiated based directly + // with the user code and we can't compress the response + let content_range = headers.contains_key(hyper_v014::header::CONTENT_RANGE); + // assume body is already compressed if Content-Encoding header present, thus avoid recompressing + let is_precompressed = + headers.contains_key(hyper_v014::header::CONTENT_ENCODING); + + !content_range + && !is_precompressed + && !cache_control_no_transform(headers).unwrap_or_default() + && headers + .get(hyper_v014::header::CONTENT_TYPE) + .map(compressible::is_content_compressible) + .unwrap_or_default() +} + +#[op2(async)] +async fn op_http_write_resource( + state: Rc>, + #[smi] rid: ResourceId, + #[smi] stream: ResourceId, +) -> Result<(), HttpError> { + let http_stream = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + let mut wr = RcRef::map(&http_stream, |r| &r.wr).borrow_mut().await; + let resource = state + .borrow() + .resource_table + .get_any(stream) + .map_err(HttpError::Resource)?; + loop 
{ + match *wr { + HttpResponseWriter::Headers(_) => { + return Err(HttpError::NoResponseHeaders) + } + HttpResponseWriter::Closed => { + return Err(HttpError::ResponseAlreadyCompleted) + } + _ => {} + }; + + let view = resource + .clone() + .read(64 * 1024) + .await + .map_err(HttpError::Other)?; // 64KB + if view.is_empty() { + break; + } + + match &mut *wr { + HttpResponseWriter::Body { writer, .. } => { + let mut result = writer.write_all(&view).await; + if result.is_ok() { + result = writer.flush().await; + } + if let Err(err) = result { + assert_eq!(err.kind(), std::io::ErrorKind::BrokenPipe); + // Don't return "broken pipe", that's an implementation detail. + // Pull up the failure associated with the transport connection instead. + http_stream.conn.closed().await?; + // If there was no connection error, drop body_tx. + *wr = HttpResponseWriter::Closed; + } + } + HttpResponseWriter::BodyUncompressed(body) => { + let bytes = view.to_vec().into(); + if let Err(err) = body.sender().send_data(bytes).await { + assert!(err.is_closed()); + // Pull up the failure associated with the transport connection instead. + http_stream.conn.closed().await?; + // If there was no connection error, drop body_tx. + *wr = HttpResponseWriter::Closed; + } + } + _ => unreachable!(), + }; + } + Ok(()) +} + +#[op2(async)] +async fn op_http_write( + state: Rc>, + #[smi] rid: ResourceId, + #[buffer] buf: JsBuffer, +) -> Result<(), HttpError> { + let stream = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; + + match &mut *wr { + HttpResponseWriter::Headers(_) => Err(HttpError::NoResponseHeaders), + HttpResponseWriter::Closed => Err(HttpError::ResponseAlreadyCompleted), + HttpResponseWriter::Body { writer, .. 
} => { + let mut result = writer.write_all(&buf).await; + if result.is_ok() { + result = writer.flush().await; + } + match result { + Ok(_) => Ok(()), + Err(err) => { + assert_eq!(err.kind(), std::io::ErrorKind::BrokenPipe); + // Don't return "broken pipe", that's an implementation detail. + // Pull up the failure associated with the transport connection instead. + stream.conn.closed().await?; + // If there was no connection error, drop body_tx. + *wr = HttpResponseWriter::Closed; + Err(HttpError::ResponseAlreadyCompleted) + } + } + } + HttpResponseWriter::BodyUncompressed(body) => { + let bytes = Bytes::from(buf.to_vec()); + match body.sender().send_data(bytes).await { + Ok(_) => Ok(()), + Err(err) => { + assert!(err.is_closed()); + // Pull up the failure associated with the transport connection instead. + stream.conn.closed().await?; + // If there was no connection error, drop body_tx. + *wr = HttpResponseWriter::Closed; + Err(HttpError::ResponseAlreadyCompleted) + } + } + } + } +} + +/// Gracefully closes the write half of the HTTP stream. Note that this does not +/// remove the HTTP stream resource from the resource table; it still has to be +/// closed with `Deno.core.close()`. +#[op2(async)] +async fn op_http_shutdown( + state: Rc>, + #[smi] rid: ResourceId, +) -> Result<(), HttpError> { + let stream = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; + let wr = take(&mut *wr); + match wr { + HttpResponseWriter::Body { + mut writer, + shutdown_handle, + } => { + shutdown_handle.shutdown(); + match writer.shutdown().await { + Ok(_) => {} + Err(err) => { + assert_eq!(err.kind(), std::io::ErrorKind::BrokenPipe); + // Don't return "broken pipe", that's an implementation detail. + // Pull up the failure associated with the transport connection instead. 
+ stream.conn.closed().await?; + } + } + } + HttpResponseWriter::BodyUncompressed(body) => { + body.shutdown(); + } + _ => {} + }; + Ok(()) +} + +#[op2] +#[string] +fn op_http_websocket_accept_header(#[string] key: String) -> String { + let digest = ring::digest::digest( + &ring::digest::SHA1_FOR_LEGACY_USE_ONLY, + format!("{key}258EAFA5-E914-47DA-95CA-C5AB0DC85B11").as_bytes(), + ); + BASE64_STANDARD.encode(digest) +} + +#[op2(async)] +#[smi] +async fn op_http_upgrade_websocket( + state: Rc>, + #[smi] rid: ResourceId, +) -> Result { + let stream = state + .borrow_mut() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; + let mut rd = RcRef::map(&stream, |r| &r.rd).borrow_mut().await; + + let request = match &mut *rd { + HttpRequestReader::Headers(request) => request, + _ => return Err(HttpError::UpgradeBodyUsed), + }; + + let (transport, bytes) = extract_network_stream( + hyper_v014::upgrade::on(request) + .await + .map_err(|err| HttpError::HyperV014(Arc::new(err)))?, + ); + Ok(ws_create_server_stream( + &mut state.borrow_mut(), + transport, + bytes, + )) +} + +// Needed so hyper can use non Send futures +#[derive(Clone)] +pub struct LocalExecutor; + +impl hyper_v014::rt::Executor for LocalExecutor +where + Fut: Future + 'static, + Fut::Output: 'static, +{ + fn execute(&self, fut: Fut) { + deno_core::unsync::spawn(fut); + } +} + +impl hyper::rt::Executor for LocalExecutor +where + Fut: Future + 'static, + Fut::Output: 'static, +{ + fn execute(&self, fut: Fut) { + deno_core::unsync::spawn(fut); + } +} + +/// Filters out the ever-surprising 'shutdown ENOTCONN' errors. +fn filter_enotconn( + result: Result<(), hyper_v014::Error>, +) -> Result<(), hyper_v014::Error> { + if result + .as_ref() + .err() + .and_then(|err| err.source()) + .and_then(|err| err.downcast_ref::()) + .filter(|err| err.kind() == io::ErrorKind::NotConnected) + .is_some() + { + Ok(()) + } else { + result + } +} + +/// Create a future that is forever pending. 
+fn never() -> Pending { + pending() +} + +trait CanDowncastUpgrade: Sized { + fn downcast( + self, + ) -> Result<(T, Bytes), Self>; +} + +impl CanDowncastUpgrade for hyper::upgrade::Upgraded { + fn downcast( + self, + ) -> Result<(T, Bytes), Self> { + let hyper::upgrade::Parts { io, read_buf, .. } = + self.downcast::>()?; + Ok((io.into_inner(), read_buf)) + } +} + +impl CanDowncastUpgrade for hyper_v014::upgrade::Upgraded { + fn downcast( + self, + ) -> Result<(T, Bytes), Self> { + let hyper_v014::upgrade::Parts { io, read_buf, .. } = self.downcast()?; + Ok((io, read_buf)) + } +} + +fn maybe_extract_network_stream< + T: Into + AsyncRead + AsyncWrite + Unpin + 'static, + U: CanDowncastUpgrade, +>( + upgraded: U, +) -> Result<(NetworkStream, Bytes), U> { + let upgraded = match upgraded.downcast::() { + Ok((stream, bytes)) => return Ok((stream.into(), bytes)), + Err(x) => x, + }; + + match upgraded.downcast::>() { + Ok((stream, upgraded_bytes)) => { + // Both the upgrade and the stream might have unread bytes + let (io, stream_bytes) = stream.into_inner(); + let bytes = match (stream_bytes.is_empty(), upgraded_bytes.is_empty()) { + (false, false) => Bytes::default(), + (true, false) => upgraded_bytes, + (false, true) => stream_bytes, + (true, true) => { + // The upgraded bytes come first as they have already been read + let mut v = upgraded_bytes.to_vec(); + v.append(&mut stream_bytes.to_vec()); + Bytes::from(v) + } + }; + Ok((io.into(), bytes)) + } + Err(x) => Err(x), + } +} + +fn extract_network_stream( + upgraded: U, +) -> (NetworkStream, Bytes) { + let upgraded = + match maybe_extract_network_stream::(upgraded) { + Ok(res) => return res, + Err(x) => x, + }; + let upgraded = + match maybe_extract_network_stream::( + upgraded, + ) { + Ok(res) => return res, + Err(x) => x, + }; + #[cfg(unix)] + let upgraded = + match maybe_extract_network_stream::(upgraded) { + Ok(res) => return res, + Err(x) => x, + }; + let upgraded = + match 
maybe_extract_network_stream::(upgraded) { + Ok(res) => return res, + Err(x) => x, + }; + + // TODO(mmastrac): HTTP/2 websockets may yield an un-downgradable type + drop(upgraded); + unreachable!("unexpected stream type"); +} diff --git a/vendor/deno_http/network_buffered_stream.rs b/vendor/deno_http/network_buffered_stream.rs new file mode 100644 index 00000000..73df2dbd --- /dev/null +++ b/vendor/deno_http/network_buffered_stream.rs @@ -0,0 +1,308 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use bytes::Bytes; +use deno_core::futures::future::poll_fn; +use deno_core::futures::ready; +use std::io; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::Poll; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::ReadBuf; + +const MAX_PREFIX_SIZE: usize = 256; + +/// [`NetworkStreamPrefixCheck`] is used to differentiate a stream between two different modes, depending +/// on whether the first bytes match a given prefix (or not). +/// +/// IMPORTANT: This stream makes the assumption that the incoming bytes will never partially match the prefix +/// and then "hang" waiting for a write. For this code not to hang, the incoming stream must: +/// +/// * match the prefix fully and then request writes at a later time +/// * not match the prefix, and then request writes after writing a byte that causes the prefix not to match +/// * not match the prefix and then close +pub struct NetworkStreamPrefixCheck { + buffer: [MaybeUninit; MAX_PREFIX_SIZE * 2], + io: S, + prefix: &'static [u8], +} + +impl NetworkStreamPrefixCheck { + pub fn new(io: S, prefix: &'static [u8]) -> Self { + debug_assert!(prefix.len() < MAX_PREFIX_SIZE); + Self { + io, + prefix, + buffer: [MaybeUninit::::uninit(); MAX_PREFIX_SIZE * 2], + } + } + + // Returns a [`NetworkBufferedStream`] and a flag determining if we matched a prefix, rewound with the bytes we read to determine what + // type of stream this is. 
+ pub async fn match_prefix( + self, + ) -> io::Result<(bool, NetworkBufferedStream)> { + let mut buffer = self.buffer; + let mut readbuf = ReadBuf::uninit(&mut buffer); + let mut io = self.io; + let prefix = self.prefix; + loop { + enum State { + Unknown, + Matched, + NotMatched, + } + + let state = poll_fn(|cx| { + let filled_len = readbuf.filled().len(); + let res = ready!(Pin::new(&mut io).poll_read(cx, &mut readbuf)); + if let Err(e) = res { + return Poll::Ready(Err(e)); + } + let filled = readbuf.filled(); + let new_len = filled.len(); + if new_len == filled_len { + // Empty read, no match + return Poll::Ready(Ok(State::NotMatched)); + } else if new_len < prefix.len() { + // Read less than prefix, make sure we're still matching the prefix (early exit) + if !prefix.starts_with(filled) { + return Poll::Ready(Ok(State::NotMatched)); + } + } else if new_len >= prefix.len() { + // We have enough to determine + if filled.starts_with(prefix) { + return Poll::Ready(Ok(State::Matched)); + } else { + return Poll::Ready(Ok(State::NotMatched)); + } + } + + Poll::Ready(Ok(State::Unknown)) + }) + .await?; + + match state { + State::Unknown => continue, + State::Matched => { + let initialized_len = readbuf.filled().len(); + return Ok(( + true, + NetworkBufferedStream::new(io, buffer, initialized_len), + )); + } + State::NotMatched => { + let initialized_len = readbuf.filled().len(); + return Ok(( + false, + NetworkBufferedStream::new(io, buffer, initialized_len), + )); + } + } + } + } +} + +/// [`NetworkBufferedStream`] is a stream that allows us to efficiently search for an incoming prefix in another stream without +/// reading too much data. If the stream detects that the prefix has definitely been matched, or definitely not been matched, +/// it returns a flag and a rewound stream allowing later code to take another pass at that data. +/// +/// [`NetworkBufferedStream`] is a custom wrapper around an asynchronous stream that implements AsyncRead +/// and AsyncWrite. 
It is designed to provide additional buffering functionality to the wrapped stream. +/// The primary use case for this struct is when you want to read a small amount of data from the beginning +/// of a stream, process it, and then continue reading the rest of the stream. +/// +/// While the bounds for the class are limited to [`AsyncRead`] for easier testing, it is far more useful to use +/// with interactive duplex streams that have a prefix determining which mode to operate in. For example, this class +/// can determine whether an incoming stream is HTTP/2 or non-HTTP/2 and allow downstream code to make that determination. +pub struct NetworkBufferedStream { + prefix: [MaybeUninit; MAX_PREFIX_SIZE * 2], + io: S, + initialized_len: usize, + prefix_offset: usize, + /// Have the prefix bytes been completely read out? + prefix_read: bool, +} + +impl NetworkBufferedStream { + /// This constructor is private, because passing partially initialized data between the [`NetworkStreamPrefixCheck`] and + /// this [`NetworkBufferedStream`] is challenging without the introduction of extra copies. + fn new( + io: S, + prefix: [MaybeUninit; MAX_PREFIX_SIZE * 2], + initialized_len: usize, + ) -> Self { + Self { + io, + initialized_len, + prefix_offset: 0, + prefix, + prefix_read: false, + } + } + + fn current_slice(&self) -> &[u8] { + // We trust that these bytes are initialized properly + let slice = &self.prefix[self.prefix_offset..self.initialized_len]; + + // This guarantee comes from slice_assume_init_ref (we can't use that until it's stable) + + // SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that + // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`. + // The pointer obtained is valid since it refers to memory owned by `slice` which is a + // reference and thus guaranteed to be valid for reads. 
+ + unsafe { &*(slice as *const [_] as *const [u8]) as _ } + } + + pub fn into_inner(self) -> (S, Bytes) { + let bytes = Bytes::copy_from_slice(self.current_slice()); + (self.io, bytes) + } +} + +impl AsyncRead for NetworkBufferedStream { + // From hyper's Rewind (https://github.com/hyperium/hyper), MIT License, Copyright (c) Sean McArthur + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if !self.prefix_read { + let prefix = self.current_slice(); + + // If there are no remaining bytes, let the bytes get dropped. + if !prefix.is_empty() { + let copy_len = std::cmp::min(prefix.len(), buf.remaining()); + buf.put_slice(&prefix[..copy_len]); + self.prefix_offset += copy_len; + + return Poll::Ready(Ok(())); + } else { + self.prefix_read = true; + } + } + Pin::new(&mut self.io).poll_read(cx, buf) + } +} + +impl AsyncWrite + for NetworkBufferedStream +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + Pin::new(&mut self.io).poll_write(cx, buf) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.io).poll_flush(cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.io).poll_shutdown(cx) + } + + fn is_write_vectored(&self) -> bool { + self.io.is_write_vectored() + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> std::task::Poll> { + Pin::new(&mut self.io).poll_write_vectored(cx, bufs) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::io::AsyncReadExt; + + struct YieldsOneByteAtATime(&'static [u8]); + + impl AsyncRead for YieldsOneByteAtATime { + fn poll_read( + mut self: Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if let 
Some((head, tail)) = self.as_mut().0.split_first() { + self.as_mut().0 = tail; + let dest = buf.initialize_unfilled_to(1); + dest[0] = *head; + buf.advance(1); + } + Poll::Ready(Ok(())) + } + } + + async fn test( + io: impl AsyncRead + Unpin, + prefix: &'static [u8], + expect_match: bool, + expect_string: &'static str, + ) -> io::Result<()> { + let (matches, mut io) = NetworkStreamPrefixCheck::new(io, prefix) + .match_prefix() + .await?; + assert_eq!(matches, expect_match); + let mut s = String::new(); + Pin::new(&mut io).read_to_string(&mut s).await?; + assert_eq!(s, expect_string); + Ok(()) + } + + #[tokio::test] + async fn matches_prefix_simple() -> io::Result<()> { + let buf = b"prefix match".as_slice(); + test(buf, b"prefix", true, "prefix match").await + } + + #[tokio::test] + async fn matches_prefix_exact() -> io::Result<()> { + let buf = b"prefix".as_slice(); + test(buf, b"prefix", true, "prefix").await + } + + #[tokio::test] + async fn not_matches_prefix_simple() -> io::Result<()> { + let buf = b"prefill match".as_slice(); + test(buf, b"prefix", false, "prefill match").await + } + + #[tokio::test] + async fn not_matches_prefix_short() -> io::Result<()> { + let buf = b"nope".as_slice(); + test(buf, b"prefix", false, "nope").await + } + + #[tokio::test] + async fn not_matches_prefix_empty() -> io::Result<()> { + let buf = b"".as_slice(); + test(buf, b"prefix", false, "").await + } + + #[tokio::test] + async fn matches_one_byte_at_a_time() -> io::Result<()> { + let buf = YieldsOneByteAtATime(b"prefix"); + test(buf, b"prefix", true, "prefix").await + } + + #[tokio::test] + async fn not_matches_one_byte_at_a_time() -> io::Result<()> { + let buf = YieldsOneByteAtATime(b"prefill"); + test(buf, b"prefix", false, "prefill").await + } +} diff --git a/vendor/deno_http/reader_stream.rs b/vendor/deno_http/reader_stream.rs new file mode 100644 index 00000000..be6d571b --- /dev/null +++ b/vendor/deno_http/reader_stream.rs @@ -0,0 +1,157 @@ +// Copyright 2018-2024 the 
Deno authors. All rights reserved. MIT license. + +use std::pin::Pin; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +use bytes::Bytes; +use deno_core::futures::Stream; +use pin_project::pin_project; +use tokio::io::AsyncRead; +use tokio_util::io::ReaderStream; + +/// [ExternallyAbortableByteStream] adapts a [tokio::AsyncRead] into a [Stream]. +/// It is used to bridge between the HTTP response body resource, and +/// `hyper::Body`. The stream has the special property that it errors if the +/// underlying reader is closed before an explicit EOF is sent (in the form of +/// setting the `shutdown` flag to true). +#[pin_project] +pub struct ExternallyAbortableReaderStream { + #[pin] + inner: ReaderStream, + done: Arc, +} + +pub struct ShutdownHandle(Arc); + +impl ShutdownHandle { + pub fn shutdown(&self) { + self.0.store(true, std::sync::atomic::Ordering::SeqCst); + } +} + +impl ExternallyAbortableReaderStream { + pub fn new(reader: R) -> (Self, ShutdownHandle) { + let done = Arc::new(AtomicBool::new(false)); + let this = Self { + inner: ReaderStream::new(reader), + done: done.clone(), + }; + (this, ShutdownHandle(done)) + } +} + +impl Stream for ExternallyAbortableReaderStream { + type Item = std::io::Result; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + let val = std::task::ready!(this.inner.poll_next(cx)); + match val { + None if this.done.load(Ordering::SeqCst) => Poll::Ready(None), + None => Poll::Ready(Some(Err(std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, + "stream reader has shut down", + )))), + Some(val) => Poll::Ready(Some(val)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::Bytes; + use deno_core::futures::StreamExt; + use tokio::io::AsyncWriteExt; + + #[tokio::test] + async fn success() { + let (a, b) = tokio::io::duplex(64 * 1024); + let (reader, _) = 
tokio::io::split(a); + let (_, mut writer) = tokio::io::split(b); + + let (mut stream, shutdown_handle) = + ExternallyAbortableReaderStream::new(reader); + + writer.write_all(b"hello").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello")); + + writer.write_all(b"world").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("world")); + + shutdown_handle.shutdown(); + writer.shutdown().await.unwrap(); + drop(writer); + assert!(stream.next().await.is_none()); + } + + #[tokio::test] + async fn error() { + let (a, b) = tokio::io::duplex(64 * 1024); + let (reader, _) = tokio::io::split(a); + let (_, mut writer) = tokio::io::split(b); + + let (mut stream, _shutdown_handle) = + ExternallyAbortableReaderStream::new(reader); + + writer.write_all(b"hello").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello")); + + drop(writer); + assert_eq!( + stream.next().await.unwrap().unwrap_err().kind(), + std::io::ErrorKind::UnexpectedEof + ); + } + + #[tokio::test] + async fn error2() { + let (a, b) = tokio::io::duplex(64 * 1024); + let (reader, _) = tokio::io::split(a); + let (_, mut writer) = tokio::io::split(b); + + let (mut stream, _shutdown_handle) = + ExternallyAbortableReaderStream::new(reader); + + writer.write_all(b"hello").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello")); + + writer.shutdown().await.unwrap(); + drop(writer); + assert_eq!( + stream.next().await.unwrap().unwrap_err().kind(), + std::io::ErrorKind::UnexpectedEof + ); + } + + #[tokio::test] + async fn write_after_shutdown() { + let (a, b) = tokio::io::duplex(64 * 1024); + let (reader, _) = tokio::io::split(a); + let (_, mut writer) = tokio::io::split(b); + + let (mut stream, shutdown_handle) = + ExternallyAbortableReaderStream::new(reader); + + writer.write_all(b"hello").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello")); + + 
writer.write_all(b"world").await.unwrap(); + assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("world")); + + shutdown_handle.shutdown(); + writer.shutdown().await.unwrap(); + + assert!(writer.write_all(b"!").await.is_err()); + + drop(writer); + assert!(stream.next().await.is_none()); + } +} diff --git a/vendor/deno_http/request_body.rs b/vendor/deno_http/request_body.rs new file mode 100644 index 00000000..f1c3f358 --- /dev/null +++ b/vendor/deno_http/request_body.rs @@ -0,0 +1,91 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use bytes::Bytes; +use deno_core::futures::stream::Peekable; +use deno_core::futures::Stream; +use deno_core::futures::StreamExt; +use deno_core::futures::TryFutureExt; +use deno_core::AsyncRefCell; +use deno_core::AsyncResult; +use deno_core::BufView; +use deno_core::RcRef; +use deno_core::Resource; +use hyper::body::Body; +use hyper::body::Incoming; +use hyper::body::SizeHint; +use std::borrow::Cow; +use std::pin::Pin; +use std::rc::Rc; +use std::task::ready; +use std::task::Poll; + +/// Converts a hyper incoming body stream into a stream of [`Bytes`] that we can use to read in V8. 
+struct ReadFuture(Incoming); + +impl Stream for ReadFuture { + type Item = Result; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + // Loop until we receive a non-empty frame from Hyper + let this = self.get_mut(); + loop { + let res = ready!(Pin::new(&mut this.0).poll_frame(cx)); + break match res { + Some(Ok(frame)) => { + if let Ok(data) = frame.into_data() { + // Ensure that we never yield an empty frame + if !data.is_empty() { + break Poll::Ready(Some(Ok(data))); + } + } + // Loop again so we don't lose the waker + continue; + } + Some(Err(e)) => Poll::Ready(Some(Err(e))), + None => Poll::Ready(None), + }; + } + } +} + +pub struct HttpRequestBody(AsyncRefCell>, SizeHint); + +impl HttpRequestBody { + pub fn new(body: Incoming) -> Self { + let size_hint = body.size_hint(); + Self(AsyncRefCell::new(ReadFuture(body).peekable()), size_hint) + } + + async fn read(self: Rc, limit: usize) -> Result { + let peekable = RcRef::map(self, |this| &this.0); + let mut peekable = peekable.borrow_mut().await; + match Pin::new(&mut *peekable).peek_mut().await { + None => Ok(BufView::empty()), + Some(Err(_)) => Err(peekable.next().await.unwrap().err().unwrap()), + Some(Ok(bytes)) => { + if bytes.len() <= limit { + // We can safely take the next item since we peeked it + return Ok(BufView::from(peekable.next().await.unwrap()?)); + } + let ret = bytes.split_to(limit); + Ok(BufView::from(ret)) + } + } + } +} + +impl Resource for HttpRequestBody { + fn name(&self) -> Cow { + "requestBody".into() + } + + fn read(self: Rc, limit: usize) -> AsyncResult { + Box::pin(HttpRequestBody::read(self, limit).map_err(Into::into)) + } + + fn size_hint(&self) -> (u64, Option) { + (self.1.lower(), self.1.upper()) + } +} diff --git a/vendor/deno_http/request_properties.rs b/vendor/deno_http/request_properties.rs new file mode 100644 index 00000000..39d35a79 --- /dev/null +++ b/vendor/deno_http/request_properties.rs @@ -0,0 +1,312 @@ +// Copyright 
2018-2024 the Deno authors. All rights reserved. MIT license. +use deno_core::error::AnyError; +use deno_core::OpState; +use deno_core::ResourceId; +use deno_net::raw::take_network_stream_listener_resource; +use deno_net::raw::take_network_stream_resource; +use deno_net::raw::NetworkStream; +use deno_net::raw::NetworkStreamAddress; +use deno_net::raw::NetworkStreamListener; +use deno_net::raw::NetworkStreamType; +use hyper::header::HOST; +use hyper::HeaderMap; +use hyper::Uri; +use std::borrow::Cow; +use std::net::Ipv4Addr; +use std::net::SocketAddr; +use std::net::SocketAddrV4; +use std::rc::Rc; + +// TODO(mmastrac): I don't like that we have to clone this, but it's one-time setup +#[derive(Clone)] +pub struct HttpListenProperties { + pub scheme: &'static str, + pub fallback_host: String, + pub local_port: Option, + pub stream_type: NetworkStreamType, +} + +#[derive(Clone)] +pub struct HttpConnectionProperties { + pub peer_address: Rc, + pub peer_port: Option, + pub local_port: Option, + pub stream_type: NetworkStreamType, +} + +pub struct HttpRequestProperties<'a> { + pub authority: Option>, +} + +/// Pluggable trait to determine listen, connection and request properties +/// for embedders that wish to provide alternative routes for incoming HTTP. +#[async_trait::async_trait(?Send)] +pub trait HttpPropertyExtractor { + type Listener: 'static; + type Connection; + + /// Given a listener [`ResourceId`], returns the [`HttpPropertyExtractor::Listener`]. + fn get_listener_for_rid( + state: &mut OpState, + listener_rid: ResourceId, + ) -> Result; + + /// Given a connection [`ResourceId`], returns the [`HttpPropertyExtractor::Connection`]. + fn get_connection_for_rid( + state: &mut OpState, + connection_rid: ResourceId, + ) -> Result; + + /// Determines the listener properties. + fn listen_properties_from_listener( + listener: &Self::Listener, + ) -> Result; + + /// Determines the listener properties given a [`HttpPropertyExtractor::Connection`]. 
+ fn listen_properties_from_connection( + connection: &Self::Connection, + ) -> Result; + + /// Accept a new [`HttpPropertyExtractor::Connection`] from the given listener [`HttpPropertyExtractor::Listener`]. + async fn accept_connection_from_listener( + listener: &Self::Listener, + ) -> Result; + + /// Determines the connection properties. + fn connection_properties( + listen_properties: &HttpListenProperties, + connection: &Self::Connection, + ) -> HttpConnectionProperties; + + /// Turn a given [`HttpPropertyExtractor::Connection`] into a [`NetworkStream`]. + fn to_network_stream_from_connection( + connection: Self::Connection, + ) -> NetworkStream; + + /// Determines the request properties. + fn request_properties<'a>( + connection_properties: &'a HttpConnectionProperties, + uri: &'a Uri, + headers: &'a HeaderMap, + ) -> HttpRequestProperties<'a>; +} + +pub struct DefaultHttpPropertyExtractor {} + +#[async_trait::async_trait(?Send)] +impl HttpPropertyExtractor for DefaultHttpPropertyExtractor { + type Listener = NetworkStreamListener; + + type Connection = NetworkStream; + + fn get_listener_for_rid( + state: &mut OpState, + listener_rid: ResourceId, + ) -> Result { + take_network_stream_listener_resource( + &mut state.resource_table, + listener_rid, + ) + } + + fn get_connection_for_rid( + state: &mut OpState, + stream_rid: ResourceId, + ) -> Result { + take_network_stream_resource(&mut state.resource_table, stream_rid) + } + + async fn accept_connection_from_listener( + listener: &NetworkStreamListener, + ) -> Result { + listener + .accept() + .await + .map_err(Into::into) + .map(|(stm, _)| stm) + } + + fn listen_properties_from_listener( + listener: &NetworkStreamListener, + ) -> Result { + let stream_type = listener.stream(); + let local_address = listener.listen_address()?; + listener_properties(stream_type, local_address) + } + + fn listen_properties_from_connection( + connection: &Self::Connection, + ) -> Result { + let stream_type = connection.stream(); + 
let local_address = connection.local_address()?; + listener_properties(stream_type, local_address) + } + + fn to_network_stream_from_connection( + connection: Self::Connection, + ) -> NetworkStream { + connection + } + + fn connection_properties( + listen_properties: &HttpListenProperties, + connection: &NetworkStream, + ) -> HttpConnectionProperties { + // We always want some sort of peer address. If we can't get one, just make up one. + let peer_address = connection.peer_address().unwrap_or_else(|_| { + NetworkStreamAddress::Ip(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(0, 0, 0, 0), + 0, + ))) + }); + let peer_port: Option = match peer_address { + NetworkStreamAddress::Ip(ip) => Some(ip.port()), + #[cfg(unix)] + NetworkStreamAddress::Unix(_) => None, + }; + let peer_address = match peer_address { + NetworkStreamAddress::Ip(addr) => Rc::from(addr.ip().to_string()), + #[cfg(unix)] + NetworkStreamAddress::Unix(_) => Rc::from("unix"), + }; + let local_port = listen_properties.local_port; + let stream_type = listen_properties.stream_type; + + HttpConnectionProperties { + peer_address, + peer_port, + local_port, + stream_type, + } + } + + fn request_properties<'a>( + connection_properties: &'a HttpConnectionProperties, + uri: &'a Uri, + headers: &'a HeaderMap, + ) -> HttpRequestProperties<'a> { + let authority = req_host( + uri, + headers, + connection_properties.stream_type, + connection_properties.local_port.unwrap_or_default(), + ); + + HttpRequestProperties { authority } + } +} + +fn listener_properties( + stream_type: NetworkStreamType, + local_address: NetworkStreamAddress, +) -> Result { + let scheme = req_scheme_from_stream_type(stream_type); + let fallback_host = req_host_from_addr(stream_type, &local_address); + let local_port: Option = match local_address { + NetworkStreamAddress::Ip(ip) => Some(ip.port()), + #[cfg(unix)] + NetworkStreamAddress::Unix(_) => None, + }; + Ok(HttpListenProperties { + scheme, + fallback_host, + local_port, + stream_type, + 
}) +} + +/// Compute the fallback address from the [`NetworkStreamListenAddress`]. If the request has no authority/host in +/// its URI, and there is no [`HeaderName::HOST`] header, we fall back to this. +fn req_host_from_addr( + stream_type: NetworkStreamType, + addr: &NetworkStreamAddress, +) -> String { + match addr { + NetworkStreamAddress::Ip(addr) => { + if (stream_type == NetworkStreamType::Tls && addr.port() == 443) + || (stream_type == NetworkStreamType::Tcp && addr.port() == 80) + { + if addr.ip().is_loopback() || addr.ip().is_unspecified() { + return "localhost".to_owned(); + } + addr.ip().to_string() + } else { + if addr.ip().is_loopback() || addr.ip().is_unspecified() { + return format!("localhost:{}", addr.port()); + } + addr.to_string() + } + } + // There is no standard way for unix domain socket URLs + // nginx and nodejs request use http://unix:[socket_path]:/ but it is not a valid URL + // httpie uses http+unix://[percent_encoding_of_path]/ which we follow + #[cfg(unix)] + NetworkStreamAddress::Unix(unix) => percent_encoding::percent_encode( + unix + .as_pathname() + .and_then(|x| x.to_str()) + .unwrap_or_default() + .as_bytes(), + percent_encoding::NON_ALPHANUMERIC, + ) + .to_string(), + } +} + +fn req_scheme_from_stream_type(stream_type: NetworkStreamType) -> &'static str { + match stream_type { + NetworkStreamType::Tcp => "http://", + NetworkStreamType::Tls => "https://", + #[cfg(unix)] + NetworkStreamType::Unix => "http+unix://", + } +} + +fn req_host<'a>( + uri: &'a Uri, + headers: &'a HeaderMap, + addr_type: NetworkStreamType, + port: u16, +) -> Option> { + // Unix sockets always use the socket address + #[cfg(unix)] + if addr_type == NetworkStreamType::Unix { + return None; + } + + // It is rare that an authority will be passed, but if it does, it takes priority + if let Some(auth) = uri.authority() { + match addr_type { + NetworkStreamType::Tcp => { + if port == 80 { + return Some(Cow::Borrowed(auth.host())); + } + } + 
NetworkStreamType::Tls => { + if port == 443 { + return Some(Cow::Borrowed(auth.host())); + } + } + #[cfg(unix)] + NetworkStreamType::Unix => {} + } + return Some(Cow::Borrowed(auth.as_str())); + } + + // TODO(mmastrac): Most requests will use this path and we probably will want to optimize it in the future + if let Some(host) = headers.get(HOST) { + return Some(match host.to_str() { + Ok(host) => Cow::Borrowed(host), + Err(_) => Cow::Owned( + host + .as_bytes() + .iter() + .cloned() + .map(char::from) + .collect::(), + ), + }); + } + + None +} diff --git a/vendor/deno_http/response_body.rs b/vendor/deno_http/response_body.rs new file mode 100644 index 00000000..bac43bf3 --- /dev/null +++ b/vendor/deno_http/response_body.rs @@ -0,0 +1,805 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use std::io::Write; +use std::pin::Pin; +use std::rc::Rc; + +use brotli::enc::encode::BrotliEncoderOperation; +use brotli::enc::encode::BrotliEncoderParameter; +use brotli::enc::encode::BrotliEncoderStateStruct; +use brotli::writer::StandardAlloc; +use bytes::Bytes; +use bytes::BytesMut; +use deno_core::error::AnyError; +use deno_core::futures::ready; +use deno_core::futures::FutureExt; +use deno_core::AsyncResult; +use deno_core::BufView; +use deno_core::Resource; +use flate2::write::GzEncoder; +use hyper::body::Frame; +use hyper::body::SizeHint; +use pin_project::pin_project; + +/// Simplification for nested types we use for our streams. We provide a way to convert from +/// this type into Hyper's body [`Frame`]. +pub enum ResponseStreamResult { + /// Stream is over. + EndOfStream, + /// Stream provided non-empty data. + NonEmptyBuf(BufView), + /// Stream is ready, but provided no data. Retry. This is a result that is like Pending, but does + /// not register a waker and should be called again at the lowest level of this code. Generally this + /// will only be returned from compression streams that require additional buffering. 
+ NoData, + /// Stream failed. + Error(AnyError), +} + +impl From for Option, AnyError>> { + fn from(value: ResponseStreamResult) -> Self { + match value { + ResponseStreamResult::EndOfStream => None, + ResponseStreamResult::NonEmptyBuf(buf) => Some(Ok(Frame::data(buf))), + ResponseStreamResult::Error(err) => Some(Err(err)), + // This result should be handled by retrying + ResponseStreamResult::NoData => unimplemented!(), + } + } +} + +pub trait PollFrame: Unpin { + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll; + + fn size_hint(&self) -> SizeHint; +} + +#[derive(PartialEq, Eq)] +pub enum Compression { + None, + GZip, + Brotli, +} + +pub enum ResponseStream { + /// A resource stream, piped in fast mode. + Resource(ResourceBodyAdapter), + #[cfg(test)] + TestChannel(tokio::sync::mpsc::Receiver), +} + +impl ResponseStream { + pub fn abort(self) { + match self { + ResponseStream::Resource(resource) => resource.stm.close(), + #[cfg(test)] + ResponseStream::TestChannel(..) => {} + } + } +} + +#[derive(Default)] +pub enum ResponseBytesInner { + /// An empty stream. + #[default] + Empty, + /// A completed stream. + Done, + /// A static buffer of bytes, sent in one fell swoop. + Bytes(BufView), + /// An uncompressed stream. + UncompressedStream(ResponseStream), + /// A GZip stream. + GZipStream(Box), + /// A Brotli stream. + BrotliStream(Box), +} + +impl std::fmt::Debug for ResponseBytesInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Done => f.write_str("Done"), + Self::Empty => f.write_str("Empty"), + Self::Bytes(..) => f.write_str("Bytes"), + Self::UncompressedStream(..) => f.write_str("Uncompressed"), + Self::GZipStream(..) => f.write_str("GZip"), + Self::BrotliStream(..) => f.write_str("Brotli"), + } + } +} + +impl ResponseBytesInner { + pub fn abort(self) { + match self { + Self::Done | Self::Empty | Self::Bytes(..) 
=> {} + Self::BrotliStream(stm) => stm.abort(), + Self::GZipStream(stm) => stm.abort(), + Self::UncompressedStream(stm) => stm.abort(), + } + } + + pub fn size_hint(&self) -> SizeHint { + match self { + Self::Done => SizeHint::with_exact(0), + Self::Empty => SizeHint::with_exact(0), + Self::Bytes(bytes) => SizeHint::with_exact(bytes.len() as u64), + Self::UncompressedStream(res) => res.size_hint(), + Self::GZipStream(..) => SizeHint::default(), + Self::BrotliStream(..) => SizeHint::default(), + } + } + + fn from_stream(compression: Compression, stream: ResponseStream) -> Self { + match compression { + Compression::GZip => { + Self::GZipStream(Box::new(GZipResponseStream::new(stream))) + } + Compression::Brotli => { + Self::BrotliStream(Box::new(BrotliResponseStream::new(stream))) + } + _ => Self::UncompressedStream(stream), + } + } + + pub fn from_resource( + compression: Compression, + stm: Rc, + auto_close: bool, + ) -> Self { + Self::from_stream( + compression, + ResponseStream::Resource(ResourceBodyAdapter::new(stm, auto_close)), + ) + } + + pub fn from_bufview(compression: Compression, buf: BufView) -> Self { + match compression { + Compression::GZip => { + let mut writer = + GzEncoder::new(Vec::new(), flate2::Compression::fast()); + writer.write_all(&buf).unwrap(); + Self::Bytes(BufView::from(writer.finish().unwrap())) + } + Compression::Brotli => { + // quality level 6 is based on google's nginx default value for + // on-the-fly compression + // https://github.com/google/ngx_brotli#brotli_comp_level + // lgwin 22 is equivalent to brotli window size of (2**22)-16 bytes + // (~4MB) + let mut writer = + brotli::CompressorWriter::new(Vec::new(), 65 * 1024, 6, 22); + writer.write_all(&buf).unwrap(); + writer.flush().unwrap(); + Self::Bytes(BufView::from(writer.into_inner())) + } + _ => Self::Bytes(buf), + } + } + + pub fn from_vec(compression: Compression, vec: Vec) -> Self { + match compression { + Compression::GZip => { + let mut writer = + 
GzEncoder::new(Vec::new(), flate2::Compression::fast()); + writer.write_all(&vec).unwrap(); + Self::Bytes(BufView::from(writer.finish().unwrap())) + } + Compression::Brotli => { + let mut writer = + brotli::CompressorWriter::new(Vec::new(), 65 * 1024, 6, 22); + writer.write_all(&vec).unwrap(); + writer.flush().unwrap(); + Self::Bytes(BufView::from(writer.into_inner())) + } + _ => Self::Bytes(BufView::from(vec)), + } + } + + /// Did we complete this response successfully? + pub fn is_complete(&self) -> bool { + matches!(self, ResponseBytesInner::Done | ResponseBytesInner::Empty) + } +} + +pub struct ResourceBodyAdapter { + auto_close: bool, + stm: Rc, + future: AsyncResult, +} + +impl ResourceBodyAdapter { + pub fn new(stm: Rc, auto_close: bool) -> Self { + let future = stm.clone().read(64 * 1024); + ResourceBodyAdapter { + auto_close, + stm, + future, + } + } +} + +impl PollFrame for ResponseStream { + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + match &mut *self { + ResponseStream::Resource(res) => Pin::new(res).poll_frame(cx), + #[cfg(test)] + ResponseStream::TestChannel(rx) => Pin::new(rx).poll_frame(cx), + } + } + + fn size_hint(&self) -> SizeHint { + match self { + ResponseStream::Resource(res) => res.size_hint(), + #[cfg(test)] + ResponseStream::TestChannel(_) => SizeHint::default(), + } + } +} + +impl PollFrame for ResourceBodyAdapter { + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let res = match ready!(self.future.poll_unpin(cx)) { + Err(err) => ResponseStreamResult::Error(err), + Ok(buf) => { + if buf.is_empty() { + if self.auto_close { + self.stm.clone().close(); + } + ResponseStreamResult::EndOfStream + } else { + // Re-arm the future + self.future = self.stm.clone().read(64 * 1024); + ResponseStreamResult::NonEmptyBuf(buf) + } + } + }; + std::task::Poll::Ready(res) + } + + fn size_hint(&self) -> SizeHint { + let hint = 
self.stm.size_hint(); + let mut size_hint = SizeHint::new(); + size_hint.set_lower(hint.0); + if let Some(upper) = hint.1 { + size_hint.set_upper(upper) + } + size_hint + } +} + +#[cfg(test)] +impl PollFrame for tokio::sync::mpsc::Receiver { + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let res = match ready!(self.poll_recv(cx)) { + Some(buf) => ResponseStreamResult::NonEmptyBuf(buf), + None => ResponseStreamResult::EndOfStream, + }; + std::task::Poll::Ready(res) + } + + fn size_hint(&self) -> SizeHint { + SizeHint::default() + } +} + +#[derive(Copy, Clone, Debug)] +enum GZipState { + Header, + Streaming, + Flushing, + Trailer, + EndOfStream, +} + +#[pin_project] +pub struct GZipResponseStream { + stm: flate2::Compress, + crc: flate2::Crc, + next_buf: Option, + partial: Option, + #[pin] + underlying: ResponseStream, + state: GZipState, +} + +impl GZipResponseStream { + pub fn new(underlying: ResponseStream) -> Self { + Self { + stm: flate2::Compress::new(flate2::Compression::fast(), false), + crc: flate2::Crc::new(), + next_buf: None, + partial: None, + state: GZipState::Header, + underlying, + } + } + + pub fn abort(self) { + self.underlying.abort() + } +} + +/// This is a minimal GZip header suitable for serving data from a webserver. We don't need to provide +/// most of the information. We're skipping header name, CRC, etc, and providing a null timestamp. +/// +/// We're using compression level 1, as higher levels don't produce significant size differences. 
This +/// is probably the reason why nginx's default gzip compression level is also 1: +/// +/// https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level +static GZIP_HEADER: Bytes = + Bytes::from_static(&[0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0x01, 0xff]); + +impl PollFrame for GZipResponseStream { + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let this = self.get_mut(); + let state = &mut this.state; + let orig_state = *state; + let frame = match *state { + GZipState::EndOfStream => { + return std::task::Poll::Ready(ResponseStreamResult::EndOfStream) + } + GZipState::Header => { + *state = GZipState::Streaming; + return std::task::Poll::Ready(ResponseStreamResult::NonEmptyBuf( + BufView::from(GZIP_HEADER.clone()), + )); + } + GZipState::Trailer => { + *state = GZipState::EndOfStream; + let mut v = Vec::with_capacity(8); + v.extend(&this.crc.sum().to_le_bytes()); + v.extend(&this.crc.amount().to_le_bytes()); + return std::task::Poll::Ready(ResponseStreamResult::NonEmptyBuf( + BufView::from(v), + )); + } + GZipState::Streaming => { + if let Some(partial) = this.partial.take() { + ResponseStreamResult::NonEmptyBuf(partial) + } else { + ready!(Pin::new(&mut this.underlying).poll_frame(cx)) + } + } + GZipState::Flushing => ResponseStreamResult::EndOfStream, + }; + + let stm = &mut this.stm; + + // Ideally we could use MaybeUninit here, but flate2 requires &[u8]. We should also try + // to dynamically adjust this buffer. 
+ let mut buf = this + .next_buf + .take() + .unwrap_or_else(|| BytesMut::zeroed(64 * 1024)); + + let start_in = stm.total_in(); + let start_out = stm.total_out(); + let res = match frame { + // Short-circuit these and just return + x @ (ResponseStreamResult::NoData | ResponseStreamResult::Error(..)) => { + return std::task::Poll::Ready(x) + } + ResponseStreamResult::EndOfStream => { + *state = GZipState::Flushing; + stm.compress(&[], &mut buf, flate2::FlushCompress::Finish) + } + ResponseStreamResult::NonEmptyBuf(mut input) => { + let res = stm.compress(&input, &mut buf, flate2::FlushCompress::Sync); + let len_in = (stm.total_in() - start_in) as usize; + debug_assert!(len_in <= input.len()); + this.crc.update(&input[..len_in]); + if len_in < input.len() { + input.advance_cursor(len_in); + this.partial = Some(input); + } + res + } + }; + let len = stm.total_out() - start_out; + let res = match res { + Err(err) => ResponseStreamResult::Error(err.into()), + Ok(flate2::Status::BufError) => { + // This should not happen + unreachable!("old={orig_state:?} new={state:?} buf_len={}", buf.len()); + } + Ok(flate2::Status::Ok) => { + if len == 0 { + this.next_buf = Some(buf); + ResponseStreamResult::NoData + } else { + buf.truncate(len as usize); + ResponseStreamResult::NonEmptyBuf(BufView::from(buf.freeze())) + } + } + Ok(flate2::Status::StreamEnd) => { + *state = GZipState::Trailer; + if len == 0 { + this.next_buf = Some(buf); + ResponseStreamResult::NoData + } else { + buf.truncate(len as usize); + ResponseStreamResult::NonEmptyBuf(BufView::from(buf.freeze())) + } + } + }; + + std::task::Poll::Ready(res) + } + + fn size_hint(&self) -> SizeHint { + SizeHint::default() + } +} + +#[derive(Copy, Clone, Debug)] +enum BrotliState { + Streaming, + Flushing, + EndOfStream, +} + +#[pin_project] +pub struct BrotliResponseStream { + state: BrotliState, + stm: BrotliEncoderStateStruct, + #[pin] + underlying: ResponseStream, +} + +impl BrotliResponseStream { + pub fn new(underlying: 
ResponseStream) -> Self { + let mut stm = BrotliEncoderStateStruct::new(StandardAlloc::default()); + // Quality level 6 is based on google's nginx default value for on-the-fly compression + // https://github.com/google/ngx_brotli#brotli_comp_level + // lgwin 22 is equivalent to brotli window size of (2**22)-16 bytes (~4MB) + stm.set_parameter(BrotliEncoderParameter::BROTLI_PARAM_QUALITY, 6); + stm.set_parameter(BrotliEncoderParameter::BROTLI_PARAM_LGWIN, 22); + Self { + stm, + state: BrotliState::Streaming, + underlying, + } + } + + pub fn abort(self) { + self.underlying.abort() + } +} + +fn max_compressed_size(input_size: usize) -> usize { + if input_size == 0 { + return 2; + } + + // [window bits / empty metadata] + N * [uncompressed] + [last empty] + let num_large_blocks = input_size >> 14; + let overhead = 2 + (4 * num_large_blocks) + 3 + 1; + let result = input_size + overhead; + + if result < input_size { + 0 + } else { + result + } +} + +impl PollFrame for BrotliResponseStream { + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let this = self.get_mut(); + let state = &mut this.state; + let frame = match *state { + BrotliState::Streaming => { + ready!(Pin::new(&mut this.underlying).poll_frame(cx)) + } + BrotliState::Flushing => ResponseStreamResult::EndOfStream, + BrotliState::EndOfStream => { + return std::task::Poll::Ready(ResponseStreamResult::EndOfStream); + } + }; + + let res = match frame { + ResponseStreamResult::NonEmptyBuf(buf) => { + let mut output_buffer = vec![0; max_compressed_size(buf.len())]; + let mut output_offset = 0; + + this.stm.compress_stream( + BrotliEncoderOperation::BROTLI_OPERATION_FLUSH, + &mut buf.len(), + &buf, + &mut 0, + &mut output_buffer.len(), + &mut output_buffer, + &mut output_offset, + &mut None, + &mut |_, _, _, _| (), + ); + + output_buffer.truncate(output_offset); + ResponseStreamResult::NonEmptyBuf(BufView::from(output_buffer)) + } + 
ResponseStreamResult::EndOfStream => { + let mut output_buffer = vec![0; 1024]; + let mut output_offset = 0; + + this.stm.compress_stream( + BrotliEncoderOperation::BROTLI_OPERATION_FINISH, + &mut 0, + &[], + &mut 0, + &mut output_buffer.len(), + &mut output_buffer, + &mut output_offset, + &mut None, + &mut |_, _, _, _| (), + ); + + if output_offset == 0 { + this.state = BrotliState::EndOfStream; + ResponseStreamResult::EndOfStream + } else { + this.state = BrotliState::Flushing; + output_buffer.truncate(output_offset); + ResponseStreamResult::NonEmptyBuf(BufView::from(output_buffer)) + } + } + _ => frame, + }; + + std::task::Poll::Ready(res) + } + + fn size_hint(&self) -> SizeHint { + SizeHint::default() + } +} + +#[allow(clippy::print_stderr)] +#[cfg(test)] +mod tests { + use super::*; + use deno_core::futures::future::poll_fn; + use std::hash::Hasher; + use std::io::Read; + use std::io::Write; + + fn zeros() -> Vec { + vec![0; 1024 * 1024] + } + + fn hard_to_gzip_data() -> Vec { + const SIZE: usize = 1024 * 1024; + let mut v = Vec::with_capacity(SIZE); + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + for i in 0..SIZE { + hasher.write_usize(i); + v.push(hasher.finish() as u8); + } + v + } + + fn already_gzipped_data() -> Vec { + let mut v = Vec::with_capacity(1024 * 1024); + let mut gz = + flate2::GzBuilder::new().write(&mut v, flate2::Compression::best()); + gz.write_all(&hard_to_gzip_data()).unwrap(); + _ = gz.finish().unwrap(); + v + } + + fn chunk(v: Vec) -> impl Iterator> { + // Chunk the data into 10k + let mut out = vec![]; + for v in v.chunks(10 * 1024) { + out.push(v.to_vec()); + } + out.into_iter() + } + + fn random(mut v: Vec) -> impl Iterator> { + let mut out = vec![]; + loop { + if v.is_empty() { + break; + } + let rand = (rand::random::() % v.len()) + 1; + let new = v.split_off(rand); + out.push(v); + v = new; + } + // Print the lengths of the vectors if we actually fail this test at some point + let lengths = 
out.iter().map(|v| v.len()).collect::>(); + eprintln!("Lengths = {:?}", lengths); + out.into_iter() + } + + fn front_load(mut v: Vec) -> impl Iterator> { + // Chunk the data at 90% + let offset = (v.len() * 90) / 100; + let v2 = v.split_off(offset); + vec![v, v2].into_iter() + } + + fn front_load_but_one(mut v: Vec) -> impl Iterator> { + let offset = v.len() - 1; + let v2 = v.split_off(offset); + vec![v, v2].into_iter() + } + + fn back_load(mut v: Vec) -> impl Iterator> { + // Chunk the data at 10% + let offset = (v.len() * 10) / 100; + let v2 = v.split_off(offset); + vec![v, v2].into_iter() + } + + async fn test_gzip(i: impl Iterator> + Send + 'static) { + let v = i.collect::>(); + let mut expected: Vec = vec![]; + for v in &v { + expected.extend(v); + } + let (tx, rx) = tokio::sync::mpsc::channel(1); + let underlying = ResponseStream::TestChannel(rx); + let mut resp = GZipResponseStream::new(underlying); + let handle = tokio::task::spawn(async move { + for chunk in v { + tx.send(chunk.into()).await.ok().unwrap(); + } + }); + // Limit how many times we'll loop + const LIMIT: usize = 1000; + let mut v: Vec = vec![]; + for i in 0..=LIMIT { + assert_ne!(i, LIMIT); + let frame = poll_fn(|cx| Pin::new(&mut resp).poll_frame(cx)).await; + if matches!(frame, ResponseStreamResult::EndOfStream) { + break; + } + if matches!(frame, ResponseStreamResult::NoData) { + continue; + } + let ResponseStreamResult::NonEmptyBuf(buf) = frame else { + panic!("Unexpected stream type"); + }; + assert_ne!(buf.len(), 0); + v.extend(&*buf); + } + + let mut gz = flate2::read::GzDecoder::new(&*v); + let mut v = vec![]; + gz.read_to_end(&mut v).unwrap(); + + assert_eq!(v, expected); + + handle.await.unwrap(); + } + + async fn test_brotli(i: impl Iterator> + Send + 'static) { + let v = i.collect::>(); + let mut expected: Vec = vec![]; + for v in &v { + expected.extend(v); + } + let (tx, rx) = tokio::sync::mpsc::channel(1); + let underlying = ResponseStream::TestChannel(rx); + let mut resp = 
BrotliResponseStream::new(underlying); + let handle = tokio::task::spawn(async move { + for chunk in v { + tx.send(chunk.into()).await.ok().unwrap(); + } + }); + // Limit how many times we'll loop + const LIMIT: usize = 1000; + let mut v: Vec = vec![]; + for i in 0..=LIMIT { + assert_ne!(i, LIMIT); + let frame = poll_fn(|cx| Pin::new(&mut resp).poll_frame(cx)).await; + if matches!(frame, ResponseStreamResult::EndOfStream) { + break; + } + if matches!(frame, ResponseStreamResult::NoData) { + continue; + } + let ResponseStreamResult::NonEmptyBuf(buf) = frame else { + panic!("Unexpected stream type"); + }; + assert_ne!(buf.len(), 0); + v.extend(&*buf); + } + + let mut gz = brotli::Decompressor::new(&*v, v.len()); + let mut v = vec![]; + if !expected.is_empty() { + gz.read_to_end(&mut v).unwrap(); + } + + assert_eq!(v, expected); + + handle.await.unwrap(); + } + + #[tokio::test] + async fn test_simple() { + test_brotli(vec![b"hello world".to_vec()].into_iter()).await; + test_gzip(vec![b"hello world".to_vec()].into_iter()).await; + } + + #[tokio::test] + async fn test_empty() { + test_brotli(vec![].into_iter()).await; + test_gzip(vec![].into_iter()).await; + } + + #[tokio::test] + async fn test_simple_zeros() { + test_brotli(vec![vec![0; 0x10000]].into_iter()).await; + test_gzip(vec![vec![0; 0x10000]].into_iter()).await; + } + + macro_rules! 
test { + ($vec:ident) => { + mod $vec { + #[tokio::test] + async fn chunk() { + let iter = super::chunk(super::$vec()); + super::test_gzip(iter).await; + let br_iter = super::chunk(super::$vec()); + super::test_brotli(br_iter).await; + } + + #[tokio::test] + async fn front_load() { + let iter = super::front_load(super::$vec()); + super::test_gzip(iter).await; + let br_iter = super::front_load(super::$vec()); + super::test_brotli(br_iter).await; + } + + #[tokio::test] + async fn front_load_but_one() { + let iter = super::front_load_but_one(super::$vec()); + super::test_gzip(iter).await; + let br_iter = super::front_load_but_one(super::$vec()); + super::test_brotli(br_iter).await; + } + + #[tokio::test] + async fn back_load() { + let iter = super::back_load(super::$vec()); + super::test_gzip(iter).await; + let br_iter = super::back_load(super::$vec()); + super::test_brotli(br_iter).await; + } + + #[tokio::test] + async fn random() { + let iter = super::random(super::$vec()); + super::test_gzip(iter).await; + let br_iter = super::random(super::$vec()); + super::test_brotli(br_iter).await; + } + } + }; + } + + test!(zeros); + test!(hard_to_gzip_data); + test!(already_gzipped_data); +} diff --git a/vendor/deno_http/service.rs b/vendor/deno_http/service.rs new file mode 100644 index 00000000..ce24dea4 --- /dev/null +++ b/vendor/deno_http/service.rs @@ -0,0 +1,705 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+use crate::request_properties::HttpConnectionProperties; +use crate::response_body::ResponseBytesInner; +use crate::response_body::ResponseStreamResult; +use deno_core::futures::ready; +use deno_core::BufView; +use deno_core::OpState; +use deno_core::ResourceId; +use http::request::Parts; +use hyper::body::Body; +use hyper::body::Frame; +use hyper::body::Incoming; +use hyper::body::SizeHint; +use hyper::header::HeaderMap; +use hyper::upgrade::OnUpgrade; + +use scopeguard::guard; +use scopeguard::ScopeGuard; +use std::cell::Cell; +use std::cell::Ref; +use std::cell::RefCell; +use std::cell::RefMut; +use std::future::Future; +use std::mem::ManuallyDrop; +use std::pin::Pin; +use std::rc::Rc; +use std::task::Context; +use std::task::Poll; +use std::task::Waker; +use tokio::sync::oneshot; + +pub type Request = hyper::Request; +pub type Response = hyper::Response; + +#[cfg(feature = "__http_tracing")] +pub static RECORD_COUNT: std::sync::atomic::AtomicUsize = + std::sync::atomic::AtomicUsize::new(0); + +macro_rules! http_general_trace { + ($($args:expr),*) => { + #[cfg(feature = "__http_tracing")] + { + let count = $crate::service::RECORD_COUNT + .load(std::sync::atomic::Ordering::SeqCst); + + println!( + "HTTP [+{count}]: {}", + format!($($args),*), + ); + } + }; +} + +macro_rules! http_trace { + ($record:expr $(, $args:expr)*) => { + #[cfg(feature = "__http_tracing")] + { + let count = $crate::service::RECORD_COUNT + .load(std::sync::atomic::Ordering::SeqCst); + + println!( + "HTTP [+{count}] id={:p} strong={}: {}", + $record, + std::rc::Rc::strong_count(&$record), + format!($($args),*), + ); + } + }; +} + +pub(crate) use http_general_trace; +pub(crate) use http_trace; + +pub(crate) struct HttpServerStateInner { + pool: Vec<(Rc, HeaderMap)>, +} + +/// A signalling version of `Rc` that allows one to poll for when all other references +/// to the `Rc` have been dropped. 
+#[repr(transparent)] +pub(crate) struct SignallingRc(Rc<(T, Cell>)>); + +impl SignallingRc { + #[inline] + pub fn new(t: T) -> Self { + Self(Rc::new((t, Default::default()))) + } + + #[inline] + pub fn strong_count(&self) -> usize { + Rc::strong_count(&self.0) + } + + /// Resolves when this is the only remaining reference. + #[inline] + pub fn poll_complete(&self, cx: &mut Context<'_>) -> Poll<()> { + if Rc::strong_count(&self.0) == 1 { + Poll::Ready(()) + } else { + self.0 .1.set(Some(cx.waker().clone())); + Poll::Pending + } + } +} + +impl Clone for SignallingRc { + #[inline] + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl Drop for SignallingRc { + #[inline] + fn drop(&mut self) { + // Trigger the waker iff the refcount is about to become 1. + if Rc::strong_count(&self.0) == 2 { + if let Some(waker) = self.0 .1.take() { + waker.wake(); + } + } + } +} + +impl std::ops::Deref for SignallingRc { + type Target = T; + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 .0 + } +} + +pub(crate) struct HttpServerState(RefCell); + +impl HttpServerState { + pub fn new() -> SignallingRc { + SignallingRc::new(Self(RefCell::new(HttpServerStateInner { + pool: Vec::new(), + }))) + } +} + +impl std::ops::Deref for HttpServerState { + type Target = RefCell; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +enum RequestBodyState { + Incoming(Incoming), + Resource(#[allow(dead_code)] HttpRequestBodyAutocloser), +} + +impl From for RequestBodyState { + fn from(value: Incoming) -> Self { + RequestBodyState::Incoming(value) + } +} + +/// Ensures that the request body closes itself when no longer needed. 
+pub struct HttpRequestBodyAutocloser(ResourceId, Rc>); + +impl HttpRequestBodyAutocloser { + pub fn new(res: ResourceId, op_state: Rc>) -> Self { + Self(res, op_state) + } +} + +impl Drop for HttpRequestBodyAutocloser { + fn drop(&mut self) { + if let Ok(res) = self.1.borrow_mut().resource_table.take_any(self.0) { + res.close(); + } + } +} + +pub(crate) async fn handle_request( + request: Request, + request_info: HttpConnectionProperties, + server_state: SignallingRc, // Keep server alive for duration of this future. + tx: tokio::sync::mpsc::Sender>, +) -> Result { + // If the underlying TCP connection is closed, this future will be dropped + // and execution could stop at any await point. + // The HttpRecord must live until JavaScript is done processing so is wrapped + // in an Rc. The guard ensures unneeded resources are freed at cancellation. + let guarded_record = guard( + HttpRecord::new(request, request_info, server_state), + HttpRecord::cancel, + ); + + // Clone HttpRecord and send to JavaScript for processing. + // Safe to unwrap as channel receiver is never closed. + tx.send(guarded_record.clone()).await.unwrap(); + + // Wait for JavaScript handler to return request. + http_trace!(*guarded_record, "handle_request response_ready.await"); + guarded_record.response_ready().await; + + // Defuse the guard. Must not await after this point. 
+ let record = ScopeGuard::into_inner(guarded_record); + http_trace!(record, "handle_request complete"); + let response = record.into_response(); + Ok(response) +} + +#[derive(Debug, thiserror::Error)] +#[error("upgrade unavailable")] +pub struct UpgradeUnavailableError; + +struct HttpRecordInner { + server_state: SignallingRc, + closed_channel: Option>, + request_info: HttpConnectionProperties, + request_parts: http::request::Parts, + request_body: Option, + response_parts: Option, + response_ready: bool, + response_waker: Option, + response_body: ResponseBytesInner, + response_body_finished: bool, + response_body_waker: Option, + trailers: Option, + been_dropped: bool, + finished: bool, + needs_close_after_finish: bool, +} + +pub struct HttpRecord(RefCell>); + +#[cfg(feature = "__http_tracing")] +impl Drop for HttpRecord { + fn drop(&mut self) { + RECORD_COUNT + .fetch_sub(1, std::sync::atomic::Ordering::SeqCst) + .checked_sub(1) + .expect("Count went below zero"); + http_general_trace!("HttpRecord::drop"); + } +} + +impl HttpRecord { + fn new( + request: Request, + request_info: HttpConnectionProperties, + server_state: SignallingRc, + ) -> Rc { + let (request_parts, request_body) = request.into_parts(); + let request_body = Some(request_body.into()); + let (mut response_parts, _) = http::Response::new(()).into_parts(); + let record = + if let Some((record, headers)) = server_state.borrow_mut().pool.pop() { + response_parts.headers = headers; + http_trace!(record, "HttpRecord::reuse"); + record + } else { + #[cfg(feature = "__http_tracing")] + { + RECORD_COUNT.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + } + + #[allow(clippy::let_and_return)] + let record = Rc::new(Self(RefCell::new(None))); + http_trace!(record, "HttpRecord::new"); + record + }; + *record.0.borrow_mut() = Some(HttpRecordInner { + server_state, + request_info, + request_parts, + request_body, + response_parts: Some(response_parts), + response_ready: false, + response_waker: None, + 
response_body: ResponseBytesInner::Empty, + response_body_finished: false, + response_body_waker: None, + trailers: None, + closed_channel: None, + been_dropped: false, + finished: false, + needs_close_after_finish: false, + }); + record + } + + fn finish(self: Rc) { + http_trace!(self, "HttpRecord::finish"); + let mut inner = self.self_mut(); + inner.response_body_finished = true; + let response_body_waker = inner.response_body_waker.take(); + let needs_close_after_finish = inner.needs_close_after_finish; + drop(inner); + if let Some(waker) = response_body_waker { + waker.wake(); + } + if !needs_close_after_finish { + self.recycle(); + } + } + + pub fn close_after_finish(self: Rc) { + debug_assert!(self.self_ref().needs_close_after_finish); + let mut inner = self.self_mut(); + inner.needs_close_after_finish = false; + if !inner.finished { + drop(inner); + self.recycle(); + } + } + + pub fn needs_close_after_finish(&self) -> RefMut<'_, bool> { + RefMut::map(self.self_mut(), |inner| &mut inner.needs_close_after_finish) + } + + pub fn on_cancel(&self, sender: oneshot::Sender<()>) { + self.self_mut().closed_channel = Some(sender); + } + + fn recycle(self: Rc) { + assert!( + Rc::strong_count(&self) == 1, + "HTTP state error: Expected to be last strong reference" + ); + let HttpRecordInner { + server_state, + request_parts: Parts { mut headers, .. }, + .. + } = self.0.borrow_mut().take().unwrap(); + + let inflight = server_state.strong_count(); + http_trace!(self, "HttpRecord::recycle inflight={}", inflight); + + // Keep a buffer of allocations on hand to be reused by incoming requests. + // Estimated target size is 16 + 1/8 the number of inflight requests. 
+ let target = 16 + (inflight >> 3); + let pool = &mut server_state.borrow_mut().pool; + if target > pool.len() { + headers.clear(); + pool.push((self, headers)); + } else if target < pool.len() - 8 { + pool.truncate(target); + } + } + + fn self_ref(&self) -> Ref<'_, HttpRecordInner> { + Ref::map(self.0.borrow(), |option| option.as_ref().unwrap()) + } + + fn self_mut(&self) -> RefMut<'_, HttpRecordInner> { + RefMut::map(self.0.borrow_mut(), |option| option.as_mut().unwrap()) + } + + /// Perform the Hyper upgrade on this record. + pub fn upgrade(&self) -> Result { + // Manually perform the upgrade. We're peeking into hyper's underlying machinery here a bit + self + .self_mut() + .request_parts + .extensions + .remove::() + .ok_or(UpgradeUnavailableError) + } + + /// Take the Hyper body from this record. + pub fn take_request_body(&self) -> Option { + let body_holder = &mut self.self_mut().request_body; + let body = body_holder.take(); + match body { + Some(RequestBodyState::Incoming(body)) => Some(body), + x => { + *body_holder = x; + None + } + } + } + + /// Replace the request body with a resource ID and the OpState we'll need to shut it down. + /// We cannot keep just the resource itself, as JS code might be reading from the resource ID + /// to generate the response data (requiring us to keep it in the resource table). + pub fn put_resource(&self, res: HttpRequestBodyAutocloser) { + self.self_mut().request_body = Some(RequestBodyState::Resource(res)); + } + + /// Cleanup resources not needed after the future is dropped. + fn cancel(self: Rc) { + http_trace!(self, "HttpRecord::cancel"); + let mut inner = self.self_mut(); + if inner.response_ready { + // Future dropped between wake() and async fn resuming. + drop(inner); + self.finish(); + return; + } + inner.been_dropped = true; + // The request body might include actual resources. 
+ inner.request_body.take(); + if let Some(closed_channel) = inner.closed_channel.take() { + let _ = closed_channel.send(()); + } + } + + /// Complete this record, potentially expunging it if it is fully complete (ie: cancelled as well). + pub fn complete(self: Rc) { + http_trace!(self, "HttpRecord::complete"); + let mut inner = self.self_mut(); + assert!( + !inner.response_ready, + "HTTP state error: Entry has already been completed" + ); + if inner.been_dropped { + drop(inner); + self.finish(); + return; + } + inner.response_ready = true; + if let Some(waker) = inner.response_waker.take() { + drop(inner); + waker.wake(); + } + } + + fn take_response_body(&self) -> ResponseBytesInner { + let mut inner = self.self_mut(); + debug_assert!( + !matches!(inner.response_body, ResponseBytesInner::Done), + "HTTP state error: response body already complete" + ); + std::mem::replace(&mut inner.response_body, ResponseBytesInner::Done) + } + + /// Has the future for this record been dropped? ie, has the underlying TCP connection + /// been closed? + pub fn cancelled(&self) -> bool { + self.self_ref().been_dropped + } + + /// Get a mutable reference to the response status and headers. + pub fn response_parts(&self) -> RefMut<'_, http::response::Parts> { + RefMut::map(self.self_mut(), |inner| { + inner.response_parts.as_mut().unwrap() + }) + } + + /// Get a mutable reference to the trailers. + pub fn trailers(&self) -> RefMut<'_, Option> { + RefMut::map(self.self_mut(), |inner| &mut inner.trailers) + } + + pub fn set_response_body(&self, response_body: ResponseBytesInner) { + let mut inner = self.self_mut(); + debug_assert!(matches!(inner.response_body, ResponseBytesInner::Empty)); + inner.response_body = response_body; + } + + /// Take the response. 
+ fn into_response(self: Rc) -> Response { + let parts = self.self_mut().response_parts.take().unwrap(); + let body = HttpRecordResponse(ManuallyDrop::new(self)); + Response::from_parts(parts, body) + } + + /// Get a reference to the connection properties. + pub fn request_info(&self) -> Ref<'_, HttpConnectionProperties> { + Ref::map(self.self_ref(), |inner| &inner.request_info) + } + + /// Get a reference to the request parts. + pub fn request_parts(&self) -> Ref<'_, Parts> { + Ref::map(self.self_ref(), |inner| &inner.request_parts) + } + + /// Resolves when response head is ready. + fn response_ready(&self) -> impl Future + '_ { + struct HttpRecordReady<'a>(&'a HttpRecord); + + impl<'a> Future for HttpRecordReady<'a> { + type Output = (); + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll { + let mut mut_self = self.0.self_mut(); + if mut_self.response_ready { + return Poll::Ready(()); + } + mut_self.response_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + + HttpRecordReady(self) + } + + /// Resolves when response body has finished streaming. Returns true if the + /// response completed. 
+ pub fn response_body_finished(&self) -> impl Future + '_ { + struct HttpRecordFinished<'a>(&'a HttpRecord); + + impl<'a> Future for HttpRecordFinished<'a> { + type Output = bool; + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll { + let mut mut_self = self.0.self_mut(); + if mut_self.response_body_finished { + // If we sent the response body and the trailers, this body completed successfully + return Poll::Ready( + mut_self.response_body.is_complete() && mut_self.trailers.is_none(), + ); + } + mut_self.response_body_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + + HttpRecordFinished(self) + } +} + +#[repr(transparent)] +pub struct HttpRecordResponse(ManuallyDrop>); + +impl Body for HttpRecordResponse { + type Data = BufView; + type Error = deno_core::error::AnyError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + use crate::response_body::PollFrame; + let record = &self.0; + + let res = loop { + let mut inner = record.self_mut(); + let res = match &mut inner.response_body { + ResponseBytesInner::Done | ResponseBytesInner::Empty => { + if let Some(trailers) = inner.trailers.take() { + return Poll::Ready(Some(Ok(Frame::trailers(trailers)))); + } + unreachable!() + } + ResponseBytesInner::Bytes(..) 
=> { + drop(inner); + let ResponseBytesInner::Bytes(data) = record.take_response_body() + else { + unreachable!(); + }; + return Poll::Ready(Some(Ok(Frame::data(data)))); + } + ResponseBytesInner::UncompressedStream(stm) => { + ready!(Pin::new(stm).poll_frame(cx)) + } + ResponseBytesInner::GZipStream(stm) => { + ready!(Pin::new(stm.as_mut()).poll_frame(cx)) + } + ResponseBytesInner::BrotliStream(stm) => { + ready!(Pin::new(stm.as_mut()).poll_frame(cx)) + } + }; + // This is where we retry the NoData response + if matches!(res, ResponseStreamResult::NoData) { + continue; + } + break res; + }; + + if matches!(res, ResponseStreamResult::EndOfStream) { + if let Some(trailers) = record.self_mut().trailers.take() { + return Poll::Ready(Some(Ok(Frame::trailers(trailers)))); + } + record.take_response_body(); + } + Poll::Ready(res.into()) + } + + fn is_end_stream(&self) -> bool { + let inner = self.0.self_ref(); + matches!( + inner.response_body, + ResponseBytesInner::Done | ResponseBytesInner::Empty + ) && inner.trailers.is_none() + } + + fn size_hint(&self) -> SizeHint { + // The size hint currently only used in the case where it is exact bounds in hyper, but we'll pass it through + // anyways just in case hyper needs it. + self.0.self_ref().response_body.size_hint() + } +} + +impl Drop for HttpRecordResponse { + fn drop(&mut self) { + // SAFETY: this ManuallyDrop is not used again. + let record = unsafe { ManuallyDrop::take(&mut self.0) }; + http_trace!(record, "HttpRecordResponse::drop"); + record.finish(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::response_body::Compression; + use crate::response_body::ResponseBytesInner; + use bytes::Buf; + use deno_net::raw::NetworkStreamType; + use hyper::body::Body; + use hyper::service::service_fn; + use hyper::service::HttpService; + use hyper_util::rt::TokioIo; + use std::error::Error as StdError; + + /// Execute client request on service and concurrently map the response. 
+ async fn serve_request( + req: http::Request, + service: S, + map_response: impl FnOnce(hyper::Response) -> F, + ) -> hyper::Result + where + B: Body + Send + 'static, // Send bound due to DuplexStream + B::Data: Send, + B::Error: Into>, + S: HttpService, + S::Error: Into>, + S::ResBody: 'static, + ::Error: Into>, + F: std::future::Future>, + { + use hyper::client::conn::http1::handshake; + use hyper::server::conn::http1::Builder; + let (stream_client, stream_server) = tokio::io::duplex(16 * 1024); + let conn_server = + Builder::new().serve_connection(TokioIo::new(stream_server), service); + let (mut sender, conn_client) = + handshake(TokioIo::new(stream_client)).await?; + + let (res, _, _) = tokio::try_join!( + async move { + let res = sender.send_request(req).await?; + map_response(res).await + }, + conn_server, + conn_client, + )?; + Ok(res) + } + + #[tokio::test] + async fn test_handle_request() -> Result<(), deno_core::error::AnyError> { + let (tx, mut rx) = tokio::sync::mpsc::channel(10); + let server_state = HttpServerState::new(); + let server_state_check = server_state.clone(); + let request_info = HttpConnectionProperties { + peer_address: "".into(), + peer_port: None, + local_port: None, + stream_type: NetworkStreamType::Tcp, + }; + let svc = service_fn(move |req: hyper::Request| { + handle_request( + req, + request_info.clone(), + server_state.clone(), + tx.clone(), + ) + }); + + let client_req = http::Request::builder().uri("/").body("".to_string())?; + + // Response produced by concurrent tasks + tokio::try_join!( + async move { + // JavaScript handler produces response + let record = rx.recv().await.unwrap(); + record.set_response_body(ResponseBytesInner::from_vec( + Compression::None, + b"hello world".to_vec(), + )); + record.complete(); + Ok(()) + }, + // Server connection executes service + async move { + serve_request(client_req, svc, |res| async { + // Client reads the response + use http_body_util::BodyExt; + assert_eq!(res.status(), 200); + 
let body = res.collect().await?.to_bytes(); + assert_eq!(body.chunk(), b"hello world"); + Ok(()) + }) + .await + }, + )?; + assert_eq!(server_state_check.strong_count(), 1); + Ok(()) + } +} diff --git a/vendor/deno_http/websocket_upgrade.rs b/vendor/deno_http/websocket_upgrade.rs new file mode 100644 index 00000000..af950471 --- /dev/null +++ b/vendor/deno_http/websocket_upgrade.rs @@ -0,0 +1,355 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use std::marker::PhantomData; + +use bytes::Bytes; +use bytes::BytesMut; +use httparse::Status; +use hyper::header::HeaderName; +use hyper::header::HeaderValue; +use hyper::Response; +use memmem::Searcher; +use memmem::TwoWaySearcher; +use once_cell::sync::OnceCell; + +#[derive(Debug, thiserror::Error)] +pub enum WebSocketUpgradeError { + #[error("invalid headers")] + InvalidHeaders, + #[error("{0}")] + HttpParse(#[from] httparse::Error), + #[error("{0}")] + Http(#[from] http::Error), + #[error("{0}")] + Utf8(#[from] std::str::Utf8Error), + #[error("{0}")] + InvalidHeaderName(#[from] http::header::InvalidHeaderName), + #[error("{0}")] + InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), + #[error("invalid HTTP status line")] + InvalidHttpStatusLine, + #[error("attempted to write to completed upgrade buffer")] + UpgradeBufferAlreadyCompleted, +} + +/// Given a buffer that ends in `\n\n` or `\r\n\r\n`, returns a parsed [`Request`]. 
+fn parse_response( + header_bytes: &[u8], +) -> Result<(usize, Response), WebSocketUpgradeError> { + let mut headers = [httparse::EMPTY_HEADER; 16]; + let status = httparse::parse_headers(header_bytes, &mut headers)?; + match status { + Status::Complete((index, parsed)) => { + let mut resp = Response::builder().status(101).body(T::default())?; + for header in parsed.iter() { + resp.headers_mut().append( + HeaderName::from_bytes(header.name.as_bytes())?, + HeaderValue::from_str(std::str::from_utf8(header.value)?)?, + ); + } + Ok((index, resp)) + } + _ => Err(WebSocketUpgradeError::InvalidHeaders), + } +} + +/// Find a newline in a slice. +fn find_newline(slice: &[u8]) -> Option { + for (i, byte) in slice.iter().enumerate() { + if *byte == b'\n' { + return Some(i); + } + } + None +} + +/// WebSocket upgrade state machine states. +#[derive(Default)] +enum WebSocketUpgradeState { + #[default] + Initial, + StatusLine, + Headers, + Complete, +} + +static HEADER_SEARCHER: OnceCell = OnceCell::new(); +static HEADER_SEARCHER2: OnceCell = OnceCell::new(); + +#[derive(Default)] +pub struct WebSocketUpgrade { + state: WebSocketUpgradeState, + buf: BytesMut, + _t: PhantomData, +} + +impl WebSocketUpgrade { + /// Ensures that the status line starts with "HTTP/1.1 101 " which matches all of the node.js + /// WebSocket libraries that are known. We don't care about the trailing status text. + fn validate_status( + &self, + status: &[u8], + ) -> Result<(), WebSocketUpgradeError> { + if status.starts_with(b"HTTP/1.1 101 ") { + Ok(()) + } else { + Err(WebSocketUpgradeError::InvalidHttpStatusLine) + } + } + + /// Writes bytes to our upgrade buffer, returning [`Ok(None)`] if we need to keep feeding it data, + /// [`Ok(Some(Response))`] if we got a valid upgrade header, or [`Err`] if something went badly. 
+ pub fn write( + &mut self, + bytes: &[u8], + ) -> Result, Bytes)>, WebSocketUpgradeError> { + use WebSocketUpgradeState::*; + + match self.state { + Initial => { + if let Some(index) = find_newline(bytes) { + let (status, rest) = bytes.split_at(index + 1); + self.validate_status(status)?; + + // Fast path for the most common node.js WebSocket libraries that use \r\n as the + // separator between header lines and send the whole response in one packet. + if rest.ends_with(b"\r\n\r\n") { + let (index, response) = parse_response(rest)?; + if index == rest.len() { + return Ok(Some((response, Bytes::default()))); + } else { + let bytes = Bytes::copy_from_slice(&rest[index..]); + return Ok(Some((response, bytes))); + } + } + + self.state = Headers; + self.write(rest) + } else { + self.state = StatusLine; + self.buf.extend_from_slice(bytes); + Ok(None) + } + } + StatusLine => { + if let Some(index) = find_newline(bytes) { + let (status, rest) = bytes.split_at(index + 1); + self.buf.extend_from_slice(status); + self.validate_status(&self.buf)?; + self.buf.clear(); + // Recursively process this write + self.state = Headers; + self.write(rest) + } else { + self.buf.extend_from_slice(bytes); + Ok(None) + } + } + Headers => { + self.buf.extend_from_slice(bytes); + let header_searcher = + HEADER_SEARCHER.get_or_init(|| TwoWaySearcher::new(b"\r\n\r\n")); + let header_searcher2 = + HEADER_SEARCHER2.get_or_init(|| TwoWaySearcher::new(b"\n\n")); + if header_searcher.search_in(&self.buf).is_some() + || header_searcher2.search_in(&self.buf).is_some() + { + let (index, response) = parse_response(&self.buf)?; + let mut buf = std::mem::take(&mut self.buf); + self.state = Complete; + Ok(Some((response, buf.split_off(index).freeze()))) + } else { + Ok(None) + } + } + Complete => Err(WebSocketUpgradeError::UpgradeBufferAlreadyCompleted), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hyper_v014::Body; + + type ExpectedResponseAndHead = Option<(Response, &'static [u8])>; + + 
fn assert_response( + result: Result, Bytes)>, WebSocketUpgradeError>, + expected: Result, + chunk_info: Option<(usize, usize)>, + ) { + let formatted = format!("{result:?}"); + match expected { + Ok(Some((resp1, remainder1))) => match result { + Ok(Some((resp2, remainder2))) => { + assert_eq!(format!("{resp1:?}"), format!("{resp2:?}")); + if let Some((byte_len, chunk_size)) = chunk_info { + // We need to compute how many bytes should be in the trailing data + + // We know how many bytes of header data we had + let last_packet_header_size = + (byte_len - remainder1.len() + chunk_size - 1) % chunk_size + 1; + + // Which means we can compute how much was in the remainder + let remaining = + (chunk_size - last_packet_header_size).min(remainder1.len()); + + assert_eq!(remainder1[..remaining], remainder2); + } else { + assert_eq!(remainder1, remainder2); + } + } + _ => panic!("Expected Ok(Some(...)), was {formatted}"), + }, + Ok(None) => assert!( + result.ok().unwrap().is_none(), + "Expected Ok(None), was {formatted}", + ), + Err(e) => assert_eq!( + format!("{e:?}"), + format!("{:?}", result.unwrap_err()), + "Expected error, was {formatted}", + ), + } + } + + fn validate_upgrade_all_at_once( + s: &str, + expected: Result, + ) { + let mut upgrade = WebSocketUpgrade::default(); + let res = upgrade.write(s.as_bytes()); + + assert_response(res, expected, None); + } + + fn validate_upgrade_chunks( + s: &str, + size: usize, + expected: Result, + ) { + let chunk_info = Some((s.as_bytes().len(), size)); + let mut upgrade = WebSocketUpgrade::default(); + let mut result = Ok(None); + for chunk in s.as_bytes().chunks(size) { + result = upgrade.write(chunk); + if let Ok(Some(..)) = &result { + assert_response(result, expected, chunk_info); + return; + } + } + assert_response(result, expected, chunk_info); + } + + fn validate_upgrade( + s: &str, + expected: fn() -> Result, + ) { + validate_upgrade_all_at_once(s, expected()); + validate_upgrade_chunks(s, 1, expected()); + 
validate_upgrade_chunks(s, 2, expected()); + validate_upgrade_chunks(s, 10, expected()); + + // Replace \n with \r\n, but only in headers + let (headers, trailing) = s.split_once("\n\n").unwrap(); + let s = headers.replace('\n', "\r\n") + "\r\n\r\n" + trailing; + let s = s.as_ref(); + + validate_upgrade_all_at_once(s, expected()); + validate_upgrade_chunks(s, 1, expected()); + validate_upgrade_chunks(s, 2, expected()); + validate_upgrade_chunks(s, 10, expected()); + } + + #[test] + fn upgrade1() { + validate_upgrade( + "HTTP/1.1 101 Switching Protocols\nConnection: Upgrade\n\n", + || { + let mut expected = + Response::builder().status(101).body(Body::empty()).unwrap(); + expected.headers_mut().append( + HeaderName::from_static("connection"), + HeaderValue::from_static("Upgrade"), + ); + Ok(Some((expected, b""))) + }, + ); + } + + #[test] + fn upgrade_trailing() { + validate_upgrade( + "HTTP/1.1 101 Switching Protocols\nConnection: Upgrade\n\ntrailing data", + || { + let mut expected = + Response::builder().status(101).body(Body::empty()).unwrap(); + expected.headers_mut().append( + HeaderName::from_static("connection"), + HeaderValue::from_static("Upgrade"), + ); + Ok(Some((expected, b"trailing data"))) + }, + ); + } + + #[test] + fn upgrade_trailing_with_newlines() { + validate_upgrade( + "HTTP/1.1 101 Switching Protocols\nConnection: Upgrade\n\ntrailing data\r\n\r\n", + || { + let mut expected = + Response::builder().status(101).body(Body::empty()).unwrap(); + expected.headers_mut().append( + HeaderName::from_static("connection"), + HeaderValue::from_static("Upgrade"), + ); + Ok(Some((expected, b"trailing data\r\n\r\n"))) + }, + ); + } + + #[test] + fn upgrade2() { + validate_upgrade( + "HTTP/1.1 101 Switching Protocols\nConnection: Upgrade\nOther: 123\n\n", + || { + let mut expected = + Response::builder().status(101).body(Body::empty()).unwrap(); + expected.headers_mut().append( + HeaderName::from_static("connection"), + HeaderValue::from_static("Upgrade"), + 
); + expected.headers_mut().append( + HeaderName::from_static("other"), + HeaderValue::from_static("123"), + ); + Ok(Some((expected, b""))) + }, + ); + } + + #[test] + fn upgrade_invalid_status() { + validate_upgrade("HTTP/1.1 200 OK\nConnection: Upgrade\n\n", || { + Err(WebSocketUpgradeError::InvalidHttpStatusLine) + }); + } + + #[test] + fn upgrade_too_many_headers() { + let headers = (0..20) + .map(|i| format!("h{i}: {i}")) + .collect::>() + .join("\n"); + validate_upgrade( + &format!("HTTP/1.1 101 Switching Protocols\n{headers}\n\n"), + || { + Err(WebSocketUpgradeError::HttpParse( + httparse::Error::TooManyHeaders, + )) + }, + ); + } +} diff --git a/vendor/deno_telemetry/Cargo.toml b/vendor/deno_telemetry/Cargo.toml new file mode 100644 index 00000000..339fa6d8 --- /dev/null +++ b/vendor/deno_telemetry/Cargo.toml @@ -0,0 +1,35 @@ +# Copyright 2018-2025 the Deno authors. All rights reserved. MIT license. + +[package] +name = "deno_telemetry" +version = "0.17.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +readme = "README.md" +repository.workspace = true +description = "Telemetry for Deno" + +[lib] +path = "lib.rs" + +[dependencies] +base_rt = { version = "0.1.0", path = "../../crates/base_rt" } + +async-trait.workspace = true +deno_core.workspace = true +deno_tls.workspace = true +http-body-util.workspace = true +hyper.workspace = true +hyper-rustls.workspace = true +hyper-util.workspace = true +log.workspace = true +once_cell.workspace = true +opentelemetry.workspace = true +opentelemetry-http.workspace = true +opentelemetry-otlp.workspace = true +opentelemetry-semantic-conventions.workspace = true +opentelemetry_sdk.workspace = true +pin-project.workspace = true +serde.workspace = true +tokio.workspace = true diff --git a/vendor/deno_telemetry/README.md b/vendor/deno_telemetry/README.md new file mode 100644 index 00000000..6931a3b8 --- /dev/null +++ b/vendor/deno_telemetry/README.md @@ -0,0 +1,3 @@ +# `deno_telemetry` + +This 
crate implements telemetry for Deno using OpenTelemetry. diff --git a/vendor/deno_telemetry/lib.rs b/vendor/deno_telemetry/lib.rs new file mode 100644 index 00000000..fce620be --- /dev/null +++ b/vendor/deno_telemetry/lib.rs @@ -0,0 +1,2179 @@ +// Copyright 2018-2025 the Deno authors. MIT license. + +#![allow(clippy::too_many_arguments)] + +use std::borrow::Cow; +use std::cell::RefCell; +use std::collections::HashMap; +use std::env; +use std::fmt::Debug; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use std::sync::Mutex; +use std::task::Context; +use std::task::Poll; +use std::thread; +use std::time::Duration; +use std::time::SystemTime; + +use base_rt::RuntimeOtelExtraAttributes; +use deno_core::anyhow; +use deno_core::anyhow::anyhow; +use deno_core::anyhow::bail; +use deno_core::futures::channel::mpsc; +use deno_core::futures::channel::mpsc::UnboundedSender; +use deno_core::futures::future::BoxFuture; +use deno_core::futures::stream; +use deno_core::futures::FutureExt; +use deno_core::futures::Stream; +use deno_core::futures::StreamExt; +use deno_core::op2; +use deno_core::v8; +use deno_core::v8::DataError; +use deno_core::GarbageCollected; +use deno_core::OpState; +use once_cell::sync::Lazy; +use once_cell::sync::OnceCell; +use opentelemetry::logs::AnyValue; +use opentelemetry::logs::LogRecord as LogRecordTrait; +use opentelemetry::logs::Severity; +use opentelemetry::metrics::AsyncInstrumentBuilder; +pub use opentelemetry::metrics::Histogram; +use opentelemetry::metrics::InstrumentBuilder; +pub use opentelemetry::metrics::MeterProvider; +pub use opentelemetry::metrics::UpDownCounter; +use opentelemetry::otel_debug; +use opentelemetry::otel_error; +use opentelemetry::trace::Link; +use opentelemetry::trace::SpanContext; +use opentelemetry::trace::SpanId; +use opentelemetry::trace::SpanKind; +use opentelemetry::trace::Status as SpanStatus; +use opentelemetry::trace::TraceFlags; +use opentelemetry::trace::TraceId; 
+use opentelemetry::trace::TraceState; +use opentelemetry::InstrumentationScope; +pub use opentelemetry::Key; +pub use opentelemetry::KeyValue; +pub use opentelemetry::StringValue; +pub use opentelemetry::Value; +use opentelemetry_otlp::HttpExporterBuilder; +use opentelemetry_otlp::Protocol; +use opentelemetry_otlp::WithExportConfig; +use opentelemetry_otlp::WithHttpConfig; +use opentelemetry_sdk::export::trace::SpanData; +use opentelemetry_sdk::logs::BatchLogProcessor; +use opentelemetry_sdk::logs::LogProcessor; +use opentelemetry_sdk::logs::LogRecord; +use opentelemetry_sdk::metrics::exporter::PushMetricExporter; +use opentelemetry_sdk::metrics::reader::MetricReader; +use opentelemetry_sdk::metrics::ManualReader; +use opentelemetry_sdk::metrics::MetricResult; +use opentelemetry_sdk::metrics::SdkMeterProvider; +use opentelemetry_sdk::metrics::Temporality; +use opentelemetry_sdk::trace::BatchSpanProcessor; +use opentelemetry_sdk::trace::IdGenerator; +use opentelemetry_sdk::trace::RandomIdGenerator; +use opentelemetry_sdk::trace::SpanEvents; +use opentelemetry_sdk::trace::SpanLinks; +use opentelemetry_sdk::trace::SpanProcessor as _; +use opentelemetry_sdk::Resource; +use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_NAME; +use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_VERSION; +use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_LANGUAGE; +use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_NAME; +use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_VERSION; +use serde::Deserialize; +use serde::Serialize; +use tokio::sync::oneshot; +use tokio::task::JoinSet; + +deno_core::extension!( + deno_telemetry, + ops = [ + op_otel_log, + op_otel_log_foreign, + op_otel_span_attribute1, + op_otel_span_attribute2, + op_otel_span_attribute3, + op_otel_span_add_link, + op_otel_span_update_name, + op_otel_metric_attribute3, + op_otel_metric_record0, + op_otel_metric_record1, + op_otel_metric_record2, + 
op_otel_metric_record3, + op_otel_metric_observable_record0, + op_otel_metric_observable_record1, + op_otel_metric_observable_record2, + op_otel_metric_observable_record3, + op_otel_metric_wait_to_observe, + op_otel_metric_observation_done, + ], + objects = [OtelTracer, OtelMeter, OtelSpan], + esm = ["telemetry.ts", "util.ts"], +); + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OtelRuntimeConfig { + pub runtime_name: Cow<'static, str>, + pub runtime_version: Cow<'static, str>, +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct OtelConfig { + pub tracing_enabled: bool, + pub metrics_enabled: bool, + pub console: OtelConsoleConfig, + pub deterministic_prefix: Option, + pub propagators: std::collections::HashSet, +} + +impl OtelConfig { + pub fn as_v8(&self) -> Box<[u8]> { + let mut data = vec![ + self.tracing_enabled as u8, + self.metrics_enabled as u8, + self.console as u8, + ]; + + data.extend(self.propagators.iter().map(|propagator| *propagator as u8)); + + data.into_boxed_slice() + } +} + +#[derive( + Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, +)] +#[repr(u8)] +pub enum OtelPropagators { + TraceContext = 0, + Baggage = 1, + #[default] + None = 2, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[repr(u8)] +pub enum OtelConsoleConfig { + Ignore = 0, + Capture = 1, + Replace = 2, +} + +impl Default for OtelConsoleConfig { + fn default() -> Self { + Self::Ignore + } +} + +static OTEL_SHARED_RUNTIME_SPAWN_TASK_TX: Lazy< + UnboundedSender>, +> = Lazy::new(otel_create_shared_runtime); + +static OTEL_PRE_COLLECT_CALLBACKS: Lazy< + Mutex>>>, +> = Lazy::new(Default::default); + +fn otel_create_shared_runtime() -> UnboundedSender> { + let (spawn_task_tx, mut spawn_task_rx) = + mpsc::unbounded::>(); + + thread::spawn(move || { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_io() + .enable_time() + // This limits the number of threads for blocking operations (like for + // 
synchronous fs ops) or CPU bound tasks like when we run dprint in + // parallel for deno fmt. + // The default value is 512, which is an unhelpfully large thread pool. We + // don't ever want to have more than a couple dozen threads. + .max_blocking_threads(if cfg!(windows) { + // on windows, tokio uses blocking tasks for child process IO, make sure + // we have enough available threads for other tasks to run + 4 * std::thread::available_parallelism() + .map(|n| n.get()) + .unwrap_or(8) + } else { + 32 + }) + .build() + .unwrap(); + + rt.block_on(async move { + while let Some(task) = spawn_task_rx.next().await { + tokio::spawn(task); + } + }); + }); + + spawn_task_tx +} + +#[derive(Clone, Copy)] +pub struct OtelSharedRuntime; + +impl hyper::rt::Executor> for OtelSharedRuntime { + fn execute(&self, fut: BoxFuture<'static, ()>) { + (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX) + .unbounded_send(fut) + .expect("failed to send task to shared OpenTelemetry runtime"); + } +} + +impl opentelemetry_sdk::runtime::Runtime for OtelSharedRuntime { + type Interval = Pin + Send + 'static>>; + type Delay = Pin>; + + fn interval(&self, period: Duration) -> Self::Interval { + stream::repeat(()) + .then(move |_| tokio::time::sleep(period)) + .boxed() + } + + fn spawn(&self, future: BoxFuture<'static, ()>) { + (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX) + .unbounded_send(future) + .expect("failed to send task to shared OpenTelemetry runtime"); + } + + fn delay(&self, duration: Duration) -> Self::Delay { + Box::pin(tokio::time::sleep(duration)) + } +} + +impl opentelemetry_sdk::runtime::RuntimeChannel for OtelSharedRuntime { + type Receiver = BatchMessageChannelReceiver; + type Sender = BatchMessageChannelSender; + + fn batch_message_channel( + &self, + capacity: usize, + ) -> (Self::Sender, Self::Receiver) { + let (batch_tx, batch_rx) = tokio::sync::mpsc::channel::(capacity); + (batch_tx.into(), batch_rx.into()) + } +} + +#[derive(Debug)] +pub struct BatchMessageChannelSender { + sender: 
tokio::sync::mpsc::Sender, +} + +impl From> + for BatchMessageChannelSender +{ + fn from(sender: tokio::sync::mpsc::Sender) -> Self { + Self { sender } + } +} + +impl opentelemetry_sdk::runtime::TrySend + for BatchMessageChannelSender +{ + type Message = T; + + fn try_send( + &self, + item: Self::Message, + ) -> Result<(), opentelemetry_sdk::runtime::TrySendError> { + self.sender.try_send(item).map_err(|err| match err { + tokio::sync::mpsc::error::TrySendError::Full(_) => { + opentelemetry_sdk::runtime::TrySendError::ChannelFull + } + tokio::sync::mpsc::error::TrySendError::Closed(_) => { + opentelemetry_sdk::runtime::TrySendError::ChannelClosed + } + }) + } +} + +pub struct BatchMessageChannelReceiver { + receiver: tokio::sync::mpsc::Receiver, +} + +impl From> + for BatchMessageChannelReceiver +{ + fn from(receiver: tokio::sync::mpsc::Receiver) -> Self { + Self { receiver } + } +} + +impl Stream for BatchMessageChannelReceiver { + type Item = T; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.receiver.poll_recv(cx) + } +} + +enum DenoPeriodicReaderMessage { + Register(std::sync::Weak), + Export, + ForceFlush(oneshot::Sender>), + Shutdown(oneshot::Sender>), +} + +#[derive(Debug)] +struct DenoPeriodicReader { + tx: tokio::sync::mpsc::Sender, + temporality: Temporality, +} + +impl MetricReader for DenoPeriodicReader { + fn register_pipeline( + &self, + pipeline: std::sync::Weak, + ) { + let _ = self + .tx + .try_send(DenoPeriodicReaderMessage::Register(pipeline)); + } + + fn collect( + &self, + _rm: &mut opentelemetry_sdk::metrics::data::ResourceMetrics, + ) -> opentelemetry_sdk::metrics::MetricResult<()> { + unreachable!("collect should not be called on DenoPeriodicReader"); + } + + fn force_flush(&self) -> opentelemetry_sdk::metrics::MetricResult<()> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.try_send(DenoPeriodicReaderMessage::ForceFlush(tx)); + deno_core::futures::executor::block_on(rx).unwrap()?; + Ok(()) 
+ } + + fn shutdown(&self) -> opentelemetry_sdk::metrics::MetricResult<()> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.try_send(DenoPeriodicReaderMessage::Shutdown(tx)); + deno_core::futures::executor::block_on(rx).unwrap()?; + Ok(()) + } + + fn temporality( + &self, + _kind: opentelemetry_sdk::metrics::InstrumentKind, + ) -> Temporality { + self.temporality + } +} + +const METRIC_EXPORT_INTERVAL_NAME: &str = "OTEL_METRIC_EXPORT_INTERVAL"; +const DEFAULT_INTERVAL: Duration = Duration::from_secs(60); + +impl DenoPeriodicReader { + fn new(exporter: opentelemetry_otlp::MetricExporter) -> Self { + let interval = env::var(METRIC_EXPORT_INTERVAL_NAME) + .ok() + .and_then(|v| v.parse().map(Duration::from_millis).ok()) + .unwrap_or(DEFAULT_INTERVAL); + + let (tx, mut rx) = tokio::sync::mpsc::channel(256); + + let temporality = PushMetricExporter::temporality(&exporter); + + let worker = async move { + let inner = ManualReader::builder() + .with_temporality(PushMetricExporter::temporality(&exporter)) + .build(); + + let collect_and_export = |collect_observed: bool| { + let inner = &inner; + let exporter = &exporter; + async move { + let mut resource_metrics = + opentelemetry_sdk::metrics::data::ResourceMetrics { + resource: Default::default(), + scope_metrics: Default::default(), + }; + if collect_observed { + let callbacks = { + let mut callbacks = OTEL_PRE_COLLECT_CALLBACKS.lock().unwrap(); + std::mem::take(&mut *callbacks) + }; + let mut futures = JoinSet::new(); + for callback in callbacks { + let (tx, rx) = oneshot::channel(); + if let Ok(()) = callback.send(tx) { + futures.spawn(rx); + } + } + while futures.join_next().await.is_some() {} + } + inner.collect(&mut resource_metrics)?; + if resource_metrics.scope_metrics.is_empty() { + return Ok(()); + } + exporter.export(&mut resource_metrics).await?; + Ok(()) + } + }; + + let mut ticker = tokio::time::interval(interval); + ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + 
ticker.tick().await; + + loop { + let message = tokio::select! { + _ = ticker.tick() => DenoPeriodicReaderMessage::Export, + message = rx.recv() => if let Some(message) = message { + message + } else { + break; + }, + }; + + match message { + DenoPeriodicReaderMessage::Register(new_pipeline) => { + inner.register_pipeline(new_pipeline); + } + DenoPeriodicReaderMessage::Export => { + otel_debug!( + name: "DenoPeriodicReader.ExportTriggered", + message = "Export message received.", + ); + if let Err(err) = collect_and_export(true).await { + otel_error!( + name: "DenoPeriodicReader.ExportFailed", + message = "Failed to export metrics", + reason = format!("{}", err)); + } + } + DenoPeriodicReaderMessage::ForceFlush(sender) => { + otel_debug!( + name: "DenoPeriodicReader.ForceFlushCalled", + message = "Flush message received.", + ); + let res = collect_and_export(false).await; + if let Err(send_error) = sender.send(res) { + otel_debug!( + name: "DenoPeriodicReader.Flush.SendResultError", + message = "Failed to send flush result.", + reason = format!("{:?}", send_error), + ); + } + } + DenoPeriodicReaderMessage::Shutdown(sender) => { + otel_debug!( + name: "DenoPeriodicReader.ShutdownCalled", + message = "Shutdown message received", + ); + let res = collect_and_export(false).await; + let _ = exporter.shutdown(); + if let Err(send_error) = sender.send(res) { + otel_debug!( + name: "DenoPeriodicReader.Shutdown.SendResultError", + message = "Failed to send shutdown result", + reason = format!("{:?}", send_error), + ); + } + break; + } + } + } + }; + + (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX) + .unbounded_send(worker.boxed()) + .expect("failed to send task to shared OpenTelemetry runtime"); + + DenoPeriodicReader { tx, temporality } + } +} + +mod hyper_client { + use std::fmt::Debug; + use std::pin::Pin; + use std::task::Poll; + use std::task::{self}; + + use deno_tls::create_client_config; + use deno_tls::load_certs; + use deno_tls::load_private_keys; + use deno_tls::SocketUse; 
+ use deno_tls::TlsKey; + use deno_tls::TlsKeys; + use http_body_util::BodyExt; + use http_body_util::Full; + use hyper::body::Body as HttpBody; + use hyper::body::Frame; + use hyper_rustls::HttpsConnector; + use hyper_util::client::legacy::connect::HttpConnector; + use hyper_util::client::legacy::Client; + use opentelemetry_http::Bytes; + use opentelemetry_http::HttpError; + use opentelemetry_http::Request; + use opentelemetry_http::Response; + use opentelemetry_http::ResponseExt; + + use super::OtelSharedRuntime; + + // same as opentelemetry_http::HyperClient except it uses OtelSharedRuntime + #[derive(Debug, Clone)] + pub struct HyperClient { + inner: Client, Body>, + } + + impl HyperClient { + pub fn new() -> deno_core::anyhow::Result { + let ca_certs = match std::env::var("OTEL_EXPORTER_OTLP_CERTIFICATE") { + Ok(path) => vec![std::fs::read(path)?], + _ => vec![], + }; + + let keys = match ( + std::env::var("OTEL_EXPORTER_OTLP_CLIENT_KEY"), + std::env::var("OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE"), + ) { + (Ok(key_path), Ok(cert_path)) => { + let key = std::fs::read(key_path)?; + let cert = std::fs::read(cert_path)?; + + let certs = load_certs(&mut std::io::Cursor::new(cert))?; + let key = load_private_keys(&key)?.into_iter().next().unwrap(); + + TlsKeys::Static(TlsKey(certs, key)) + } + _ => TlsKeys::Null, + }; + + let tls_config = + create_client_config(None, ca_certs, None, keys, SocketUse::Http)?; + let mut http_connector = HttpConnector::new(); + http_connector.enforce_http(false); + let connector = HttpsConnector::from((http_connector, tls_config)); + + Ok(Self { + inner: Client::builder(OtelSharedRuntime).build(connector), + }) + } + } + + #[async_trait::async_trait] + impl opentelemetry_http::HttpClient for HyperClient { + async fn send( + &self, + request: Request>, + ) -> Result, HttpError> { + let (parts, body) = request.into_parts(); + let request = Request::from_parts(parts, Body(Full::from(body))); + let mut response = 
self.inner.request(request).await?; + let headers = std::mem::take(response.headers_mut()); + + let mut http_response = Response::builder() + .status(response.status()) + .body(response.into_body().collect().await?.to_bytes())?; + *http_response.headers_mut() = headers; + + Ok(http_response.error_for_status()?) + } + } + + #[pin_project::pin_project] + pub struct Body(#[pin] Full); + + impl HttpBody for Body { + type Data = Bytes; + type Error = Box; + + #[inline] + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll, Self::Error>>> { + self.project().0.poll_frame(cx).map_err(Into::into) + } + + #[inline] + fn is_end_stream(&self) -> bool { + self.0.is_end_stream() + } + + #[inline] + fn size_hint(&self) -> hyper::body::SizeHint { + self.0.size_hint() + } + } +} + +#[derive(Debug)] +pub struct OtelGlobals { + pub span_processor: BatchSpanProcessor, + pub log_processor: BatchLogProcessor, + pub id_generator: DenoIdGenerator, + pub meter_provider: SdkMeterProvider, + pub builtin_instrumentation_scope: InstrumentationScope, + pub config: OtelConfig, +} + +pub static OTEL_GLOBALS: OnceCell = OnceCell::new(); + +pub fn init( + rt_config: OtelRuntimeConfig, + config: OtelConfig, +) -> deno_core::anyhow::Result<()> { + // Parse the `OTEL_EXPORTER_OTLP_PROTOCOL` variable. The opentelemetry_* + // crates don't do this automatically. + // TODO(piscisaureus): enable GRPC support. 
+ let protocol = match env::var("OTEL_EXPORTER_OTLP_PROTOCOL").as_deref() { + Ok("http/protobuf") => Protocol::HttpBinary, + Ok("http/json") => Protocol::HttpJson, + Ok("") | Err(env::VarError::NotPresent) => Protocol::HttpBinary, + Ok(protocol) => { + return Err(deno_core::anyhow::anyhow!( + "Env var OTEL_EXPORTER_OTLP_PROTOCOL specifies an unsupported protocol: {}", + protocol + )); + } + Err(err) => { + return Err(deno_core::anyhow::anyhow!( + "Failed to read env var OTEL_EXPORTER_OTLP_PROTOCOL: {}", + err + )); + } + }; + + // Define the resource attributes that will be attached to all log records. + // These attributes are sourced as follows (in order of precedence): + // * The `service.name` attribute from the `OTEL_SERVICE_NAME` env var. + // * Additional attributes from the `OTEL_RESOURCE_ATTRIBUTES` env var. + // * Default attribute values defined here. + // TODO(piscisaureus): add more default attributes (e.g. script path). + let mut resource = Resource::default(); + + // Add the runtime name and version to the resource attributes. Also override + // the `telemetry.sdk` attributes to include the Deno runtime. + resource = resource.merge(&Resource::new(vec![ + KeyValue::new(PROCESS_RUNTIME_NAME, rt_config.runtime_name), + KeyValue::new(PROCESS_RUNTIME_VERSION, rt_config.runtime_version.clone()), + KeyValue::new( + TELEMETRY_SDK_LANGUAGE, + format!( + "deno-{}", + resource.get(Key::new(TELEMETRY_SDK_LANGUAGE)).unwrap() + ), + ), + KeyValue::new( + TELEMETRY_SDK_NAME, + format!( + "deno-{}", + resource.get(Key::new(TELEMETRY_SDK_NAME)).unwrap() + ), + ), + KeyValue::new( + TELEMETRY_SDK_VERSION, + format!( + "{}-{}", + rt_config.runtime_version, + resource.get(Key::new(TELEMETRY_SDK_VERSION)).unwrap() + ), + ), + ])); + + // The OTLP endpoint is automatically picked up from the + // `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. Additional headers can + // be specified using `OTEL_EXPORTER_OTLP_HEADERS`. 
+ + let client = hyper_client::HyperClient::new()?; + + let span_exporter = HttpExporterBuilder::default() + .with_http_client(client.clone()) + .with_protocol(protocol) + .build_span_exporter()?; + let mut span_processor = + BatchSpanProcessor::builder(span_exporter, OtelSharedRuntime).build(); + span_processor.set_resource(&resource); + + let temporality_preference = + env::var("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE") + .ok() + .map(|s| s.to_lowercase()); + let temporality = match temporality_preference.as_deref() { + None | Some("cumulative") => Temporality::Cumulative, + Some("delta") => Temporality::Delta, + Some("lowmemory") => Temporality::LowMemory, + Some(other) => { + return Err(deno_core::anyhow::anyhow!( + "Invalid value for OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: {}", + other + )); + } + }; + let metric_exporter = HttpExporterBuilder::default() + .with_http_client(client.clone()) + .with_protocol(protocol) + .build_metrics_exporter(temporality)?; + let metric_reader = DenoPeriodicReader::new(metric_exporter); + let meter_provider = SdkMeterProvider::builder() + .with_reader(metric_reader) + .with_resource(resource.clone()) + .build(); + + let log_exporter = HttpExporterBuilder::default() + .with_http_client(client) + .with_protocol(protocol) + .build_log_exporter()?; + let log_processor = + BatchLogProcessor::builder(log_exporter, OtelSharedRuntime).build(); + log_processor.set_resource(&resource); + + let builtin_instrumentation_scope = + opentelemetry::InstrumentationScope::builder("deno") + .with_version(rt_config.runtime_version.clone()) + .build(); + + let id_generator = if let Some(prefix) = config.deterministic_prefix { + DenoIdGenerator::deterministic(prefix) + } else { + DenoIdGenerator::random() + }; + + OTEL_GLOBALS + .set(OtelGlobals { + log_processor, + span_processor, + id_generator, + meter_provider, + builtin_instrumentation_scope, + config, + }) + .map_err(|_| deno_core::anyhow::anyhow!("failed to set otel 
globals"))?; + + Ok(()) +} + +/// This function is called by the runtime whenever it is about to call +/// `process::exit()`, to ensure that all OpenTelemetry logs are properly +/// flushed before the process terminates. +pub fn flush() { + if let Some(OtelGlobals { + span_processor: spans, + log_processor: logs, + meter_provider, + .. + }) = OTEL_GLOBALS.get() + { + let _ = spans.force_flush(); + let _ = logs.force_flush(); + let _ = meter_provider.force_flush(); + } +} + +pub fn handle_log(record: &log::Record) { + use log::Level; + + let Some(OtelGlobals { + log_processor: logs, + builtin_instrumentation_scope, + .. + }) = OTEL_GLOBALS.get() + else { + return; + }; + + let mut log_record = LogRecord::default(); + + log_record.set_observed_timestamp(SystemTime::now()); + log_record.set_severity_number(match record.level() { + Level::Error => Severity::Error, + Level::Warn => Severity::Warn, + Level::Info => Severity::Info, + Level::Debug => Severity::Debug, + Level::Trace => Severity::Trace, + }); + log_record.set_severity_text(record.level().as_str()); + log_record.set_body(record.args().to_string().into()); + log_record.set_target(record.metadata().target().to_string()); + + struct Visitor<'s>(&'s mut LogRecord); + + impl<'kvs> log::kv::VisitSource<'kvs> for Visitor<'_> { + fn visit_pair( + &mut self, + key: log::kv::Key<'kvs>, + value: log::kv::Value<'kvs>, + ) -> Result<(), log::kv::Error> { + #[allow(clippy::manual_map)] + let value = if let Some(v) = value.to_bool() { + Some(AnyValue::Boolean(v)) + } else if let Some(v) = value.to_borrowed_str() { + Some(AnyValue::String(v.to_owned().into())) + } else if let Some(v) = value.to_f64() { + Some(AnyValue::Double(v)) + } else if let Some(v) = value.to_i64() { + Some(AnyValue::Int(v)) + } else { + None + }; + + if let Some(value) = value { + let key = Key::from(key.as_str().to_owned()); + self.0.add_attribute(key, value); + } + + Ok(()) + } + } + + let _ = record.key_values().visit(&mut Visitor(&mut log_record)); 
+ + logs.emit(&mut log_record, builtin_instrumentation_scope); +} + +#[derive(Debug)] +pub enum DenoIdGenerator { + Random(RandomIdGenerator), + Deterministic { + next_trace_id: AtomicU64, + next_span_id: AtomicU64, + }, +} + +impl IdGenerator for DenoIdGenerator { + fn new_trace_id(&self) -> TraceId { + match self { + Self::Random(generator) => generator.new_trace_id(), + Self::Deterministic { next_trace_id, .. } => { + let id = + next_trace_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let bytes = id.to_be_bytes(); + let bytes = [ + 0, 0, 0, 0, 0, 0, 0, 0, bytes[0], bytes[1], bytes[2], bytes[3], + bytes[4], bytes[5], bytes[6], bytes[7], + ]; + TraceId::from_bytes(bytes) + } + } + } + + fn new_span_id(&self) -> SpanId { + match self { + Self::Random(generator) => generator.new_span_id(), + Self::Deterministic { next_span_id, .. } => { + let id = + next_span_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + SpanId::from_bytes(id.to_be_bytes()) + } + } + } +} + +impl DenoIdGenerator { + fn random() -> Self { + Self::Random(RandomIdGenerator::default()) + } + + fn deterministic(prefix: u8) -> Self { + let prefix = u64::from(prefix) << 56; + Self::Deterministic { + next_trace_id: AtomicU64::new(prefix + 1), + next_span_id: AtomicU64::new(prefix + 1), + } + } +} + +fn parse_trace_id( + scope: &mut v8::HandleScope<'_>, + trace_id: v8::Local<'_, v8::Value>, +) -> TraceId { + if let Ok(string) = trace_id.try_cast() { + let value_view = v8::ValueView::new(scope, string); + match value_view.data() { + v8::ValueViewData::OneByte(bytes) => { + TraceId::from_hex(&String::from_utf8_lossy(bytes)) + .unwrap_or(TraceId::INVALID) + } + + _ => TraceId::INVALID, + } + } else if let Ok(uint8array) = trace_id.try_cast::() { + let data = uint8array.data(); + let byte_length = uint8array.byte_length(); + if byte_length != 16 { + return TraceId::INVALID; + } + // SAFETY: We have ensured that the byte length is 16, so it is safe to + // cast the data to an array of 16 
bytes. + let bytes = unsafe { &*(data as *const u8 as *const [u8; 16]) }; + TraceId::from_bytes(*bytes) + } else { + TraceId::INVALID + } +} + +fn parse_span_id( + scope: &mut v8::HandleScope<'_>, + span_id: v8::Local<'_, v8::Value>, +) -> SpanId { + if let Ok(string) = span_id.try_cast() { + let value_view = v8::ValueView::new(scope, string); + match value_view.data() { + v8::ValueViewData::OneByte(bytes) => { + SpanId::from_hex(&String::from_utf8_lossy(bytes)) + .unwrap_or(SpanId::INVALID) + } + _ => SpanId::INVALID, + } + } else if let Ok(uint8array) = span_id.try_cast::() { + let data = uint8array.data(); + let byte_length = uint8array.byte_length(); + if byte_length != 8 { + return SpanId::INVALID; + } + // SAFETY: We have ensured that the byte length is 8, so it is safe to + // cast the data to an array of 8 bytes. + let bytes = unsafe { &*(data as *const u8 as *const [u8; 8]) }; + SpanId::from_bytes(*bytes) + } else { + SpanId::INVALID + } +} + +macro_rules! attr_raw { + ($scope:ident, $name:expr, $value:expr) => {{ + let name = if let Ok(name) = $name.try_cast() { + let view = v8::ValueView::new($scope, name); + match view.data() { + v8::ValueViewData::OneByte(bytes) => { + Some(String::from_utf8_lossy(bytes).into_owned()) + } + v8::ValueViewData::TwoByte(bytes) => { + Some(String::from_utf16_lossy(bytes)) + } + } + } else { + None + }; + let value = if let Ok(string) = $value.try_cast::() { + Some(Value::String(StringValue::from({ + let x = v8::ValueView::new($scope, string); + match x.data() { + v8::ValueViewData::OneByte(bytes) => { + String::from_utf8_lossy(bytes).into_owned() + } + v8::ValueViewData::TwoByte(bytes) => String::from_utf16_lossy(bytes), + } + }))) + } else if let Ok(number) = $value.try_cast::() { + Some(Value::F64(number.value())) + } else if let Ok(boolean) = $value.try_cast::() { + Some(Value::Bool(boolean.is_true())) + } else if let Ok(bigint) = $value.try_cast::() { + let (i64_value, _lossless) = bigint.i64_value(); + 
Some(Value::I64(i64_value)) + } else if let Ok(_array) = $value.try_cast::() { + // TODO: implement array attributes + None + } else { + None + }; + if let (Some(name), Some(value)) = (name, value) { + Some(KeyValue::new(name, value)) + } else { + None + } + }}; +} + +macro_rules! attr { + ($scope:ident, $attributes:expr $(=> $dropped_attributes_count:expr)?, $name:expr, $value:expr) => { + let attr = attr_raw!($scope, $name, $value); + if let Some(kv) = attr { + $attributes.push(kv); + } + $( + else { + $dropped_attributes_count += 1; + } + )? + }; +} + +#[op2(fast)] +fn op_otel_log<'s>( + scope: &mut v8::HandleScope<'s>, + state: &mut OpState, + message: v8::Local<'s, v8::Value>, + #[smi] level: i32, + span: v8::Local<'s, v8::Value>, +) { + let Some(OtelGlobals { + log_processor, + builtin_instrumentation_scope, + .. + }) = OTEL_GLOBALS.get() + else { + return; + }; + + // Convert the integer log level that ext/console uses to the corresponding + // OpenTelemetry log severity. + let severity = match level { + ..=0 => Severity::Debug, + 1 => Severity::Info, + 2 => Severity::Warn, + 3.. 
=> Severity::Error, + }; + + let mut log_record = LogRecord::default(); + log_record.set_observed_timestamp(SystemTime::now()); + let Ok(message) = message.try_cast() else { + return; + }; + log_record.set_body(owned_string(scope, message).into()); + log_record.set_severity_number(severity); + log_record.set_severity_text(severity.name()); + if let Some(runtime_attributes) = + state.try_borrow::() + { + for (k, v) in runtime_attributes.0.clone() { + log_record.add_attribute(k, v.to_string()); + } + } + if let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + { + let state = span.0.borrow(); + match &**state { + OtelSpanState::Recording(span) => { + log_record.set_trace_context( + span.span_context.trace_id(), + span.span_context.span_id(), + Some(span.span_context.trace_flags()), + ); + } + OtelSpanState::Done(span_context) => { + log_record.set_trace_context( + span_context.trace_id(), + span_context.span_id(), + Some(span_context.trace_flags()), + ); + } + } + } + + log_processor.emit(&mut log_record, builtin_instrumentation_scope); +} + +#[op2(fast)] +fn op_otel_log_foreign( + scope: &mut v8::HandleScope<'_>, + state: &mut OpState, + #[string] message: String, + #[smi] level: i32, + trace_id: v8::Local<'_, v8::Value>, + span_id: v8::Local<'_, v8::Value>, + #[smi] trace_flags: u8, +) { + let Some(OtelGlobals { + log_processor, + builtin_instrumentation_scope, + .. + }) = OTEL_GLOBALS.get() + else { + return; + }; + + // Convert the integer log level that ext/console uses to the corresponding + // OpenTelemetry log severity. + let severity = match level { + ..=0 => Severity::Debug, + 1 => Severity::Info, + 2 => Severity::Warn, + 3.. 
=> Severity::Error, + }; + + let trace_id = parse_trace_id(scope, trace_id); + let span_id = parse_span_id(scope, span_id); + + let mut log_record = LogRecord::default(); + + log_record.set_observed_timestamp(SystemTime::now()); + log_record.set_body(message.into()); + log_record.set_severity_number(severity); + log_record.set_severity_text(severity.name()); + if let Some(runtime_attributes) = + state.try_borrow::() + { + for (k, v) in runtime_attributes.0.clone() { + log_record.add_attribute(k, v.to_string()); + } + } + if trace_id != TraceId::INVALID && span_id != SpanId::INVALID { + log_record.set_trace_context( + trace_id, + span_id, + Some(TraceFlags::new(trace_flags)), + ); + } + + log_processor.emit(&mut log_record, builtin_instrumentation_scope); +} + +fn owned_string<'s>( + scope: &mut v8::HandleScope<'s>, + string: v8::Local<'s, v8::String>, +) -> String { + let x = v8::ValueView::new(scope, string); + match x.data() { + v8::ValueViewData::OneByte(bytes) => { + String::from_utf8_lossy(bytes).into_owned() + } + v8::ValueViewData::TwoByte(bytes) => String::from_utf16_lossy(bytes), + } +} + +struct OtelTracer(InstrumentationScope); + +impl deno_core::GarbageCollected for OtelTracer {} + +#[op2] +impl OtelTracer { + #[constructor] + #[cppgc] + fn new( + #[string] name: String, + #[string] version: Option, + #[string] schema_url: Option, + ) -> OtelTracer { + let mut builder = opentelemetry::InstrumentationScope::builder(name); + if let Some(version) = version { + builder = builder.with_version(version); + } + if let Some(schema_url) = schema_url { + builder = builder.with_schema_url(schema_url); + } + let scope = builder.build(); + OtelTracer(scope) + } + + #[static_method] + #[cppgc] + fn builtin() -> OtelTracer { + let OtelGlobals { + builtin_instrumentation_scope, + .. 
+ } = OTEL_GLOBALS.get().unwrap(); + OtelTracer(builtin_instrumentation_scope.clone()) + } + + #[cppgc] + fn start_span<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + #[cppgc] parent: Option<&OtelSpan>, + name: v8::Local<'s, v8::Value>, + #[smi] span_kind: u8, + start_time: Option, + #[smi] attribute_count: usize, + ) -> Result { + let OtelGlobals { id_generator, .. } = OTEL_GLOBALS.get().unwrap(); + let span_context; + let parent_span_id; + match parent { + Some(parent) => { + let parent = parent.0.borrow(); + let parent_span_context = match &**parent { + OtelSpanState::Recording(span) => &span.span_context, + OtelSpanState::Done(span_context) => span_context, + }; + span_context = SpanContext::new( + parent_span_context.trace_id(), + id_generator.new_span_id(), + TraceFlags::SAMPLED, + false, + parent_span_context.trace_state().clone(), + ); + parent_span_id = parent_span_context.span_id(); + } + None => { + span_context = SpanContext::new( + id_generator.new_trace_id(), + id_generator.new_span_id(), + TraceFlags::SAMPLED, + false, + TraceState::NONE, + ); + parent_span_id = SpanId::INVALID; + } + } + let name = owned_string( + scope, + name + .try_cast() + .map_err(|e: DataError| anyhow!(e.to_string()))?, + ); + let span_kind = match span_kind { + 0 => SpanKind::Internal, + 1 => SpanKind::Server, + 2 => SpanKind::Client, + 3 => SpanKind::Producer, + 4 => SpanKind::Consumer, + _ => return Err(anyhow!("invalid span kind")), + }; + let start_time = start_time + .map(|start_time| { + SystemTime::UNIX_EPOCH + .checked_add(std::time::Duration::from_secs_f64(start_time / 1000.0)) + .ok_or_else(|| anyhow!("invalid start time")) + }) + .unwrap_or_else(|| Ok(SystemTime::now()))?; + let span_data = SpanData { + span_context, + parent_span_id, + span_kind, + name: Cow::Owned(name), + start_time, + end_time: SystemTime::UNIX_EPOCH, + attributes: Vec::with_capacity(attribute_count), + dropped_attributes_count: 0, + status: SpanStatus::Unset, + events: 
SpanEvents::default(), + links: SpanLinks::default(), + instrumentation_scope: self.0.clone(), + }; + Ok(OtelSpan(RefCell::new(Box::new(OtelSpanState::Recording( + span_data, + ))))) + } + + #[cppgc] + fn start_span_foreign<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + parent_trace_id: v8::Local<'s, v8::Value>, + parent_span_id: v8::Local<'s, v8::Value>, + name: v8::Local<'s, v8::Value>, + #[smi] span_kind: u8, + start_time: Option, + #[smi] attribute_count: usize, + ) -> Result { + let parent_trace_id = parse_trace_id(scope, parent_trace_id); + if parent_trace_id == TraceId::INVALID { + return Err(anyhow!("invalid trace id")); + }; + let parent_span_id = parse_span_id(scope, parent_span_id); + if parent_span_id == SpanId::INVALID { + return Err(anyhow!("invalid span id")); + }; + let OtelGlobals { id_generator, .. } = OTEL_GLOBALS.get().unwrap(); + let span_context = SpanContext::new( + parent_trace_id, + id_generator.new_span_id(), + TraceFlags::SAMPLED, + false, + TraceState::NONE, + ); + let name = owned_string( + scope, + name + .try_cast() + .map_err(|e: DataError| anyhow!(e.to_string()))?, + ); + let span_kind = match span_kind { + 0 => SpanKind::Internal, + 1 => SpanKind::Server, + 2 => SpanKind::Client, + 3 => SpanKind::Producer, + 4 => SpanKind::Consumer, + _ => return Err(anyhow!("invalid span kind")), + }; + let start_time = start_time + .map(|start_time| { + SystemTime::UNIX_EPOCH + .checked_add(std::time::Duration::from_secs_f64(start_time / 1000.0)) + .ok_or_else(|| anyhow!("invalid start time")) + }) + .unwrap_or_else(|| Ok(SystemTime::now()))?; + let span_data = SpanData { + span_context, + parent_span_id, + span_kind, + name: Cow::Owned(name), + start_time, + end_time: SystemTime::UNIX_EPOCH, + attributes: Vec::with_capacity(attribute_count), + dropped_attributes_count: 0, + status: SpanStatus::Unset, + events: SpanEvents::default(), + links: SpanLinks::default(), + instrumentation_scope: self.0.clone(), + }; + 
Ok(OtelSpan(RefCell::new(Box::new(OtelSpanState::Recording( + span_data, + ))))) + } +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct JsSpanContext { + trace_id: Box, + span_id: Box, + trace_flags: u8, +} + +// boxed because of https://github.com/denoland/rusty_v8/issues/1676 +#[derive(Debug)] +struct OtelSpan(RefCell>); + +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +enum OtelSpanState { + Recording(SpanData), + Done(SpanContext), +} + +impl deno_core::GarbageCollected for OtelSpan {} + +#[op2] +impl OtelSpan { + #[constructor] + #[cppgc] + fn new() -> Result { + bail!("OtelSpan cannot be constructed.") + } + + #[serde] + fn span_context(&self) -> JsSpanContext { + let state = self.0.borrow(); + let span_context = match &**state { + OtelSpanState::Recording(span) => &span.span_context, + OtelSpanState::Done(span_context) => span_context, + }; + JsSpanContext { + trace_id: format!("{:?}", span_context.trace_id()).into(), + span_id: format!("{:?}", span_context.span_id()).into(), + trace_flags: span_context.trace_flags().to_u8(), + } + } + + #[fast] + fn set_status<'s>( + &self, + #[smi] status: u8, + #[string] error_description: String, + ) -> Result<(), anyhow::Error> { + let mut state = self.0.borrow_mut(); + let OtelSpanState::Recording(span) = &mut **state else { + return Ok(()); + }; + span.status = match status { + 0 => SpanStatus::Unset, + 1 => SpanStatus::Ok, + 2 => SpanStatus::Error { + description: Cow::Owned(error_description), + }, + _ => bail!("invalid span status code"), + }; + Ok(()) + } + + #[fast] + fn drop_event(&self) { + let mut state = self.0.borrow_mut(); + match &mut **state { + OtelSpanState::Recording(span) => { + span.events.dropped_count += 1; + } + OtelSpanState::Done(_) => {} + } + } + + #[fast] + fn end(&self, state: &mut OpState, end_time: f64) { + let end_time = if end_time.is_nan() { + SystemTime::now() + } else { + SystemTime::UNIX_EPOCH + .checked_add(Duration::from_secs_f64(end_time / 1000.0)) + 
.unwrap() + }; + let runtime_attributes = + state.try_borrow::().clone(); + + let mut state = self.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + let span_context = span.span_context.clone(); + if let OtelSpanState::Recording(mut span) = *std::mem::replace( + &mut *state, + Box::new(OtelSpanState::Done(span_context)), + ) { + span.end_time = end_time; + if let Some(attributes) = runtime_attributes { + for (k, v) in attributes.0.clone() { + span.attributes.push(KeyValue::new(k, v)); + } + } + let Some(OtelGlobals { span_processor, .. }) = OTEL_GLOBALS.get() + else { + return; + }; + span_processor.on_end(span); + } + } + } +} + +#[op2(fast)] +fn op_otel_span_attribute1<'s>( + scope: &mut v8::HandleScope<'s>, + span: v8::Local<'_, v8::Value>, + key: v8::Local<'s, v8::Value>, + value: v8::Local<'s, v8::Value>, +) { + let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + else { + return; + }; + let mut state = span.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + attr!(scope, span.attributes => span.dropped_attributes_count, key, value); + } +} + +#[op2(fast)] +fn op_otel_span_attribute2<'s>( + scope: &mut v8::HandleScope<'s>, + span: v8::Local<'_, v8::Value>, + key1: v8::Local<'s, v8::Value>, + value1: v8::Local<'s, v8::Value>, + key2: v8::Local<'s, v8::Value>, + value2: v8::Local<'s, v8::Value>, +) { + let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + else { + return; + }; + let mut state = span.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + attr!(scope, span.attributes => span.dropped_attributes_count, key1, value1); + attr!(scope, span.attributes => span.dropped_attributes_count, key2, value2); + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_span_attribute3<'s>( + scope: &mut v8::HandleScope<'s>, + span: v8::Local<'_, v8::Value>, + key1: v8::Local<'s, v8::Value>, + value1: v8::Local<'s, v8::Value>, + key2: 
v8::Local<'s, v8::Value>, + value2: v8::Local<'s, v8::Value>, + key3: v8::Local<'s, v8::Value>, + value3: v8::Local<'s, v8::Value>, +) { + let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + else { + return; + }; + let mut state = span.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + attr!(scope, span.attributes => span.dropped_attributes_count, key1, value1); + attr!(scope, span.attributes => span.dropped_attributes_count, key2, value2); + attr!(scope, span.attributes => span.dropped_attributes_count, key3, value3); + } +} + +#[op2(fast)] +fn op_otel_span_update_name<'s>( + scope: &mut v8::HandleScope<'s>, + span: v8::Local<'s, v8::Value>, + name: v8::Local<'s, v8::Value>, +) { + let Ok(name) = name.try_cast() else { + return; + }; + let name = owned_string(scope, name); + let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + else { + return; + }; + let mut state = span.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + span.name = Cow::Owned(name) + } +} + +#[op2(fast)] +fn op_otel_span_add_link<'s>( + scope: &mut v8::HandleScope<'s>, + span: v8::Local<'s, v8::Value>, + trace_id: v8::Local<'s, v8::Value>, + span_id: v8::Local<'s, v8::Value>, + #[smi] trace_flags: u8, + is_remote: bool, + #[smi] dropped_attributes_count: u32, +) -> bool { + let trace_id = parse_trace_id(scope, trace_id); + if trace_id == TraceId::INVALID { + return false; + }; + let span_id = parse_span_id(scope, span_id); + if span_id == SpanId::INVALID { + return false; + }; + let span_context = SpanContext::new( + trace_id, + span_id, + TraceFlags::new(trace_flags), + is_remote, + TraceState::NONE, + ); + + let Some(span) = + deno_core::_ops::try_unwrap_cppgc_object::(scope, span) + else { + return true; + }; + let mut state = span.0.borrow_mut(); + if let OtelSpanState::Recording(span) = &mut **state { + span.links.links.push(Link::new( + span_context, + vec![], + dropped_attributes_count, + )); + 
} + true +} + +struct OtelMeter(opentelemetry::metrics::Meter); + +impl deno_core::GarbageCollected for OtelMeter {} + +#[op2] +impl OtelMeter { + #[constructor] + #[cppgc] + fn new( + #[string] name: String, + #[string] version: Option, + #[string] schema_url: Option, + ) -> OtelMeter { + let mut builder = opentelemetry::InstrumentationScope::builder(name); + if let Some(version) = version { + builder = builder.with_version(version); + } + if let Some(schema_url) = schema_url { + builder = builder.with_schema_url(schema_url); + } + let scope = builder.build(); + let meter = OTEL_GLOBALS + .get() + .unwrap() + .meter_provider + .meter_with_scope(scope); + OtelMeter(meter) + } + + #[cppgc] + fn create_counter<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_instrument( + |name| self.0.f64_counter(name), + |i| Instrument::Counter(i.build()), + scope, + name, + description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } + + #[cppgc] + fn create_up_down_counter<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_instrument( + |name| self.0.f64_up_down_counter(name), + |i| Instrument::UpDownCounter(i.build()), + scope, + name, + description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } + + #[cppgc] + fn create_gauge<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_instrument( + |name| self.0.f64_gauge(name), + |i| Instrument::Gauge(i.build()), + scope, + name, + description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } + + #[cppgc] + fn create_histogram<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: 
v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + #[serde] boundaries: Option>, + ) -> Result { + let name = owned_string( + scope, + name + .try_cast() + .map_err(|e: DataError| anyhow!(e.to_string()))?, + ); + let mut builder = self.0.f64_histogram(name); + if !description.is_null_or_undefined() { + let description = owned_string( + scope, + description + .try_cast() + .map_err(|e: DataError| anyhow!(e.to_string()))?, + ); + builder = builder.with_description(description); + }; + if !unit.is_null_or_undefined() { + let unit = owned_string( + scope, + unit + .try_cast() + .map_err(|e: DataError| anyhow!(e.to_string()))?, + ); + builder = builder.with_unit(unit); + }; + if let Some(boundaries) = boundaries { + builder = builder.with_boundaries(boundaries); + } + + Ok(Instrument::Histogram(builder.build())) + } + + #[cppgc] + fn create_observable_counter<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_async_instrument( + |name| self.0.f64_observable_counter(name), + |i| { + i.build(); + }, + scope, + name, + description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } + + #[cppgc] + fn create_observable_up_down_counter<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_async_instrument( + |name| self.0.f64_observable_up_down_counter(name), + |i| { + i.build(); + }, + scope, + name, + description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } + + #[cppgc] + fn create_observable_gauge<'s>( + &self, + scope: &mut v8::HandleScope<'s>, + name: v8::Local<'s, v8::Value>, + description: v8::Local<'s, v8::Value>, + unit: v8::Local<'s, v8::Value>, + ) -> Result { + create_async_instrument( + |name| self.0.f64_observable_gauge(name), + |i| { + i.build(); + }, + scope, + name, + 
description, + unit, + ) + .map_err(|e| anyhow!(e.to_string())) + } +} + +enum Instrument { + Counter(opentelemetry::metrics::Counter), + UpDownCounter(UpDownCounter), + Gauge(opentelemetry::metrics::Gauge), + Histogram(Histogram), + Observable(Arc, f64>>>), +} + +impl GarbageCollected for Instrument {} + +fn create_instrument<'a, 'b, T>( + cb: impl FnOnce(String) -> InstrumentBuilder<'b, T>, + cb2: impl FnOnce(InstrumentBuilder<'b, T>) -> Instrument, + scope: &mut v8::HandleScope<'a>, + name: v8::Local<'a, v8::Value>, + description: v8::Local<'a, v8::Value>, + unit: v8::Local<'a, v8::Value>, +) -> Result { + let name = owned_string(scope, name.try_cast()?); + let mut builder = cb(name); + if !description.is_null_or_undefined() { + let description = owned_string(scope, description.try_cast()?); + builder = builder.with_description(description); + }; + if !unit.is_null_or_undefined() { + let unit = owned_string(scope, unit.try_cast()?); + builder = builder.with_unit(unit); + }; + + Ok(cb2(builder)) +} + +fn create_async_instrument<'a, 'b, T>( + cb: impl FnOnce(String) -> AsyncInstrumentBuilder<'b, T, f64>, + cb2: impl FnOnce(AsyncInstrumentBuilder<'b, T, f64>), + scope: &mut v8::HandleScope<'a>, + name: v8::Local<'a, v8::Value>, + description: v8::Local<'a, v8::Value>, + unit: v8::Local<'a, v8::Value>, +) -> Result { + let name = owned_string(scope, name.try_cast()?); + let mut builder = cb(name); + if !description.is_null_or_undefined() { + let description = owned_string(scope, description.try_cast()?); + builder = builder.with_description(description); + }; + if !unit.is_null_or_undefined() { + let unit = owned_string(scope, unit.try_cast()?); + builder = builder.with_unit(unit); + }; + + let data_share = Arc::new(Mutex::new(HashMap::new())); + let data_share_: Arc, f64>>> = data_share.clone(); + builder = builder.with_callback(move |i| { + let data = { + let mut data = data_share_.lock().unwrap(); + std::mem::take(&mut *data) + }; + for (attributes, value) in 
data { + i.observe(value, &attributes); + } + }); + cb2(builder); + + Ok(Instrument::Observable(data_share)) +} + +struct MetricAttributes { + attributes: Vec, +} + +#[op2(fast)] +fn op_otel_metric_record0( + state: &mut OpState, + #[cppgc] instrument: &Instrument, + value: f64, +) { + let values = state.try_take::(); + let attributes = match &values { + Some(values) => &*values.attributes, + None => &[], + }; + match instrument { + Instrument::Counter(counter) => counter.add(value, attributes), + Instrument::UpDownCounter(counter) => counter.add(value, attributes), + Instrument::Gauge(gauge) => gauge.record(value, attributes), + Instrument::Histogram(histogram) => histogram.record(value, attributes), + _ => {} + } +} + +#[op2(fast)] +fn op_otel_metric_record1( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let mut values = state.try_take::(); + let attr1 = attr_raw!(scope, key1, value1); + let attributes = match &mut values { + Some(values) => { + if let Some(kv) = attr1 { + values.attributes.reserve_exact(1); + values.attributes.push(kv); + } + &*values.attributes + } + None => match attr1 { + Some(kv1) => &[kv1] as &[KeyValue], + None => &[], + }, + }; + match &*instrument { + Instrument::Counter(counter) => counter.add(value, attributes), + Instrument::UpDownCounter(counter) => counter.add(value, attributes), + Instrument::Gauge(gauge) => gauge.record(value, attributes), + Instrument::Histogram(histogram) => histogram.record(value, attributes), + _ => {} + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_metric_record2( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: 
v8::Local<'_, v8::Value>, + key2: v8::Local<'_, v8::Value>, + value2: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let mut values = state.try_take::(); + let attr1 = attr_raw!(scope, key1, value1); + let attr2 = attr_raw!(scope, key2, value2); + let attributes = match &mut values { + Some(values) => { + values.attributes.reserve_exact(2); + if let Some(kv1) = attr1 { + values.attributes.push(kv1); + } + if let Some(kv2) = attr2 { + values.attributes.push(kv2); + } + &*values.attributes + } + None => match (attr1, attr2) { + (Some(kv1), Some(kv2)) => &[kv1, kv2] as &[KeyValue], + (Some(kv1), None) => &[kv1], + (None, Some(kv2)) => &[kv2], + (None, None) => &[], + }, + }; + match &*instrument { + Instrument::Counter(counter) => counter.add(value, attributes), + Instrument::UpDownCounter(counter) => counter.add(value, attributes), + Instrument::Gauge(gauge) => gauge.record(value, attributes), + Instrument::Histogram(histogram) => histogram.record(value, attributes), + _ => {} + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_metric_record3( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: v8::Local<'_, v8::Value>, + key2: v8::Local<'_, v8::Value>, + value2: v8::Local<'_, v8::Value>, + key3: v8::Local<'_, v8::Value>, + value3: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let mut values = state.try_take::(); + let attr1 = attr_raw!(scope, key1, value1); + let attr2 = attr_raw!(scope, key2, value2); + let attr3 = attr_raw!(scope, key3, value3); + let attributes = match &mut values { + Some(values) => { + values.attributes.reserve_exact(3); + if let Some(kv1) = attr1 { + values.attributes.push(kv1); + } + if let Some(kv2) = 
attr2 { + values.attributes.push(kv2); + } + if let Some(kv3) = attr3 { + values.attributes.push(kv3); + } + &*values.attributes + } + None => match (attr1, attr2, attr3) { + (Some(kv1), Some(kv2), Some(kv3)) => &[kv1, kv2, kv3] as &[KeyValue], + (Some(kv1), Some(kv2), None) => &[kv1, kv2], + (Some(kv1), None, Some(kv3)) => &[kv1, kv3], + (None, Some(kv2), Some(kv3)) => &[kv2, kv3], + (Some(kv1), None, None) => &[kv1], + (None, Some(kv2), None) => &[kv2], + (None, None, Some(kv3)) => &[kv3], + (None, None, None) => &[], + }, + }; + match &*instrument { + Instrument::Counter(counter) => counter.add(value, attributes), + Instrument::UpDownCounter(counter) => counter.add(value, attributes), + Instrument::Gauge(gauge) => gauge.record(value, attributes), + Instrument::Histogram(histogram) => histogram.record(value, attributes), + _ => {} + } +} + +#[op2(fast)] +fn op_otel_metric_observable_record0( + state: &mut OpState, + #[cppgc] instrument: &Instrument, + value: f64, +) { + let values = state.try_take::(); + let attributes = values.map(|attr| attr.attributes).unwrap_or_default(); + if let Instrument::Observable(data_share) = instrument { + let mut data = data_share.lock().unwrap(); + data.insert(attributes, value); + } +} + +#[op2(fast)] +fn op_otel_metric_observable_record1( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let values = state.try_take::(); + let attr1 = attr_raw!(scope, key1, value1); + let mut attributes = values + .map(|mut attr| { + attr.attributes.reserve_exact(1); + attr.attributes + }) + .unwrap_or_else(|| Vec::with_capacity(1)); + if let Some(kv1) = attr1 { + attributes.push(kv1); + } + if let Instrument::Observable(data_share) = &*instrument { + let mut data = 
data_share.lock().unwrap(); + data.insert(attributes, value); + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_metric_observable_record2( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: v8::Local<'_, v8::Value>, + key2: v8::Local<'_, v8::Value>, + value2: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let values = state.try_take::(); + let mut attributes = values + .map(|mut attr| { + attr.attributes.reserve_exact(2); + attr.attributes + }) + .unwrap_or_else(|| Vec::with_capacity(2)); + let attr1 = attr_raw!(scope, key1, value1); + let attr2 = attr_raw!(scope, key2, value2); + if let Some(kv1) = attr1 { + attributes.push(kv1); + } + if let Some(kv2) = attr2 { + attributes.push(kv2); + } + if let Instrument::Observable(data_share) = &*instrument { + let mut data = data_share.lock().unwrap(); + data.insert(attributes, value); + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_metric_observable_record3( + state: &mut OpState, + scope: &mut v8::HandleScope<'_>, + instrument: v8::Local<'_, v8::Value>, + value: f64, + key1: v8::Local<'_, v8::Value>, + value1: v8::Local<'_, v8::Value>, + key2: v8::Local<'_, v8::Value>, + value2: v8::Local<'_, v8::Value>, + key3: v8::Local<'_, v8::Value>, + value3: v8::Local<'_, v8::Value>, +) { + let Some(instrument) = deno_core::_ops::try_unwrap_cppgc_object::( + &mut *scope, + instrument, + ) else { + return; + }; + let values = state.try_take::(); + let mut attributes = values + .map(|mut attr| { + attr.attributes.reserve_exact(3); + attr.attributes + }) + .unwrap_or_else(|| Vec::with_capacity(3)); + let attr1 = attr_raw!(scope, key1, value1); + let attr2 = attr_raw!(scope, key2, value2); + let attr3 = attr_raw!(scope, key3, value3); + if let Some(kv1) = attr1 { + 
attributes.push(kv1); + } + if let Some(kv2) = attr2 { + attributes.push(kv2); + } + if let Some(kv3) = attr3 { + attributes.push(kv3); + } + if let Instrument::Observable(data_share) = &*instrument { + let mut data = data_share.lock().unwrap(); + data.insert(attributes, value); + } +} + +#[allow(clippy::too_many_arguments)] +#[op2(fast)] +fn op_otel_metric_attribute3<'s>( + scope: &mut v8::HandleScope<'s>, + state: &mut OpState, + #[smi] capacity: u32, + key1: v8::Local<'s, v8::Value>, + value1: v8::Local<'s, v8::Value>, + key2: v8::Local<'s, v8::Value>, + value2: v8::Local<'s, v8::Value>, + key3: v8::Local<'s, v8::Value>, + value3: v8::Local<'s, v8::Value>, +) { + let mut values = state.try_borrow_mut::(); + let attr1 = attr_raw!(scope, key1, value1); + let attr2 = attr_raw!(scope, key2, value2); + let attr3 = attr_raw!(scope, key3, value3); + if let Some(values) = &mut values { + values.attributes.reserve_exact( + (capacity as usize).saturating_sub(values.attributes.capacity()), + ); + if let Some(kv1) = attr1 { + values.attributes.push(kv1); + } + if let Some(kv2) = attr2 { + values.attributes.push(kv2); + } + if let Some(kv3) = attr3 { + values.attributes.push(kv3); + } + } else { + let mut attributes = Vec::with_capacity(capacity as usize); + if let Some(kv1) = attr1 { + attributes.push(kv1); + } + if let Some(kv2) = attr2 { + attributes.push(kv2); + } + if let Some(kv3) = attr3 { + attributes.push(kv3); + } + state.put(MetricAttributes { attributes }); + } +} + +struct ObservationDone(oneshot::Sender<()>); + +#[op2(async)] +async fn op_otel_metric_wait_to_observe(state: Rc>) -> bool { + let (tx, rx) = oneshot::channel(); + { + OTEL_PRE_COLLECT_CALLBACKS + .lock() + .expect("mutex poisoned") + .push(tx); + } + if let Ok(done) = rx.await { + state.borrow_mut().put(ObservationDone(done)); + true + } else { + false + } +} + +#[op2(fast)] +fn op_otel_metric_observation_done(state: &mut OpState) { + if let Some(ObservationDone(done)) = state.try_take::() { + let _ 
= done.send(()); + } +} diff --git a/vendor/deno_telemetry/telemetry.ts b/vendor/deno_telemetry/telemetry.ts new file mode 100644 index 00000000..119e089c --- /dev/null +++ b/vendor/deno_telemetry/telemetry.ts @@ -0,0 +1,1721 @@ +// Copyright 2018-2025 the Deno authors. MIT license. + +import { core, primordials } from "ext:core/mod.js"; +import { + op_otel_log, + op_otel_log_foreign, + op_otel_metric_attribute3, + op_otel_metric_observable_record0, + op_otel_metric_observable_record1, + op_otel_metric_observable_record2, + op_otel_metric_observable_record3, + op_otel_metric_observation_done, + op_otel_metric_record0, + op_otel_metric_record1, + op_otel_metric_record2, + op_otel_metric_record3, + op_otel_metric_wait_to_observe, + op_otel_span_add_link, + op_otel_span_attribute1, + op_otel_span_attribute2, + op_otel_span_attribute3, + op_otel_span_update_name, + OtelMeter, + OtelSpan, + OtelTracer, +} from "ext:core/ops"; +import { Console } from "ext:deno_console/01_console.js"; + +const { + ArrayFrom, + ArrayIsArray, + ArrayPrototypeFilter, + ArrayPrototypeForEach, + ArrayPrototypeJoin, + ArrayPrototypeMap, + ArrayPrototypePush, + ArrayPrototypeReduce, + ArrayPrototypeReverse, + ArrayPrototypeShift, + ArrayPrototypeSlice, + DatePrototype, + DatePrototypeGetTime, + Error, + MapPrototypeEntries, + MapPrototypeKeys, + Number, + NumberParseInt, + NumberPrototypeToString, + ObjectAssign, + ObjectDefineProperty, + ObjectEntries, + ObjectKeys, + ObjectPrototypeIsPrototypeOf, + ObjectValues, + ReflectApply, + SafeArrayIterator, + SafeIterator, + SafeMap, + SafePromiseAll, + SafeRegExp, + SafeSet, + SafeWeakSet, + StringPrototypeIndexOf, + StringPrototypeSlice, + StringPrototypeSplit, + StringPrototypeSubstring, + StringPrototypeTrim, + SymbolFor, + TypeError, + decodeURIComponent, + encodeURIComponent, +} = primordials; +const { AsyncVariable, getAsyncContext, setAsyncContext } = core; + +export let TRACING_ENABLED = false; +export let METRICS_ENABLED = false; +export let 
PROPAGATORS: TextMapPropagator[] = []; + +// Note: These start at 0 in the JS library, +// but start at 1 when serialized with JSON. +enum SpanKind { + INTERNAL = 0, + SERVER = 1, + CLIENT = 2, + PRODUCER = 3, + CONSUMER = 4, +} + +interface TraceState { + set(key: string, value: string): TraceState; + unset(key: string): TraceState; + get(key: string): string | undefined; + serialize(): string; +} + +interface SpanContext { + traceId: string; + spanId: string; + isRemote?: boolean; + traceFlags: number; + traceState?: TraceState; +} + +enum SpanStatusCode { + UNSET = 0, + OK = 1, + ERROR = 2, +} + +interface SpanStatus { + code: SpanStatusCode; + message?: string; +} + +type AttributeValue = + | string + | number + | boolean + | Array + | Array + | Array; + +interface Attributes { + [attributeKey: string]: AttributeValue | undefined; +} + +type SpanAttributes = Attributes; + +type TimeInput = [number, number] | number | Date; + +interface SpanOptions { + kind?: SpanKind; + attributes?: Attributes; + links?: Link[]; + startTime?: TimeInput; + root?: boolean; +} + +interface Link { + context: SpanContext; + attributes?: SpanAttributes; + droppedAttributesCount?: number; +} + +interface IArrayValue { + values: IAnyValue[]; +} + +interface IAnyValue { + stringValue?: string | null; + boolValue?: boolean | null; + intValue?: number | null; + doubleValue?: number | null; + arrayValue?: IArrayValue; + kvlistValue?: IKeyValueList; + bytesValue?: Uint8Array; +} + +interface IKeyValueList { + values: IKeyValue[]; +} + +interface IKeyValue { + key: string; + value: IAnyValue; +} + +function hrToMs(hr: [number, number]): number { + return (hr[0] * 1e3 + hr[1] / 1e6); +} + +interface AsyncContextSnapshot { + __brand: "AsyncContextSnapshot"; +} + +export function enterSpan(span: Span): AsyncContextSnapshot | undefined { + if (!span.isRecording()) return undefined; + const context = (CURRENT.get() ?? 
ROOT_CONTEXT).setValue(SPAN_KEY, span); + return CURRENT.enter(context); +} + +export const currentSnapshot = getAsyncContext; +export const restoreSnapshot = setAsyncContext; + +function isDate(value: unknown): value is Date { + return ObjectPrototypeIsPrototypeOf(value, DatePrototype); +} + +interface OtelTracer { + __key: "tracer"; + + // deno-lint-ignore no-misused-new + new (name: string, version?: string, schemaUrl?: string): OtelTracer; + + startSpan( + parent: OtelSpan | undefined, + name: string, + spanKind: SpanKind, + startTime: number | undefined, + attributeCount: number, + ): OtelSpan; + + startSpanForeign( + parentTraceId: string, + parentSpanId: string, + name: string, + spanKind: SpanKind, + startTime: number | undefined, + attributeCount: number, + ): OtelSpan; +} + +interface OtelSpan { + __key: "span"; + + spanContext(): SpanContext; + setStatus(status: SpanStatusCode, errorDescription: string): void; + dropEvent(): void; + end(endTime: number): void; +} + +interface TracerOptions { + schemaUrl?: string; +} + +class TracerProvider { + constructor() { + throw new TypeError("TracerProvider can not be constructed"); + } + + static getTracer( + name: string, + version?: string, + options?: TracerOptions, + ): Tracer { + const tracer = new OtelTracer(name, version, options?.schemaUrl); + return new Tracer(tracer); + } +} + +class Tracer { + #tracer: OtelTracer; + + constructor(tracer: OtelTracer) { + this.#tracer = tracer; + } + + startActiveSpan unknown>( + name: string, + fn: F, + ): ReturnType; + startActiveSpan unknown>( + name: string, + options: SpanOptions, + fn: F, + ): ReturnType; + startActiveSpan unknown>( + name: string, + options: SpanOptions, + context: Context, + fn: F, + ): ReturnType; + startActiveSpan unknown>( + name: string, + optionsOrFn: SpanOptions | F, + fnOrContext?: F | Context, + maybeFn?: F, + ) { + let options; + let context; + let fn; + if (typeof optionsOrFn === "function") { + options = undefined; + fn = optionsOrFn; + 
} else if (typeof fnOrContext === "function") { + options = optionsOrFn; + fn = fnOrContext; + } else if (typeof maybeFn === "function") { + options = optionsOrFn; + context = fnOrContext; + fn = maybeFn; + } else { + throw new Error("startActiveSpan requires a function argument"); + } + if (options?.root) { + context = ROOT_CONTEXT; + } else { + context = context ?? CURRENT.get() ?? ROOT_CONTEXT; + } + const span = this.startSpan(name, options, context); + const ctx = CURRENT.enter(context.setValue(SPAN_KEY, span)); + try { + return ReflectApply(fn, undefined, [span]); + } finally { + setAsyncContext(ctx); + } + } + + startSpan(name: string, options?: SpanOptions, context?: Context): Span { + if (options?.root) { + context = undefined; + } else { + context = context ?? CURRENT.get(); + } + + let startTime = options?.startTime; + if (startTime && ArrayIsArray(startTime)) { + startTime = hrToMs(startTime); + } else if (startTime && isDate(startTime)) { + startTime = DatePrototypeGetTime(startTime); + } + + const parentSpan = context?.getValue(SPAN_KEY) as + | Span + | { spanContext(): SpanContext } + | undefined; + const attributesCount = options?.attributes + ? ObjectKeys(options.attributes).length + : 0; + const parentOtelSpan: OtelSpan | null | undefined = parentSpan !== undefined + ? getOtelSpan(parentSpan) ?? undefined + : undefined; + let otelSpan: OtelSpan; + if (parentOtelSpan || !parentSpan) { + otelSpan = this.#tracer.startSpan( + parentOtelSpan, + name, + options?.kind ?? 0, + startTime, + attributesCount, + ); + } else { + const spanContext = parentSpan.spanContext(); + otelSpan = this.#tracer.startSpanForeign( + spanContext.traceId, + spanContext.spanId, + name, + options?.kind ?? 
0, + startTime, + attributesCount, + ); + } + const span = new Span(otelSpan); + if (options?.links) span.addLinks(options?.links); + if (options?.attributes) span.setAttributes(options?.attributes); + return span; + } +} + +const SPAN_KEY = SymbolFor("OpenTelemetry Context Key SPAN"); + +let getOtelSpan: (span: object) => OtelSpan | null | undefined; + +class Span { + #otelSpan: OtelSpan | null; + #spanContext: SpanContext | undefined; + + static { + // deno-lint-ignore prefer-primordials + getOtelSpan = (span) => (#otelSpan in span ? span.#otelSpan : undefined); + } + + constructor(otelSpan: OtelSpan | null) { + this.#otelSpan = otelSpan; + } + + spanContext() { + if (!this.#spanContext) { + if (this.#otelSpan) { + this.#spanContext = this.#otelSpan.spanContext(); + } else { + this.#spanContext = { + traceId: "00000000000000000000000000000000", + spanId: "0000000000000000", + traceFlags: 0, + }; + } + } + return this.#spanContext; + } + + addEvent( + _name: string, + _attributesOrStartTime?: Attributes | TimeInput, + _startTime?: TimeInput, + ): this { + this.#otelSpan?.dropEvent(); + return this; + } + + addLink(link: Link): this { + const droppedAttributeCount = (link.droppedAttributesCount ?? 0) + + (link.attributes ? ObjectKeys(link.attributes).length : 0); + const valid = op_otel_span_add_link( + this.#otelSpan, + link.context.traceId, + link.context.spanId, + link.context.traceFlags, + link.context.isRemote ?? 
false, + droppedAttributeCount, + ); + if (!valid) return this; + return this; + } + + addLinks(links: Link[]): this { + for (let i = 0; i < links.length; i++) { + this.addLink(links[i]); + } + return this; + } + + end(endTime?: TimeInput): void { + if (endTime && ArrayIsArray(endTime)) { + endTime = hrToMs(endTime); + } else if (endTime && isDate(endTime)) { + endTime = DatePrototypeGetTime(endTime); + } + this.#otelSpan?.end(endTime || NaN); + } + + isRecording(): boolean { + return this.#otelSpan !== undefined; + } + + // deno-lint-ignore no-explicit-any + recordException(_exception: any, _time?: TimeInput): void { + this.#otelSpan?.dropEvent(); + } + + setAttribute(key: string, value: AttributeValue): this { + if (!this.#otelSpan) return this; + op_otel_span_attribute1(this.#otelSpan, key, value); + return this; + } + + setAttributes(attributes: Attributes): this { + if (!this.#otelSpan) return this; + const attributeKvs = ObjectEntries(attributes); + let i = 0; + while (i < attributeKvs.length) { + if (i + 2 < attributeKvs.length) { + op_otel_span_attribute3( + this.#otelSpan, + attributeKvs[i][0], + attributeKvs[i][1], + attributeKvs[i + 1][0], + attributeKvs[i + 1][1], + attributeKvs[i + 2][0], + attributeKvs[i + 2][1], + ); + i += 3; + } else if (i + 1 < attributeKvs.length) { + op_otel_span_attribute2( + this.#otelSpan, + attributeKvs[i][0], + attributeKvs[i][1], + attributeKvs[i + 1][0], + attributeKvs[i + 1][1], + ); + i += 2; + } else { + op_otel_span_attribute1( + this.#otelSpan, + attributeKvs[i][0], + attributeKvs[i][1], + ); + i += 1; + } + } + return this; + } + + setStatus(status: SpanStatus): this { + this.#otelSpan?.setStatus(status.code, status.message ?? 
""); + return this; + } + + updateName(name: string): this { + if (!this.#otelSpan) return this; + op_otel_span_update_name(this.#otelSpan, name); + return this; + } +} + +const CURRENT = new AsyncVariable(); + +class Context { + // @ts-ignore __proto__ is not supported in TypeScript + #data: Record = { __proto__: null }; + + constructor(data?: Record | null | undefined) { + // @ts-ignore __proto__ is not supported in TypeScript + this.#data = { __proto__: null, ...data }; + } + + getValue(key: symbol): unknown { + return this.#data[key]; + } + + setValue(key: symbol, value: unknown): Context { + const c = new Context(this.#data); + c.#data[key] = value; + return c; + } + + deleteValue(key: symbol): Context { + const c = new Context(this.#data); + delete c.#data[key]; + return c; + } +} + +// TODO(lucacasonato): @opentelemetry/api defines it's own ROOT_CONTEXT +const ROOT_CONTEXT = new Context(); + +// Context manager for opentelemetry js library +export class ContextManager { + constructor() { + throw new TypeError("ContextManager can not be constructed"); + } + + static active(): Context { + return CURRENT.get() ?? 
ROOT_CONTEXT; + } + + static with ReturnType>( + context: Context, + fn: F, + thisArg?: ThisParameterType, + ...args: A + ): ReturnType { + const ctx = CURRENT.enter(context); + try { + return ReflectApply(fn, thisArg, args); + } finally { + setAsyncContext(ctx); + } + } + + // deno-lint-ignore no-explicit-any + static bind any>( + context: Context, + target: T, + ): T { + return ((...args) => { + const ctx = CURRENT.enter(context); + try { + return ReflectApply(target, this, args); + } finally { + setAsyncContext(ctx); + } + }) as T; + } + + static enable() { + return this; + } + + static disable() { + return this; + } +} + +// metrics + +interface MeterOptions { + schemaUrl?: string; +} + +interface MetricOptions { + description?: string; + + unit?: string; + + valueType?: ValueType; + + advice?: MetricAdvice; +} + +enum ValueType { + INT = 0, + DOUBLE = 1, +} + +interface MetricAdvice { + /** + * Hint the explicit bucket boundaries for SDK if the metric is been + * aggregated with a HistogramAggregator. 
+ */ + explicitBucketBoundaries?: number[]; +} + +interface OtelMeter { + __key: "meter"; + createCounter(name: string, description?: string, unit?: string): Instrument; + createUpDownCounter( + name: string, + description?: string, + unit?: string, + ): Instrument; + createGauge(name: string, description?: string, unit?: string): Instrument; + createHistogram( + name: string, + description?: string, + unit?: string, + explicitBucketBoundaries?: number[], + ): Instrument; + createObservableCounter( + name: string, + description?: string, + unit?: string, + ): Instrument; + createObservableUpDownCounter( + name: string, + description?: string, + unit?: string, + ): Instrument; + createObservableGauge( + name: string, + description?: string, + unit?: string, + ): Instrument; +} + +class MeterProvider { + constructor() { + throw new TypeError("MeterProvider can not be constructed"); + } + + static getMeter( + name: string, + version?: string, + options?: MeterOptions, + ): Meter { + const meter = new OtelMeter(name, version, options?.schemaUrl); + return new Meter(meter); + } +} + +type MetricAttributes = Attributes; + +type Instrument = { __key: "instrument" }; + +let batchResultHasObservables: ( + res: BatchObservableResult, + observables: Observable[], +) => boolean; + +class BatchObservableResult { + #observables: WeakSet; + + constructor(observables: WeakSet) { + this.#observables = observables; + } + + static { + batchResultHasObservables = (cb, observables) => { + for (const observable of new SafeIterator(observables)) { + if (!cb.#observables.has(observable)) return false; + } + return true; + }; + } + + observe( + metric: Observable, + value: number, + attributes?: MetricAttributes, + ): void { + if (!this.#observables.has(metric)) return; + getObservableResult(metric).observe(value, attributes); + } +} + +const BATCH_CALLBACKS = new SafeMap< + BatchObservableCallback, + BatchObservableResult +>(); +const INDIVIDUAL_CALLBACKS = new SafeMap>(); + +class Meter 
{ + #meter: OtelMeter; + + constructor(meter: OtelMeter) { + this.#meter = meter; + } + + createCounter(name: string, options?: MetricOptions): Counter { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) return new Counter(null, false); + const instrument = this.#meter.createCounter( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Counter(instrument, false); + } + + createUpDownCounter(name: string, options?: MetricOptions): Counter { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) return new Counter(null, true); + const instrument = this.#meter.createUpDownCounter( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Counter(instrument, true); + } + + createGauge(name: string, options?: MetricOptions): Gauge { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) return new Gauge(null); + const instrument = this.#meter.createGauge( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Gauge(instrument); + } + + createHistogram(name: string, options?: MetricOptions): Histogram { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) return new Histogram(null); + const instrument = this.#meter.createHistogram( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + options?.advice?.explicitBucketBoundaries, + ) as Instrument; + return new Histogram(instrument); + } + + createObservableCounter(name: string, 
options?: MetricOptions): Observable { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) new Observable(new ObservableResult(null, true)); + const instrument = this.#meter.createObservableCounter( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Observable(new ObservableResult(instrument, true)); + } + + createObservableUpDownCounter( + name: string, + options?: MetricOptions, + ): Observable { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) new Observable(new ObservableResult(null, false)); + const instrument = this.#meter.createObservableUpDownCounter( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Observable(new ObservableResult(instrument, false)); + } + + createObservableGauge(name: string, options?: MetricOptions): Observable { + if (options?.valueType !== undefined && options?.valueType !== 1) { + throw new Error("Only valueType: DOUBLE is supported"); + } + if (!METRICS_ENABLED) new Observable(new ObservableResult(null, false)); + const instrument = this.#meter.createObservableGauge( + name, + // deno-lint-ignore prefer-primordials + options?.description, + options?.unit, + ) as Instrument; + return new Observable(new ObservableResult(instrument, false)); + } + + addBatchObservableCallback( + callback: BatchObservableCallback, + observables: Observable[], + ): void { + if (!METRICS_ENABLED) return; + const result = new BatchObservableResult(new SafeWeakSet(observables)); + startObserving(); + BATCH_CALLBACKS.set(callback, result); + } + + removeBatchObservableCallback( + callback: BatchObservableCallback, + observables: Observable[], + ): void { + if (!METRICS_ENABLED) return; + const result = 
BATCH_CALLBACKS.get(callback); + if (result && batchResultHasObservables(result, observables)) { + BATCH_CALLBACKS.delete(callback); + } + } +} + +type BatchObservableCallback = ( + observableResult: BatchObservableResult, +) => void | Promise; + +function record( + instrument: Instrument | null, + value: number, + attributes?: MetricAttributes, +) { + if (instrument === null) return; + if (attributes === undefined) { + op_otel_metric_record0(instrument, value); + } else { + const attrs = ObjectEntries(attributes); + if (attrs.length === 0) { + op_otel_metric_record0(instrument, value); + } + let i = 0; + while (i < attrs.length) { + const remaining = attrs.length - i; + if (remaining > 3) { + op_otel_metric_attribute3( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + attrs[i + 2][0], + attrs[i + 2][1], + ); + i += 3; + } else if (remaining === 3) { + op_otel_metric_record3( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + attrs[i + 2][0], + attrs[i + 2][1], + ); + i += 3; + } else if (remaining === 2) { + op_otel_metric_record2( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + ); + i += 2; + } else if (remaining === 1) { + op_otel_metric_record1(instrument, value, attrs[i][0], attrs[i][1]); + i += 1; + } + } + } +} + +function recordObservable( + instrument: Instrument | null, + value: number, + attributes?: MetricAttributes, +) { + if (instrument === null) return; + if (attributes === undefined) { + op_otel_metric_observable_record0(instrument, value); + } else { + const attrs = ObjectEntries(attributes); + if (attrs.length === 0) { + op_otel_metric_observable_record0(instrument, value); + } + let i = 0; + while (i < attrs.length) { + const remaining = attrs.length - i; + if (remaining > 3) { + op_otel_metric_attribute3( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + attrs[i + 2][0], + 
attrs[i + 2][1], + ); + i += 3; + } else if (remaining === 3) { + op_otel_metric_observable_record3( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + attrs[i + 2][0], + attrs[i + 2][1], + ); + i += 3; + } else if (remaining === 2) { + op_otel_metric_observable_record2( + instrument, + value, + attrs[i][0], + attrs[i][1], + attrs[i + 1][0], + attrs[i + 1][1], + ); + i += 2; + } else if (remaining === 1) { + op_otel_metric_observable_record1( + instrument, + value, + attrs[i][0], + attrs[i][1], + ); + i += 1; + } + } + } +} + +class Counter { + #instrument: Instrument | null; + #upDown: boolean; + + constructor(instrument: Instrument | null, upDown: boolean) { + this.#instrument = instrument; + this.#upDown = upDown; + } + + add(value: number, attributes?: MetricAttributes, _context?: Context): void { + if (value < 0 && !this.#upDown) { + throw new Error("Counter can only be incremented"); + } + record(this.#instrument, value, attributes); + } +} + +class Gauge { + #instrument: Instrument | null; + + constructor(instrument: Instrument | null) { + this.#instrument = instrument; + } + + record( + value: number, + attributes?: MetricAttributes, + _context?: Context, + ): void { + record(this.#instrument, value, attributes); + } +} + +class Histogram { + #instrument: Instrument | null; + + constructor(instrument: Instrument | null) { + this.#instrument = instrument; + } + + record( + value: number, + attributes?: MetricAttributes, + _context?: Context, + ): void { + record(this.#instrument, value, attributes); + } +} + +type ObservableCallback = ( + observableResult: ObservableResult, +) => void | Promise; + +let getObservableResult: (observable: Observable) => ObservableResult; + +class Observable { + #result: ObservableResult; + + constructor(result: ObservableResult) { + this.#result = result; + } + + static { + getObservableResult = (observable) => observable.#result; + } + + addCallback(callback: ObservableCallback): void 
{ + const res = INDIVIDUAL_CALLBACKS.get(this); + if (res) res.add(callback); + else INDIVIDUAL_CALLBACKS.set(this, new SafeSet([callback])); + startObserving(); + } + + removeCallback(callback: ObservableCallback): void { + const res = INDIVIDUAL_CALLBACKS.get(this); + if (res) res.delete(callback); + if (res?.size === 0) INDIVIDUAL_CALLBACKS.delete(this); + } +} + +class ObservableResult { + #instrument: Instrument | null; + #isRegularCounter: boolean; + + constructor(instrument: Instrument | null, isRegularCounter: boolean) { + this.#instrument = instrument; + this.#isRegularCounter = isRegularCounter; + } + + observe( + this: ObservableResult, + value: number, + attributes?: MetricAttributes, + ): void { + if (this.#isRegularCounter) { + if (value < 0) { + throw new Error("Observable counters can only be incremented"); + } + } + recordObservable(this.#instrument, value, attributes); + } +} + +async function observe(): Promise { + const promises: Promise[] = []; + // Primordials are not needed, because this is a SafeMap. + // deno-lint-ignore prefer-primordials + for (const { 0: observable, 1: callbacks } of INDIVIDUAL_CALLBACKS) { + const result = getObservableResult(observable); + // Primordials are not needed, because this is a SafeSet. + // deno-lint-ignore prefer-primordials + for (const callback of callbacks) { + // PromiseTry is not in primordials? + // deno-lint-ignore prefer-primordials + ArrayPrototypePush(promises, Promise.try(callback, result)); + } + } + // Primordials are not needed, because this is a SafeMap. + // deno-lint-ignore prefer-primordials + for (const { 0: callback, 1: result } of BATCH_CALLBACKS) { + // PromiseTry is not in primordials? 
+ // deno-lint-ignore prefer-primordials + ArrayPrototypePush(promises, Promise.try(callback, result)); + } + await SafePromiseAll(promises); +} + +let isObserving = false; +function startObserving() { + if (!isObserving) { + isObserving = true; + (async () => { + while (true) { + const promise = op_otel_metric_wait_to_observe(); + core.unrefOpPromise(promise); + const ok = await promise; + if (!ok) break; + await observe(); + op_otel_metric_observation_done(); + } + })(); + } +} + +const otelConsoleConfig = { + ignore: 0, + capture: 1, + replace: 2, +}; + +function otelLog(message: string, level: number) { + const currentSpan = CURRENT.get()?.getValue(SPAN_KEY); + const otelSpan = currentSpan !== undefined + ? getOtelSpan(currentSpan) + : undefined; + if (otelSpan || currentSpan === undefined) { + op_otel_log(message, level, otelSpan); + } else { + const spanContext = currentSpan.spanContext(); + op_otel_log_foreign( + message, + level, + spanContext.traceId, + spanContext.spanId, + spanContext.traceFlags, + ); + } +} + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +const VERSION = "00"; +const VERSION_PART = "(?!ff)[\\da-f]{2}"; +const TRACE_ID_PART = "(?![0]{32})[\\da-f]{32}"; +const PARENT_ID_PART = "(?![0]{16})[\\da-f]{16}"; +const FLAGS_PART = "[\\da-f]{2}"; +const TRACE_PARENT_REGEX = new SafeRegExp( + `^\\s?(${VERSION_PART})-(${TRACE_ID_PART})-(${PARENT_ID_PART})-(${FLAGS_PART})(-.*)?\\s?$`, +); +const VALID_TRACEID_REGEX = new SafeRegExp("^([0-9a-f]{32})$", "i"); +const VALID_SPANID_REGEX = new SafeRegExp("^[0-9a-f]{16}$", "i"); +const MAX_TRACE_STATE_ITEMS = 32; +const MAX_TRACE_STATE_LEN = 512; +const LIST_MEMBERS_SEPARATOR = ","; +const LIST_MEMBER_KEY_VALUE_SPLITTER = "="; +const VALID_KEY_CHAR_RANGE = "[_0-9a-z-*/]"; +const VALID_KEY = `[a-z]${VALID_KEY_CHAR_RANGE}{0,255}`; +const VALID_VENDOR_KEY = + `[a-z0-9]${VALID_KEY_CHAR_RANGE}{0,240}@[a-z]${VALID_KEY_CHAR_RANGE}{0,13}`; +const VALID_KEY_REGEX = new SafeRegExp( + `^(?:${VALID_KEY}|${VALID_VENDOR_KEY})$`, +); +const VALID_VALUE_BASE_REGEX = new SafeRegExp("^[ -~]{0,255}[!-~]$"); +const INVALID_VALUE_COMMA_EQUAL_REGEX = new SafeRegExp(",|="); + +const TRACE_PARENT_HEADER = "traceparent"; +const TRACE_STATE_HEADER = "tracestate"; +const INVALID_TRACEID = "00000000000000000000000000000000"; +const INVALID_SPANID = "0000000000000000"; +const INVALID_SPAN_CONTEXT: SpanContext = { + traceId: INVALID_TRACEID, + spanId: INVALID_SPANID, + traceFlags: 0, +}; +const BAGGAGE_KEY_PAIR_SEPARATOR = "="; +const BAGGAGE_PROPERTIES_SEPARATOR = ";"; +const BAGGAGE_ITEMS_SEPARATOR = ","; +const BAGGAGE_HEADER = "baggage"; +const BAGGAGE_MAX_NAME_VALUE_PAIRS = 180; +const BAGGAGE_MAX_PER_NAME_VALUE_PAIRS = 4096; +const BAGGAGE_MAX_TOTAL_LENGTH = 8192; + +class NonRecordingSpan implements Span { + constructor( + private readonly _spanContext: SpanContext = INVALID_SPAN_CONTEXT, + ) {} + + spanContext(): SpanContext { + return this._spanContext; + } + + setAttribute(_key: string, _value: unknown): this { + return this; + } + + setAttributes(_attributes: SpanAttributes): 
this { + return this; + } + + addEvent(_name: string, _attributes?: SpanAttributes): this { + return this; + } + + addLink(_link: Link): this { + return this; + } + + addLinks(_links: Link[]): this { + return this; + } + + setStatus(_status: SpanStatus): this { + return this; + } + + updateName(_name: string): this { + return this; + } + + end(_endTime?: TimeInput): void {} + + isRecording(): boolean { + return false; + } + + // deno-lint-ignore no-explicit-any + recordException(_exception: any, _time?: TimeInput): void {} +} + +const otelPropagators = { + traceContext: 0, + baggage: 1, + none: 2, +}; + +function parseTraceParent(traceParent: string): SpanContext | null { + const match = TRACE_PARENT_REGEX.exec(traceParent); + if (!match) return null; + + // According to the specification the implementation should be compatible + // with future versions. If there are more parts, we only reject it if it's using version 00 + // See https://www.w3.org/TR/trace-context/#versioning-of-traceparent + if (match[1] === "00" && match[5]) return null; + + return { + traceId: match[2], + spanId: match[3], + traceFlags: NumberParseInt(match[4], 16), + }; +} + +// deno-lint-ignore no-explicit-any +interface TextMapSetter { + set(carrier: Carrier, key: string, value: string): void; +} + +// deno-lint-ignore no-explicit-any +interface TextMapPropagator { + inject( + context: Context, + carrier: Carrier, + setter: TextMapSetter, + ): void; + extract( + context: Context, + carrier: Carrier, + getter: TextMapGetter, + ): Context; + fields(): string[]; +} + +// deno-lint-ignore no-explicit-any +interface TextMapGetter { + keys(carrier: Carrier): string[]; + get(carrier: Carrier, key: string): undefined | string | string[]; +} + +function isTracingSuppressed(context: Context): boolean { + return context.getValue( + SymbolFor("OpenTelemetry SDK Context Key SUPPRESS_TRACING"), + ) === true; +} + +function isValidTraceId(traceId: string): boolean { + return 
VALID_TRACEID_REGEX.test(traceId) && traceId !== INVALID_TRACEID; +} + +function isValidSpanId(spanId: string): boolean { + return VALID_SPANID_REGEX.test(spanId) && spanId !== INVALID_SPANID; +} + +function isSpanContextValid(spanContext: SpanContext): boolean { + return ( + isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId) + ); +} + +function validateKey(key: string): boolean { + return VALID_KEY_REGEX.test(key); +} + +function validateValue(value: string): boolean { + return ( + VALID_VALUE_BASE_REGEX.test(value) && + !INVALID_VALUE_COMMA_EQUAL_REGEX.test(value) + ); +} + +class TraceStateClass implements TraceState { + private _internalState: Map = new SafeMap(); + + constructor(rawTraceState?: string) { + if (rawTraceState) this._parse(rawTraceState); + } + + set(key: string, value: string): TraceStateClass { + const traceState = this._clone(); + if (traceState._internalState.has(key)) { + traceState._internalState.delete(key); + } + traceState._internalState.set(key, value); + return traceState; + } + + unset(key: string): TraceStateClass { + const traceState = this._clone(); + traceState._internalState.delete(key); + return traceState; + } + + get(key: string): string | undefined { + return this._internalState.get(key); + } + + serialize(): string { + return ArrayPrototypeJoin( + ArrayPrototypeReduce(this._keys(), (agg: string[], key) => { + ArrayPrototypePush( + agg, + key + LIST_MEMBER_KEY_VALUE_SPLITTER + this.get(key), + ); + return agg; + }, []), + LIST_MEMBERS_SEPARATOR, + ); + } + + private _parse(rawTraceState: string) { + if (rawTraceState.length > MAX_TRACE_STATE_LEN) return; + this._internalState = ArrayPrototypeReduce( + ArrayPrototypeReverse( + StringPrototypeSplit(rawTraceState, LIST_MEMBERS_SEPARATOR), + ), + (agg: Map, part: string) => { + const listMember = StringPrototypeTrim(part); // Optional Whitespace (OWS) handling + const i = StringPrototypeIndexOf( + listMember, + LIST_MEMBER_KEY_VALUE_SPLITTER, + ); + if (i 
!== -1) { + const key = StringPrototypeSlice(listMember, 0, i); + const value = StringPrototypeSlice(listMember, i + 1, part.length); + if (validateKey(key) && validateValue(value)) { + agg.set(key, value); + } + } + return agg; + }, + new SafeMap(), + ); + + // Because of the reverse() requirement, trunc must be done after map is created + if (this._internalState.size > MAX_TRACE_STATE_ITEMS) { + this._internalState = new SafeMap( + ArrayPrototypeSlice( + ArrayPrototypeReverse( + ArrayFrom(MapPrototypeEntries(this._internalState)), + ), + 0, + MAX_TRACE_STATE_ITEMS, + ), + ); + } + } + + private _keys(): string[] { + return ArrayPrototypeReverse( + ArrayFrom(MapPrototypeKeys(this._internalState)), + ); + } + + private _clone(): TraceStateClass { + const traceState = new TraceStateClass(); + traceState._internalState = new SafeMap(this._internalState); + return traceState; + } +} + +class W3CTraceContextPropagator implements TextMapPropagator { + inject(context: Context, carrier: unknown, setter: TextMapSetter): void { + const spanContext = (context.getValue(SPAN_KEY) as Span | undefined) + ?.spanContext(); + if ( + !spanContext || + isTracingSuppressed(context) || + !isSpanContextValid(spanContext) + ) { + return; + } + + const traceParent = + `${VERSION}-${spanContext.traceId}-${spanContext.spanId}-0${ + NumberPrototypeToString(Number(spanContext.traceFlags || 0), 16) + }`; + + setter.set(carrier, TRACE_PARENT_HEADER, traceParent); + if (spanContext.traceState) { + setter.set( + carrier, + TRACE_STATE_HEADER, + spanContext.traceState.serialize(), + ); + } + } + + extract(context: Context, carrier: unknown, getter: TextMapGetter): Context { + const traceParentHeader = getter.get(carrier, TRACE_PARENT_HEADER); + if (!traceParentHeader) return context; + const traceParent = ArrayIsArray(traceParentHeader) + ? 
traceParentHeader[0] + : traceParentHeader; + if (typeof traceParent !== "string") return context; + const spanContext = parseTraceParent(traceParent); + if (!spanContext) return context; + + spanContext.isRemote = true; + + const traceStateHeader = getter.get(carrier, TRACE_STATE_HEADER); + if (traceStateHeader) { + // If more than one `tracestate` header is found, we merge them into a + // single header. + const state = ArrayIsArray(traceStateHeader) + ? ArrayPrototypeJoin(traceStateHeader, ",") + : traceStateHeader; + spanContext.traceState = new TraceStateClass( + typeof state === "string" ? state : undefined, + ); + } + return context.setValue(SPAN_KEY, new NonRecordingSpan(spanContext)); + } + + fields(): string[] { + return [TRACE_PARENT_HEADER, TRACE_STATE_HEADER]; + } +} + +const baggageEntryMetadataSymbol = SymbolFor("BaggageEntryMetadata"); + +type BaggageEntryMetadata = { toString(): string } & { + __TYPE__: typeof baggageEntryMetadataSymbol; +}; + +interface BaggageEntry { + value: string; + metadata?: BaggageEntryMetadata; +} + +interface ParsedBaggageKeyValue { + key: string; + value: string; + metadata: BaggageEntryMetadata | undefined; +} + +interface Baggage { + getEntry(key: string): BaggageEntry | undefined; + getAllEntries(): [string, BaggageEntry][]; + setEntry(key: string, entry: BaggageEntry): Baggage; + removeEntry(key: string): Baggage; + removeEntries(...key: string[]): Baggage; + clear(): Baggage; +} + +export function baggageEntryMetadataFromString( + str: string, +): BaggageEntryMetadata { + if (typeof str !== "string") { + str = ""; + } + + return { + __TYPE__: baggageEntryMetadataSymbol, + toString() { + return str; + }, + }; +} + +function serializeKeyPairs(keyPairs: string[]): string { + return ArrayPrototypeReduce(keyPairs, (hValue: string, current: string) => { + const value = `${hValue}${ + hValue !== "" ? BAGGAGE_ITEMS_SEPARATOR : "" + }${current}`; + return value.length > BAGGAGE_MAX_TOTAL_LENGTH ? 
hValue : value; + }, ""); +} + +function getKeyPairs(baggage: Baggage): string[] { + return ArrayPrototypeMap(baggage.getAllEntries(), (baggageEntry) => { + let entry = `${encodeURIComponent(baggageEntry[0])}=${ + encodeURIComponent(baggageEntry[1].value) + }`; + + // include opaque metadata if provided + // NOTE: we intentionally don't URI-encode the metadata - that responsibility falls on the metadata implementation + if (baggageEntry[1].metadata !== undefined) { + entry += BAGGAGE_PROPERTIES_SEPARATOR + + // deno-lint-ignore prefer-primordials + baggageEntry[1].metadata.toString(); + } + + return entry; + }); +} + +function parsePairKeyValue( + entry: string, +): ParsedBaggageKeyValue | undefined { + const valueProps = StringPrototypeSplit(entry, BAGGAGE_PROPERTIES_SEPARATOR); + if (valueProps.length <= 0) return; + const keyPairPart = ArrayPrototypeShift(valueProps); + if (!keyPairPart) return; + const separatorIndex = StringPrototypeIndexOf( + keyPairPart, + BAGGAGE_KEY_PAIR_SEPARATOR, + ); + if (separatorIndex <= 0) return; + const key = decodeURIComponent( + StringPrototypeTrim( + StringPrototypeSubstring(keyPairPart, 0, separatorIndex), + ), + ); + const value = decodeURIComponent( + StringPrototypeTrim( + StringPrototypeSubstring(keyPairPart, separatorIndex + 1), + ), + ); + let metadata; + if (valueProps.length > 0) { + metadata = baggageEntryMetadataFromString( + ArrayPrototypeJoin(valueProps, BAGGAGE_PROPERTIES_SEPARATOR), + ); + } + return { key, value, metadata }; +} + +class BaggageImpl implements Baggage { + #entries: Map; + + constructor(entries?: Map) { + this.#entries = entries ? 
new SafeMap(entries) : new SafeMap(); + } + + getEntry(key: string): BaggageEntry | undefined { + const entry = this.#entries.get(key); + if (!entry) { + return undefined; + } + + return ObjectAssign({}, entry); + } + + getAllEntries(): [string, BaggageEntry][] { + return ArrayPrototypeMap( + ArrayFrom(MapPrototypeEntries(this.#entries)), + (entry) => [entry[0], entry[1]], + ); + } + + setEntry(key: string, entry: BaggageEntry): BaggageImpl { + const newBaggage = new BaggageImpl(this.#entries); + newBaggage.#entries.set(key, entry); + return newBaggage; + } + + removeEntry(key: string): BaggageImpl { + const newBaggage = new BaggageImpl(this.#entries); + newBaggage.#entries.delete(key); + return newBaggage; + } + + removeEntries(...keys: string[]): BaggageImpl { + const newBaggage = new BaggageImpl(this.#entries); + for (const key of new SafeArrayIterator(keys)) { + newBaggage.#entries.delete(key); + } + return newBaggage; + } + + clear(): BaggageImpl { + return new BaggageImpl(); + } +} + +export class W3CBaggagePropagator implements TextMapPropagator { + inject(context: Context, carrier: unknown, setter: TextMapSetter): void { + const baggage = context.getValue(baggageEntryMetadataSymbol) as + | Baggage + | undefined; + if (!baggage || isTracingSuppressed(context)) return; + const keyPairs = ArrayPrototypeSlice( + ArrayPrototypeFilter(getKeyPairs(baggage), (pair: string) => { + return pair.length <= BAGGAGE_MAX_PER_NAME_VALUE_PAIRS; + }), + 0, + BAGGAGE_MAX_NAME_VALUE_PAIRS, + ); + const headerValue = serializeKeyPairs(keyPairs); + if (headerValue.length > 0) { + setter.set(carrier, BAGGAGE_HEADER, headerValue); + } + } + + extract(context: Context, carrier: unknown, getter: TextMapGetter): Context { + const headerValue = getter.get(carrier, BAGGAGE_HEADER); + const baggageString = ArrayIsArray(headerValue) + ? 
ArrayPrototypeJoin(headerValue, BAGGAGE_ITEMS_SEPARATOR) + : headerValue; + if (!baggageString) return context; + const baggage: Record = {}; + if (baggageString.length === 0) { + return context; + } + const pairs = StringPrototypeSplit(baggageString, BAGGAGE_ITEMS_SEPARATOR); + ArrayPrototypeForEach(pairs, (entry) => { + const keyPair = parsePairKeyValue(entry); + if (keyPair) { + const baggageEntry: BaggageEntry = { value: keyPair.value }; + if (keyPair.metadata) { + baggageEntry.metadata = keyPair.metadata; + } + baggage[keyPair.key] = baggageEntry; + } + }); + if (ObjectEntries(baggage).length === 0) { + return context; + } + + return context.setValue( + baggageEntryMetadataSymbol, + new BaggageImpl(new SafeMap(ObjectEntries(baggage))), + ); + } + + fields(): string[] { + return [BAGGAGE_HEADER]; + } +} + +let builtinTracerCache: Tracer; + +export function builtinTracer(): Tracer { + if (!builtinTracerCache) { + builtinTracerCache = new Tracer(OtelTracer.builtin()); + } + return builtinTracerCache; +} + +// We specify a very high version number, to allow any `@opentelemetry/api` +// version to load this module. This does cause @opentelemetry/api to not be +// able to register anything itself with the global registration methods. 
+const OTEL_API_COMPAT_VERSION = "1.999.999"; + +export function bootstrap( + config: [ + 0 | 1, + 0 | 1, + (typeof otelConsoleConfig)[keyof typeof otelConsoleConfig], + 0 | 1, + ...Array<(typeof otelPropagators)[keyof typeof otelPropagators]>, + ], +): void { + const { + 0: tracingEnabled, + 1: metricsEnabled, + 2: consoleConfig, + ...propagators + } = config; + + TRACING_ENABLED = tracingEnabled === 1; + METRICS_ENABLED = metricsEnabled === 1; + + PROPAGATORS = ArrayPrototypeMap( + ArrayPrototypeFilter( + ObjectValues(propagators), + (propagator) => propagator !== otelPropagators.none, + ), + (propagator) => { + switch (propagator) { + case otelPropagators.traceContext: + return new W3CTraceContextPropagator(); + case otelPropagators.baggage: + return new W3CBaggagePropagator(); + } + }, + ); + + switch (consoleConfig) { + case otelConsoleConfig.capture: + core.wrapConsole(globalThis.console, new Console(otelLog)); + break; + case otelConsoleConfig.replace: + ObjectDefineProperty( + globalThis, + "console", + core.propNonEnumerable(new Console(otelLog)), + ); + break; + default: + break; + } + + if (TRACING_ENABLED || METRICS_ENABLED) { + const otel = globalThis[SymbolFor("opentelemetry.js.api.1")] ??= { + version: OTEL_API_COMPAT_VERSION, + }; + if (TRACING_ENABLED) { + otel.trace = TracerProvider; + otel.context = ContextManager; + } + if (METRICS_ENABLED) { + otel.metrics = MeterProvider; + } + } +} + +export const telemetry = { + tracerProvider: TracerProvider, + contextManager: ContextManager, + meterProvider: MeterProvider, +}; diff --git a/vendor/deno_telemetry/util.ts b/vendor/deno_telemetry/util.ts new file mode 100644 index 00000000..54612a4d --- /dev/null +++ b/vendor/deno_telemetry/util.ts @@ -0,0 +1,37 @@ +// Copyright 2018-2025 the Deno authors. MIT license. 
+ +import { primordials } from "ext:core/mod.js"; +import type { Span } from "ext:deno_telemetry/telemetry.ts"; + +const { String, StringPrototypeSlice } = primordials; + +export function updateSpanFromRequest(span: Span, request: Request) { + span.updateName(request.method); + + span.setAttribute("http.request.method", request.method); + const url = new URL(request.url); + span.setAttribute("url.full", request.url); + span.setAttribute( + "url.scheme", + StringPrototypeSlice(url.protocol, 0, -1), + ); + span.setAttribute("url.path", url.pathname); + span.setAttribute("url.query", StringPrototypeSlice(url.search, 1)); +} + +export function updateSpanFromResponse(span: Span, response: Response) { + span.setAttribute( + "http.response.status_code", + String(response.status), + ); + if (response.status >= 400) { + span.setAttribute("error.type", String(response.status)); + span.setStatus({ code: 2, message: response.statusText }); + } +} + +// deno-lint-ignore no-explicit-any +export function updateSpanFromError(span: Span, error: any) { + span.setAttribute("error.type", error.name ?? "Error"); + span.setStatus({ code: 2, message: error.message ?? String(error) }); +}