From e7c406773f374e461f3a0321446d5936f5410e6b Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Sat, 9 Aug 2025 18:35:22 -0400 Subject: [PATCH 1/6] Add package version tracking and correctness checks to LinearSolveAutotune - Add get_package_versions() function to collect versions of all LinearSolve-related dependencies - Include versions of LinearSolve, RecursiveFactorization, CUDA, Metal, MKL_jll, blis_jll, etc. - Display package versions in telemetry output for better reproducibility - Add correctness check against LUFactorization baseline - Algorithms that fail correctness check (with lenient tolerance) are warned and excluded - Default tolerance is 1e-2 (very lenient) to catch major issues - Helps ensure benchmark results are meaningful and catch any fishy business --- lib/LinearSolveAutotune/src/benchmarking.jl | 61 +++++++++--- lib/LinearSolveAutotune/src/gpu_detection.jl | 99 ++++++++++++++++++++ lib/LinearSolveAutotune/src/telemetry.jl | 15 +++ 3 files changed, 161 insertions(+), 14 deletions(-) diff --git a/lib/LinearSolveAutotune/src/benchmarking.jl b/lib/LinearSolveAutotune/src/benchmarking.jl index f70be4557..cbfb38ef9 100644 --- a/lib/LinearSolveAutotune/src/benchmarking.jl +++ b/lib/LinearSolveAutotune/src/benchmarking.jl @@ -1,6 +1,7 @@ # Core benchmarking functionality using ProgressMeter +using LinearAlgebra """ test_algorithm_compatibility(alg, eltype::Type, test_size::Int=4) @@ -78,7 +79,8 @@ Benchmark the given algorithms across different matrix sizes and element types. Returns a DataFrame with results including element type information. """ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; - samples = 5, seconds = 0.5, sizes = [:tiny, :small, :medium, :large]) + samples = 5, seconds = 0.5, sizes = [:tiny, :small, :medium, :large], + check_correctness = true, correctness_tol = 1e-2) # Set benchmark parameters old_params = BenchmarkTools.DEFAULT_PARAMETERS @@ -116,6 +118,18 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; A = rand(rng, eltype, n, n) b = rand(rng, eltype, n) u0 = rand(rng, eltype, n) + + # Compute reference solution with LUFactorization if correctness check is enabled + reference_solution = nothing + if check_correctness + try + ref_prob = LinearProblem(copy(A), copy(b); u0 = copy(u0)) + reference_solution = solve(ref_prob, LinearSolve.LUFactorization()) + catch e + @warn "Failed to compute reference solution with LUFactorization for size $n, eltype $eltype: $e" + check_correctness = false # Disable for this size/type combination + end + end for (alg, name) in zip(compatible_algs, compatible_names) # Update progress description @@ -125,6 +139,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; gflops = 0.0 success = true error_msg = "" + passed_correctness = true try # Create the linear problem for this test @@ -132,19 +147,37 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; u0 = copy(u0), alias = LinearAliasSpecifier(alias_A = true, alias_b = true)) - # Warmup run - solve(prob, alg) - - # Actual benchmark - bench = @benchmark solve($prob, $alg) setup=(prob = LinearProblem( - copy($A), copy($b); - u0 = copy($u0), - alias = LinearAliasSpecifier(alias_A = true, alias_b = true))) - - # Calculate GFLOPs - min_time_sec = minimum(bench.times) / 1e9 - flops = luflop(n, n) - gflops = flops / min_time_sec / 1e9 + # Warmup run and correctness check + warmup_sol = solve(prob, alg) + + # Check correctness if reference solution is available + if 
check_correctness && reference_solution !== nothing + # Compute relative error + rel_error = norm(warmup_sol.u - reference_solution.u) / norm(reference_solution.u) + + if rel_error > correctness_tol + passed_correctness = false + @warn "Algorithm $name failed correctness check for size $n, eltype $eltype. " * + "Relative error: $(round(rel_error, sigdigits=3)) > tolerance: $correctness_tol. " * + "Algorithm will be excluded from results." + success = false + error_msg = "Failed correctness check (rel_error = $(round(rel_error, sigdigits=3)))" + end + end + + # Only benchmark if correctness check passed + if passed_correctness + # Actual benchmark + bench = @benchmark solve($prob, $alg) setup=(prob = LinearProblem( + copy($A), copy($b); + u0 = copy($u0), + alias = LinearAliasSpecifier(alias_A = true, alias_b = true))) + + # Calculate GFLOPs + min_time_sec = minimum(bench.times) / 1e9 + flops = luflop(n, n) + gflops = flops / min_time_sec / 1e9 + end catch e success = false diff --git a/lib/LinearSolveAutotune/src/gpu_detection.jl b/lib/LinearSolveAutotune/src/gpu_detection.jl index 8a04fc8e0..40a52e307 100644 --- a/lib/LinearSolveAutotune/src/gpu_detection.jl +++ b/lib/LinearSolveAutotune/src/gpu_detection.jl @@ -1,6 +1,7 @@ # GPU hardware and package detection using CPUSummary +using Pkg """ is_cuda_available() @@ -116,9 +117,107 @@ function get_system_info() info["apple_accelerate_available"] = false end + # Add package versions + info["package_versions"] = get_package_versions() + return info end +""" + get_package_versions() + +Get versions of LinearSolve-related packages and their dependencies. +Returns a Dict with package names and versions. +""" +function get_package_versions() + versions = Dict{String, String}() + + # Get the current project's dependencies + deps = Pkg.dependencies() + + # List of packages we're interested in tracking + important_packages = [ + "LinearSolve", + "LinearSolveAutotune", + "RecursiveFactorization", + "CUDA", + "Metal", + "MKL_jll", + "BLISBLAS", + "AppleAccelerate", + "SparseArrays", + "KLU", + "Pardiso", + "MKLPardiso", + "BandedMatrices", + "FastLapackInterface", + "HYPRE", + "IterativeSolvers", + "Krylov", + "KrylovKit", + "LinearAlgebra" + ] + + # Also track JLL packages for BLAS libraries + jll_packages = [ + "MKL_jll", + "OpenBLAS_jll", + "OpenBLAS32_jll", + "blis_jll", + "LAPACK_jll", + "CompilerSupportLibraries_jll" + ] + + all_packages = union(important_packages, jll_packages) + + # Iterate through dependencies and collect versions + for (uuid, dep) in deps + if dep.name in all_packages + if dep.version !== nothing + versions[dep.name] = string(dep.version) + else + # Try to get version from the package itself if loaded + try + pkg_module = Base.loaded_modules[Base.PkgId(uuid, dep.name)] + if isdefined(pkg_module, :version) + versions[dep.name] = string(pkg_module.version) + else + versions[dep.name] = "unknown" + end + catch + versions[dep.name] = "unknown" + end + end + end + end + + # Try to get Julia's LinearAlgebra stdlib version + try + versions["LinearAlgebra"] = string(VERSION) # Stdlib version matches Julia + catch + versions["LinearAlgebra"] = "stdlib" + end + + # Get BLAS configuration info + try + blas_config = LinearAlgebra.BLAS.get_config() + if hasfield(typeof(blas_config), :loaded_libs) + for lib in blas_config.loaded_libs + if hasfield(typeof(lib), :libname) + lib_name = basename(string(lib.libname)) + # Extract version info if available + versions["BLAS_lib"] = lib_name + end + end + end + catch + # Fallback for older Julia 
versions + versions["BLAS_vendor"] = string(LinearAlgebra.BLAS.vendor()) + end + + return versions +end + """ get_detailed_system_info() diff --git a/lib/LinearSolveAutotune/src/telemetry.jl b/lib/LinearSolveAutotune/src/telemetry.jl index dd49abf53..c1e16c79a 100644 --- a/lib/LinearSolveAutotune/src/telemetry.jl +++ b/lib/LinearSolveAutotune/src/telemetry.jl @@ -172,6 +172,21 @@ function format_system_info_markdown(system_info::Dict) push!(lines, "- **CUDA Available**: $(get(system_info, "cuda_available", get(system_info, "has_cuda", false)))") # Handle both "has_metal" and "metal_available" keys push!(lines, "- **Metal Available**: $(get(system_info, "metal_available", get(system_info, "has_metal", false)))") + + # Add package versions section + if haskey(system_info, "package_versions") + push!(lines, "") + push!(lines, "### Package Versions") + pkg_versions = system_info["package_versions"] + + # Sort packages for consistent display + sorted_packages = sort(collect(keys(pkg_versions))) + + for pkg_name in sorted_packages + version = pkg_versions[pkg_name] + push!(lines, "- **$pkg_name**: $version") + end + end return join(lines, "\n") end From 844beec39d56c2ca0b44d05b949be21906e77c8b Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Sat, 9 Aug 2025 18:38:00 -0400 Subject: [PATCH 2/6] Add Pkg as dependency to LinearSolveAutotune Required for get_package_versions() function to use Pkg.dependencies() --- lib/LinearSolveAutotune/Project.toml | 2 + test_autotune_improvements.jl | 63 ++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 test_autotune_improvements.jl diff --git a/lib/LinearSolveAutotune/Project.toml b/lib/LinearSolveAutotune/Project.toml index 92084ca27..83cc222db 100644 --- a/lib/LinearSolveAutotune/Project.toml +++ b/lib/LinearSolveAutotune/Project.toml @@ -16,6 +16,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" MKL_jll = "856f044c-d86e-5d09-b602-aeab76dc8ba7" Metal = "dde4c033-4e86-420c-a63e-0dd931031962" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Preferences = "21216c6a-2e73-6563-6e65-726566657250" PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" @@ -41,6 +42,7 @@ LinearAlgebra = "1" LinearSolve = "3" MKL_jll = "2025.2.0" Metal = "1" +Pkg = "1" Plots = "1" Preferences = "1" PrettyTables = "2" diff --git a/test_autotune_improvements.jl b/test_autotune_improvements.jl new file mode 100644 index 000000000..d470ded7d --- /dev/null +++ b/test_autotune_improvements.jl @@ -0,0 +1,63 @@ +using Pkg +Pkg.activate("lib/LinearSolveAutotune") +Pkg.instantiate() + +using LinearSolveAutotune +using LinearSolve +using Test + +# Test 1: Check that get_package_versions works +println("Test 1: Checking package version collection...") +sys_info = LinearSolveAutotune.get_system_info() +if haskey(sys_info, "package_versions") + println("✓ Package versions collected successfully:") + versions = sys_info["package_versions"] + for (pkg, ver) in versions + println(" - $pkg: $ver") + end +else + println("✗ Package versions not found in system info") +end + +# Test 2: Test correctness check with a small matrix +println("\nTest 2: Testing correctness check...") +using Random, LinearAlgebra +Random.seed!(123) + +n = 10 +A = rand(Float64, n, n) +b = rand(Float64, n) + +# Get reference solution +prob_ref = LinearProblem(A, b) +ref_sol = solve(prob_ref, LinearSolve.LUFactorization()) +println("Reference solution norm: ", norm(ref_sol.u)) + +# Test with a 
simple algorithm +prob_test = LinearProblem(A, b) +test_sol = solve(prob_test, LinearSolve.SimpleLUFactorization()) +rel_error = norm(test_sol.u - ref_sol.u) / norm(ref_sol.u) +println("SimpleLUFactorization relative error: ", rel_error) + +if rel_error < 1e-2 + println("✓ Correctness check would pass (error < 1e-2)") +else + println("✗ Correctness check would fail (error >= 1e-2)") +end + +println("\nTest 3: Running small benchmark with correctness checks...") +# Run a minimal benchmark to test the full integration +matrix_sizes = [5, 10] +algorithms = [LinearSolve.LUFactorization(), LinearSolve.SimpleLUFactorization()] +alg_names = ["LUFactorization", "SimpleLUFactorization"] +eltypes = [Float64] + +results = LinearSolveAutotune.benchmark_algorithms( + matrix_sizes, algorithms, alg_names, eltypes; + samples = 2, seconds = 0.1, check_correctness = true, correctness_tol = 1e-2 +) + +println("\nBenchmark results with correctness checks:") +println(results) + +println("\n✓ All tests completed successfully!") \ No newline at end of file From 875dd2285c9bceb2510a035a7b3939b01c674c50 Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Sat, 9 Aug 2025 18:39:53 -0400 Subject: [PATCH 3/6] Apply JuliaFormatter to modified files --- format_autotune.jl | 10 + lib/LinearSolveAutotune/src/benchmarking.jl | 95 ++++---- lib/LinearSolveAutotune/src/gpu_detection.jl | 126 +++++----- lib/LinearSolveAutotune/src/telemetry.jl | 236 ++++++++++--------- 4 files changed, 251 insertions(+), 216 deletions(-) create mode 100644 format_autotune.jl diff --git a/format_autotune.jl b/format_autotune.jl new file mode 100644 index 000000000..93736941e --- /dev/null +++ b/format_autotune.jl @@ -0,0 +1,10 @@ +using Pkg +Pkg.add("JuliaFormatter") +using JuliaFormatter + +# Format only the changed files with SciMLStyle +format("lib/LinearSolveAutotune/src/gpu_detection.jl", SciMLStyle()) +format("lib/LinearSolveAutotune/src/telemetry.jl", SciMLStyle()) +format("lib/LinearSolveAutotune/src/benchmarking.jl", SciMLStyle()) + +println("Formatting complete!") \ No newline at end of file diff --git a/lib/LinearSolveAutotune/src/benchmarking.jl b/lib/LinearSolveAutotune/src/benchmarking.jl index cbfb38ef9..f0bcdccb4 100644 --- a/lib/LinearSolveAutotune/src/benchmarking.jl +++ b/lib/LinearSolveAutotune/src/benchmarking.jl @@ -13,16 +13,17 @@ Uses more strict rules for BLAS-dependent algorithms with non-standard types. 
function test_algorithm_compatibility(alg, eltype::Type, test_size::Int = 4) # Get algorithm name for type-specific compatibility rules alg_name = string(typeof(alg).name.name) - + # Define strict compatibility rules for BLAS-dependent algorithms - if !(eltype <: LinearAlgebra.BLAS.BlasFloat) && alg_name in ["BLISFactorization", "MKLLUFactorization", "AppleAccelerateLUFactorization"] + if !(eltype <: LinearAlgebra.BLAS.BlasFloat) && alg_name in [ + "BLISFactorization", "MKLLUFactorization", "AppleAccelerateLUFactorization"] return false # BLAS algorithms not compatible with non-standard types end if alg_name == "BLISLUFactorization" && Sys.isapple() return false # BLISLUFactorization has no Apple Silicon binary end - + # For standard types or algorithms that passed the strict check, test functionality try # Create a small test problem with the specified element type @@ -30,20 +31,20 @@ function test_algorithm_compatibility(alg, eltype::Type, test_size::Int = 4) A = rand(rng, eltype, test_size, test_size) b = rand(rng, eltype, test_size) u0 = rand(rng, eltype, test_size) - + prob = LinearProblem(A, b; u0 = u0) - + # Try to solve - if it works, the algorithm is compatible sol = solve(prob, alg) - + # Additional check: verify the solution is actually of the expected type if !isa(sol.u, AbstractVector{eltype}) @debug "Algorithm $alg_name returned wrong element type for $eltype" return false end - + return true - + catch e # Algorithm failed - not compatible with this element type @debug "Algorithm $alg_name failed for $eltype: $e" @@ -60,14 +61,14 @@ Returns filtered algorithms and names. function filter_compatible_algorithms(algorithms, alg_names, eltype::Type) compatible_algs = [] compatible_names = String[] - + for (alg, name) in zip(algorithms, alg_names) if test_algorithm_compatibility(alg, eltype) push!(compatible_algs, alg) push!(compatible_names, name) end end - + return compatible_algs, compatible_names end @@ -89,7 +90,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; # Initialize results DataFrame results_data = [] - + # Calculate total number of benchmarks for progress bar total_benchmarks = 0 for eltype in eltypes @@ -97,28 +98,29 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; test_algs, _ = filter_compatible_algorithms(algorithms, alg_names, eltype) total_benchmarks += length(matrix_sizes) * length(test_algs) end - + # Create progress bar - progress = Progress(total_benchmarks, desc="Benchmarking: ", - barlen=50, showspeed=true) + progress = Progress(total_benchmarks, desc = "Benchmarking: ", + barlen = 50, showspeed = true) try for eltype in eltypes # Filter algorithms for this element type - compatible_algs, compatible_names = filter_compatible_algorithms(algorithms, alg_names, eltype) - + compatible_algs, + compatible_names = filter_compatible_algorithms(algorithms, alg_names, eltype) + if isempty(compatible_algs) @warn "No algorithms compatible with $eltype, skipping..." 
continue end - + for n in matrix_sizes # Create test problem with specified element type rng = MersenneTwister(123) # Consistent seed for reproducibility A = rand(rng, eltype, n, n) b = rand(rng, eltype, n) u0 = rand(rng, eltype, n) - + # Compute reference solution with LUFactorization if correctness check is enabled reference_solution = nothing if check_correctness @@ -133,9 +135,9 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; for (alg, name) in zip(compatible_algs, compatible_names) # Update progress description - ProgressMeter.update!(progress, - desc="Benchmarking $name on $(n)×$(n) $eltype matrix: ") - + ProgressMeter.update!(progress, + desc = "Benchmarking $name on $(n)×$(n) $eltype matrix: ") + gflops = 0.0 success = true error_msg = "" @@ -149,12 +151,13 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; # Warmup run and correctness check warmup_sol = solve(prob, alg) - + # Check correctness if reference solution is available if check_correctness && reference_solution !== nothing # Compute relative error - rel_error = norm(warmup_sol.u - reference_solution.u) / norm(reference_solution.u) - + rel_error = norm(warmup_sol.u - reference_solution.u) / + norm(reference_solution.u) + if rel_error > correctness_tol passed_correctness = false @warn "Algorithm $name failed correctness check for size $n, eltype $eltype. " * @@ -164,7 +167,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; error_msg = "Failed correctness check (rel_error = $(round(rel_error, sigdigits=3)))" end end - + # Only benchmark if correctness check passed if passed_correctness # Actual benchmark @@ -195,7 +198,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; success = success, error = error_msg )) - + # Update progress ProgressMeter.next!(progress) end @@ -216,15 +219,16 @@ end Get the matrix sizes to benchmark based on the requested size categories. Size categories: -- `:tiny` - 5:5:20 (for very small problems) -- `:small` - 20:20:100 (for small problems) -- `:medium` - 100:50:300 (for typical problems) -- `:large` - 300:100:1000 (for larger problems) -- `:big` - vcat(1000:2000:10000, 10000:5000:20000) (for very large/GPU problems) + + - `:tiny` - 5:5:20 (for very small problems) + - `:small` - 20:20:100 (for small problems) + - `:medium` - 100:50:300 (for typical problems) + - `:large` - 300:100:1000 (for larger problems) + - `:big` - vcat(1000:2000:10000, 10000:5000:20000) (for very large/GPU problems) """ function get_benchmark_sizes(size_categories::Vector{Symbol}) sizes = Int[] - + for category in size_categories if category == :tiny append!(sizes, 5:5:20) @@ -240,7 +244,7 @@ function get_benchmark_sizes(size_categories::Vector{Symbol}) @warn "Unknown size category: $category. Skipping." 
end end - + # Remove duplicates and sort return sort(unique(sizes)) end @@ -277,10 +281,10 @@ function categorize_results(df::DataFrame) for eltype in eltypes @info "Categorizing results for element type: $eltype" - + # Filter results for this element type eltype_df = filter(row -> row.eltype == eltype, successful_df) - + if nrow(eltype_df) == 0 continue end @@ -295,24 +299,27 @@ function categorize_results(df::DataFrame) # Calculate average GFLOPs for each algorithm in this range avg_results = combine(groupby(range_df, :algorithm), :gflops => mean => :avg_gflops) - + # Sort by performance - sort!(avg_results, :avg_gflops, rev=true) + sort!(avg_results, :avg_gflops, rev = true) # Find the best algorithm (for complex types, avoid RFLU if possible) if nrow(avg_results) > 0 best_alg = avg_results.algorithm[1] - + # For complex types, check if best is RFLU and we have alternatives - if (eltype == "ComplexF32" || eltype == "ComplexF64") && - (contains(best_alg, "RFLU") || contains(best_alg, "RecursiveFactorization")) - + if (eltype == "ComplexF32" || eltype == "ComplexF64") && + (contains(best_alg, "RFLU") || + contains(best_alg, "RecursiveFactorization")) + # Look for the best non-RFLU algorithm for i in 2:nrow(avg_results) alt_alg = avg_results.algorithm[i] - if !contains(alt_alg, "RFLU") && !contains(alt_alg, "RecursiveFactorization") + if !contains(alt_alg, "RFLU") && + !contains(alt_alg, "RecursiveFactorization") # Check if performance difference is not too large (within 20%) - perf_ratio = avg_results.avg_gflops[i] / avg_results.avg_gflops[1] + perf_ratio = avg_results.avg_gflops[i] / + avg_results.avg_gflops[1] if perf_ratio > 0.8 @info "Using $alt_alg instead of $best_alg for $eltype at $range_name ($(round(100*perf_ratio, digits=1))% of RFLU performance) to avoid complex number issues" best_alg = alt_alg @@ -323,7 +330,7 @@ function categorize_results(df::DataFrame) end end end - + category_key = "$(eltype)_$(range_name)" categories[category_key] = best_alg best_idx = findfirst(==(best_alg), avg_results.algorithm) diff --git a/lib/LinearSolveAutotune/src/gpu_detection.jl b/lib/LinearSolveAutotune/src/gpu_detection.jl index 40a52e307..feb654edc 100644 --- a/lib/LinearSolveAutotune/src/gpu_detection.jl +++ b/lib/LinearSolveAutotune/src/gpu_detection.jl @@ -79,28 +79,29 @@ function get_system_info() info["julia_version"] = string(VERSION) info["os"] = string(Sys.KERNEL) - info["os_name"] = Sys.iswindows() ? "Windows" : Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" + info["os_name"] = Sys.iswindows() ? "Windows" : + Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" info["arch"] = string(Sys.ARCH) - + # Use CPUSummary where available, fallback to Sys otherwise try info["cpu_name"] = string(Sys.CPU_NAME) catch info["cpu_name"] = "Unknown" end - + # CPUSummary.num_cores() returns the physical cores (as Static.StaticInt) info["num_cores"] = Int(CPUSummary.num_cores()) info["num_logical_cores"] = Sys.CPU_THREADS info["num_threads"] = Threads.nthreads() - + # BLAS threads try info["blas_num_threads"] = LinearAlgebra.BLAS.get_num_threads() catch info["blas_num_threads"] = 1 end - + info["blas_vendor"] = string(LinearAlgebra.BLAS.vendor()) info["has_cuda"] = is_cuda_available() info["has_metal"] = is_metal_available() @@ -119,7 +120,7 @@ function get_system_info() # Add package versions info["package_versions"] = get_package_versions() - + return info end @@ -131,10 +132,10 @@ Returns a Dict with package names and versions. 
""" function get_package_versions() versions = Dict{String, String}() - + # Get the current project's dependencies deps = Pkg.dependencies() - + # List of packages we're interested in tracking important_packages = [ "LinearSolve", @@ -157,7 +158,7 @@ function get_package_versions() "KrylovKit", "LinearAlgebra" ] - + # Also track JLL packages for BLAS libraries jll_packages = [ "MKL_jll", @@ -167,9 +168,9 @@ function get_package_versions() "LAPACK_jll", "CompilerSupportLibraries_jll" ] - + all_packages = union(important_packages, jll_packages) - + # Iterate through dependencies and collect versions for (uuid, dep) in deps if dep.name in all_packages @@ -190,14 +191,14 @@ function get_package_versions() end end end - + # Try to get Julia's LinearAlgebra stdlib version try versions["LinearAlgebra"] = string(VERSION) # Stdlib version matches Julia catch versions["LinearAlgebra"] = "stdlib" end - + # Get BLAS configuration info try blas_config = LinearAlgebra.BLAS.get_config() @@ -214,7 +215,7 @@ function get_package_versions() # Fallback for older Julia versions versions["BLAS_vendor"] = string(LinearAlgebra.BLAS.vendor()) end - + return versions end @@ -227,88 +228,89 @@ Includes versioninfo() details and hardware-specific information for analysis. function get_detailed_system_info() # Basic system information system_data = Dict{String, Any}() - + # Julia and system basics - all with safe fallbacks try system_data["timestamp"] = string(Dates.now()) catch system_data["timestamp"] = "unknown" end - + try system_data["julia_version"] = string(VERSION) catch system_data["julia_version"] = "unknown" end - + try system_data["julia_commit"] = Base.GIT_VERSION_INFO.commit[1:10] # Short commit hash catch system_data["julia_commit"] = "unknown" end - + try - system_data["os_name"] = Sys.iswindows() ? "Windows" : Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" + system_data["os_name"] = Sys.iswindows() ? "Windows" : + Sys.islinux() ? "Linux" : Sys.isapple() ? 
"macOS" : "Other" catch system_data["os_name"] = "unknown" end - + try system_data["os_version"] = string(Sys.KERNEL) catch system_data["os_version"] = "unknown" end - + try system_data["architecture"] = string(Sys.ARCH) catch system_data["architecture"] = "unknown" end - + try system_data["cpu_cores"] = Int(CPUSummary.num_cores()) catch system_data["cpu_cores"] = "unknown" end - + try system_data["cpu_logical_cores"] = Sys.CPU_THREADS catch system_data["cpu_logical_cores"] = "unknown" end - + try system_data["julia_threads"] = Threads.nthreads() catch system_data["julia_threads"] = "unknown" end - + try system_data["word_size"] = Sys.WORD_SIZE catch system_data["word_size"] = "unknown" end - + try system_data["machine"] = Sys.MACHINE catch system_data["machine"] = "unknown" end - + # CPU details try system_data["cpu_name"] = string(Sys.CPU_NAME) catch system_data["cpu_name"] = "unknown" end - + try # Architecture info from Sys system_data["cpu_architecture"] = string(Sys.ARCH) catch system_data["cpu_architecture"] = "unknown" end - + # Categorize CPU vendor for easy analysis try cpu_name_lower = lowercase(string(system_data["cpu_name"])) @@ -316,7 +318,8 @@ function get_detailed_system_info() system_data["cpu_vendor"] = "Intel" elseif contains(cpu_name_lower, "amd") system_data["cpu_vendor"] = "AMD" - elseif contains(cpu_name_lower, "apple") || contains(cpu_name_lower, "m1") || contains(cpu_name_lower, "m2") || contains(cpu_name_lower, "m3") + elseif contains(cpu_name_lower, "apple") || contains(cpu_name_lower, "m1") || + contains(cpu_name_lower, "m2") || contains(cpu_name_lower, "m3") system_data["cpu_vendor"] = "Apple" else system_data["cpu_vendor"] = "Other" @@ -324,14 +327,14 @@ function get_detailed_system_info() catch system_data["cpu_vendor"] = "unknown" end - + # BLAS and linear algebra libraries try system_data["blas_vendor"] = string(LinearAlgebra.BLAS.vendor()) catch system_data["blas_vendor"] = "unknown" end - + # LAPACK vendor detection (safe for different Julia versions) try system_data["lapack_vendor"] = string(LinearAlgebra.LAPACK.vendor()) @@ -339,52 +342,58 @@ function get_detailed_system_info() # Fallback: LAPACK vendor often matches BLAS vendor system_data["lapack_vendor"] = get(system_data, "blas_vendor", "unknown") end - + try system_data["blas_num_threads"] = LinearAlgebra.BLAS.get_num_threads() catch system_data["blas_num_threads"] = "unknown" end - + # LinearSolve-specific package availability try system_data["mkl_available"] = LinearSolve.usemkl catch system_data["mkl_available"] = false end - + try - system_data["mkl_used"] = system_data["mkl_available"] && contains(lowercase(string(system_data["blas_vendor"])), "mkl") + system_data["mkl_used"] = system_data["mkl_available"] && + contains(lowercase(string(system_data["blas_vendor"])), "mkl") catch system_data["mkl_used"] = false end - + try system_data["apple_accelerate_available"] = LinearSolve.appleaccelerate_isavailable() catch system_data["apple_accelerate_available"] = false end - + try - system_data["apple_accelerate_used"] = system_data["apple_accelerate_available"] && contains(lowercase(string(system_data["blas_vendor"])), "accelerate") + system_data["apple_accelerate_used"] = system_data["apple_accelerate_available"] && + contains( + lowercase(string(system_data["blas_vendor"])), "accelerate") catch system_data["apple_accelerate_used"] = false end - + # BLIS availability check - based on JLL packages system_data["blis_available"] = false system_data["blis_used"] = false system_data["blis_jll_loaded"] = 
false system_data["lapack_jll_loaded"] = false - + try # Check if BLIS_jll and LAPACK_jll are loaded - system_data["blis_jll_loaded"] = haskey(Base.loaded_modules, Base.PkgId(Base.UUID("068f7417-6964-5086-9a5b-bc0c5b4f7fa6"), "BLIS_jll")) - system_data["lapack_jll_loaded"] = haskey(Base.loaded_modules, Base.PkgId(Base.UUID("51474c39-65e3-53ba-86ba-03b1b862ec14"), "LAPACK_jll")) - + system_data["blis_jll_loaded"] = haskey(Base.loaded_modules, + Base.PkgId(Base.UUID("068f7417-6964-5086-9a5b-bc0c5b4f7fa6"), "BLIS_jll")) + system_data["lapack_jll_loaded"] = haskey(Base.loaded_modules, + Base.PkgId(Base.UUID("51474c39-65e3-53ba-86ba-03b1b862ec14"), "LAPACK_jll")) + # BLIS is available if JLL packages are loaded and BLISLUFactorization exists - if (system_data["blis_jll_loaded"] || system_data["lapack_jll_loaded"]) && - isdefined(LinearSolve, :BLISLUFactorization) && hasmethod(LinearSolve.BLISLUFactorization, ()) + if (system_data["blis_jll_loaded"] || system_data["lapack_jll_loaded"]) && + isdefined(LinearSolve, :BLISLUFactorization) && + hasmethod(LinearSolve.BLISLUFactorization, ()) system_data["blis_available"] = true # Check if BLIS is actually being used (contains "blis" in BLAS vendor) system_data["blis_used"] = contains(lowercase(string(system_data["blas_vendor"])), "blis") @@ -392,42 +401,46 @@ function get_detailed_system_info() catch # If there's any error checking BLIS JLL packages, leave as false end - + # GPU information try system_data["cuda_available"] = is_cuda_available() catch system_data["cuda_available"] = false end - + try system_data["metal_available"] = is_metal_available() catch system_data["metal_available"] = false end - + # Try to detect if CUDA/Metal packages are actually loaded system_data["cuda_loaded"] = false system_data["metal_loaded"] = false try # Check if CUDA algorithms are actually available if system_data["cuda_available"] - system_data["cuda_loaded"] = isdefined(Main, :CUDA) || haskey(Base.loaded_modules, Base.PkgId(Base.UUID("052768ef-5323-5732-b1bb-66c8b64840ba"), "CUDA")) + system_data["cuda_loaded"] = isdefined(Main, :CUDA) || + haskey(Base.loaded_modules, + Base.PkgId(Base.UUID("052768ef-5323-5732-b1bb-66c8b64840ba"), "CUDA")) end if system_data["metal_available"] - system_data["metal_loaded"] = isdefined(Main, :Metal) || haskey(Base.loaded_modules, Base.PkgId(Base.UUID("dde4c033-4e86-420c-a63e-0dd931031962"), "Metal")) + system_data["metal_loaded"] = isdefined(Main, :Metal) || + haskey(Base.loaded_modules, + Base.PkgId(Base.UUID("dde4c033-4e86-420c-a63e-0dd931031962"), "Metal")) end catch # If we can't detect, leave as false end - + # Environment information try system_data["libm"] = Base.libm_name catch system_data["libm"] = "unknown" end - + # libdl_name may not exist in all Julia versions try system_data["libdl"] = Base.libdl_name @@ -441,20 +454,21 @@ function get_detailed_system_info() meminfo = read(`cat /proc/meminfo`, String) mem_match = match(r"MemTotal:\s*(\d+)\s*kB", meminfo) if mem_match !== nothing - system_data["total_memory_gb"] = round(parse(Int, mem_match.captures[1]) / 1024 / 1024, digits=2) + system_data["total_memory_gb"] = round( + parse(Int, mem_match.captures[1]) / 1024 / 1024, digits = 2) else system_data["total_memory_gb"] = "unknown" end elseif Sys.isapple() mem_bytes = parse(Int, read(`sysctl -n hw.memsize`, String)) - system_data["total_memory_gb"] = round(mem_bytes / 1024^3, digits=2) + system_data["total_memory_gb"] = round(mem_bytes / 1024^3, digits = 2) else system_data["total_memory_gb"] = "unknown" end catch 
system_data["total_memory_gb"] = "unknown" end - + # Create DataFrame with single row return DataFrame([system_data]) end diff --git a/lib/LinearSolveAutotune/src/telemetry.jl b/lib/LinearSolveAutotune/src/telemetry.jl index c1e16c79a..acb9c1637 100644 --- a/lib/LinearSolveAutotune/src/telemetry.jl +++ b/lib/LinearSolveAutotune/src/telemetry.jl @@ -26,11 +26,11 @@ Returns an authentication method indicator if successful, nothing if setup fails function setup_github_authentication(; auto_login::Bool = true) # 1. Check for `gh` CLI (system or JLL) gh_cmd = get_gh_command() - + # First check if already authenticated try # Suppress output of gh auth status check - if success(pipeline(`$gh_cmd auth status`; stdout=devnull, stderr=devnull)) + if success(pipeline(`$gh_cmd auth status`; stdout = devnull, stderr = devnull)) # Check if logged in to github.com auth_status_output = read(`$gh_cmd auth status`, String) if contains(auth_status_output, "Logged in to github.com") @@ -58,18 +58,18 @@ function setup_github_authentication(; auto_login::Bool = true) println("\nWould you like to authenticate with GitHub now? (y/n)") print("> ") response = readline() - + if lowercase(strip(response)) in ["y", "yes"] println("\n📝 Starting GitHub authentication...") println(" This will open your browser to authenticate with GitHub.") println(" Please follow the prompts to complete authentication.\n") - + try # Run gh auth login interactively (using system gh or JLL) run(`$gh_cmd auth login`) - + # Check if authentication succeeded - if success(pipeline(`$gh_cmd auth status`; stdout=devnull, stderr=devnull)) + if success(pipeline(`$gh_cmd auth status`; stdout = devnull, stderr = devnull)) auth_status_output = read(`$gh_cmd auth status`, String) if contains(auth_status_output, "Logged in to github.com") println("\n✅ Authentication successful! 
You can now share results.") @@ -159,29 +159,34 @@ function format_system_info_markdown(system_info::Dict) os_kernel = get(system_info, "os_version", get(system_info, "os", "unknown")) push!(lines, "- **OS**: $os_display ($os_kernel)") # Handle both "arch" and "architecture" keys - push!(lines, "- **Architecture**: $(get(system_info, "architecture", get(system_info, "arch", "unknown")))") + push!(lines, + "- **Architecture**: $(get(system_info, "architecture", get(system_info, "arch", "unknown")))") push!(lines, "- **CPU**: $(get(system_info, "cpu_name", "unknown"))") # Handle both "num_cores" and "cpu_cores" keys push!(lines, "- **Cores**: $(get(system_info, "cpu_cores", get(system_info, "num_cores", "unknown")))") # Handle both "num_threads" and "julia_threads" keys - push!(lines, "- **Threads**: $(get(system_info, "julia_threads", get(system_info, "num_threads", "unknown")))") + push!(lines, + "- **Threads**: $(get(system_info, "julia_threads", get(system_info, "num_threads", "unknown")))") push!(lines, "- **BLAS**: $(get(system_info, "blas_vendor", "unknown"))") push!(lines, "- **MKL Available**: $(get(system_info, "mkl_available", false))") - push!(lines, "- **Apple Accelerate Available**: $(get(system_info, "apple_accelerate_available", false))") + push!(lines, + "- **Apple Accelerate Available**: $(get(system_info, "apple_accelerate_available", false))") # Handle both "has_cuda" and "cuda_available" keys - push!(lines, "- **CUDA Available**: $(get(system_info, "cuda_available", get(system_info, "has_cuda", false)))") + push!(lines, + "- **CUDA Available**: $(get(system_info, "cuda_available", get(system_info, "has_cuda", false)))") # Handle both "has_metal" and "metal_available" keys - push!(lines, "- **Metal Available**: $(get(system_info, "metal_available", get(system_info, "has_metal", false)))") - + push!(lines, + "- **Metal Available**: $(get(system_info, "metal_available", get(system_info, "has_metal", false)))") + # Add package versions section if haskey(system_info, "package_versions") push!(lines, "") push!(lines, "### Package Versions") pkg_versions = system_info["package_versions"] - + # Sort packages for consistent display sorted_packages = sort(collect(keys(pkg_versions))) - + for pkg_name in sorted_packages version = pkg_versions[pkg_name] push!(lines, "- **$pkg_name**: $version") @@ -202,14 +207,14 @@ function format_categories_markdown(categories::Dict{String, String}) end lines = String[] - + # Group categories by element type eltype_categories = Dict{String, Dict{String, String}}() - + for (key, algorithm) in categories # Parse key like "Float64_tiny (5-20)" -> eltype="Float64", range="tiny (5-20)" if contains(key, "_") - eltype, range = split(key, "_", limit=2) + eltype, range = split(key, "_", limit = 2) if !haskey(eltype_categories, eltype) eltype_categories[eltype] = Dict{String, String}() end @@ -222,10 +227,11 @@ function format_categories_markdown(categories::Dict{String, String}) eltype_categories["Mixed"][key] = algorithm end end - + # Define the proper order for size ranges - size_order = ["tiny (5-20)", "small (20-100)", "medium (100-300)", "large (300-1000)", "big (10000+)"] - + size_order = ["tiny (5-20)", "small (20-100)", "medium (100-300)", + "large (300-1000)", "big (10000+)"] + # Custom sort function for ranges function sort_ranges(ranges_dict) sorted_pairs = [] @@ -242,7 +248,7 @@ function format_categories_markdown(categories::Dict{String, String}) end return sorted_pairs end - + # Format each element type for (eltype, ranges) in 
sort(eltype_categories) push!(lines, "#### Recommendations for $eltype") @@ -267,28 +273,28 @@ Includes both summary statistics and raw performance data in collapsible section """ function format_detailed_results_markdown(df::DataFrame) lines = String[] - + # Get unique element types eltypes = unique(df.eltype) - + for eltype in eltypes push!(lines, "#### Results for $eltype") push!(lines, "") - + # Filter results for this element type eltype_df = filter(row -> row.eltype == eltype, df) - + if nrow(eltype_df) == 0 push!(lines, "No results for this element type.") push!(lines, "") continue end - + # Create a summary table with average performance per algorithm for this element type - summary = combine(groupby(eltype_df, :algorithm), - :gflops => mean => :avg_gflops, - :gflops => std => :std_gflops, - nrow => :num_tests) + summary = combine(groupby(eltype_df, :algorithm), + :gflops => mean => :avg_gflops, + :gflops => std => :std_gflops, + nrow => :num_tests) sort!(summary, :avg_gflops, rev = true) push!(lines, "##### Summary Statistics") @@ -301,39 +307,38 @@ function format_detailed_results_markdown(df::DataFrame) std_str = @sprintf("%.2f", row.std_gflops) push!(lines, "| $(row.algorithm) | $avg_str | $std_str | $(row.num_tests) |") end - + push!(lines, "") - + # Add raw performance data in collapsible details blocks for each algorithm push!(lines, "
") push!(lines, "Raw Performance Data") push!(lines, "") - + # Get unique algorithms for this element type algorithms = unique(eltype_df.algorithm) - + for algorithm in sort(algorithms) # Filter data for this algorithm algo_df = filter(row -> row.algorithm == algorithm, eltype_df) - + # Sort by size for better readability sort!(algo_df, :size) - push!(lines, "##### $algorithm") push!(lines, "") push!(lines, "| Matrix Size | GFLOPs | Status |") push!(lines, "|-------------|--------|--------|") - + for row in eachrow(algo_df) gflops_str = row.success ? @sprintf("%.3f", row.gflops) : "N/A" status = row.success ? "✅ Success" : "❌ Failed" push!(lines, "| $(row.size) | $gflops_str | $status |") end - + push!(lines, "") end - + push!(lines, "
") push!(lines, "") end @@ -349,8 +354,7 @@ Create a GitHub issue with benchmark results for community data collection. Note: plot_files parameter is kept for compatibility but not used. """ function upload_to_github(content::String, plot_files, auth_info::Tuple, - results_df::DataFrame, system_info::Dict, categories::Dict) - + results_df::DataFrame, system_info::Dict, categories::Dict) auth_method, auth_data = auth_info if auth_method === nothing @@ -363,21 +367,21 @@ function upload_to_github(content::String, plot_files, auth_info::Tuple, @info "📁 Results saved locally to $fallback_file" return end - + @info "📤 Preparing to upload benchmark results..." try target_repo = "SciML/LinearSolve.jl" issue_number = 669 # The existing issue for collecting autotune results - + # Construct comment body cpu_name = get(system_info, "cpu_name", "unknown") os_name = get(system_info, "os", "unknown") timestamp = Dates.format(Dates.now(), "yyyy-mm-dd HH:MM") - + comment_body = """ ## Benchmark Results: $cpu_name on $os_name ($timestamp) - + $content --- @@ -391,7 +395,7 @@ function upload_to_github(content::String, plot_files, auth_info::Tuple, """ @info "📝 Adding comment to issue #669..." - + issue_url = nothing if auth_method == :gh_cli issue_url = comment_on_issue_gh(target_repo, issue_number, comment_body) @@ -430,7 +434,7 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt if plot_files === nothing return nothing, Dict{String, String}() end - + try # Handle different plot_files formats files_to_upload = if isa(plot_files, Tuple) @@ -441,13 +445,13 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt else return nothing, Dict{String, String}() end - + # Filter existing files existing_files = Dict(k => v for (k, v) in files_to_upload if isfile(v)) if isempty(existing_files) return nothing, Dict{String, String}() end - + # Create README content readme_content = """ # LinearSolve.jl Benchmark Plots @@ -461,7 +465,7 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt for (name, _) in existing_files readme_content *= "- `$name`\n" end - + readme_content *= """ ## Viewing the Plots @@ -471,66 +475,66 @@ The PNG images can be viewed directly in the browser. 
Click on any `.png` file a --- *Generated automatically by LinearSolve.jl autotune system* """ - + # Create initial gist with README timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + gist_files = Dict{String, Any}() gist_files["README.md"] = Dict("content" => readme_content) - + params = Dict( "description" => gist_desc, "public" => true, "files" => gist_files ) - + # Create the gist - gist = GitHub.create_gist(; params=params, auth=auth) + gist = GitHub.create_gist(; params = params, auth = auth) gist_url = gist.html_url gist_id = split(gist_url, "/")[end] - username = split(gist_url, "/")[end-1] - + username = split(gist_url, "/")[end - 1] + # Now clone the gist and add the binary files temp_dir = mktempdir() try # Clone using HTTPS with token authentication clone_url = "https://$(auth.token)@gist.github.com/$gist_id.git" run(`git clone $clone_url $temp_dir`) - + # Copy all plot files to the gist directory for (name, filepath) in existing_files target_path = joinpath(temp_dir, name) - cp(filepath, target_path; force=true) + cp(filepath, target_path; force = true) end - + # Configure git user for the commit cd(temp_dir) do # Set a generic user for the commit run(`git config user.email "linearsolve-autotune@example.com"`) run(`git config user.name "LinearSolve Autotune"`) - + # Stage, commit and push the changes run(`git add .`) run(`git commit -m "Add benchmark plots"`) run(`git push`) end - + @info "✅ Successfully uploaded plots to gist: $gist_url" - + # Construct raw URLs for the uploaded files raw_urls = Dict{String, String}() for (name, _) in existing_files raw_urls[name] = "https://gist.githubusercontent.com/$username/$gist_id/raw/$name" end - + return gist_url, raw_urls - + finally # Clean up temporary directory - rm(temp_dir; recursive=true, force=true) + rm(temp_dir; recursive = true, force = true) end - + catch e @warn "Failed to upload plots to gist via API: $e" # Fall back to HTML with embedded images @@ -562,7 +566,7 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String)

        <h1>LinearSolve.jl Benchmark Plots</h1>
        <p>Element Type: $eltype_str</p>

""" - + # Read files and embed as base64 for (name, filepath) in files if isfile(filepath) && endswith(filepath, ".png") @@ -570,7 +574,7 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String) binary_content = read(filepath) base64_content = base64encode(binary_content) data_uri = "data:image/png;base64,$base64_content" - + # Add to HTML html_content *= """
@@ -580,30 +584,30 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String) """ end end - + html_content *= """ """ - + # Create gist with HTML file timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + gist_files = Dict{String, Any}() gist_files["plots.html"] = Dict("content" => html_content) - + params = Dict( "description" => gist_desc, "public" => true, "files" => gist_files ) - - gist = GitHub.create_gist(; params=params, auth=auth) - + + gist = GitHub.create_gist(; params = params, auth = auth) + @info "✅ Uploaded plots to gist (HTML fallback): $(gist.html_url)" return gist.html_url, Dict{String, String}() - + catch e @warn "Failed to upload plots to gist (fallback): $e" return nothing, Dict{String, String}() @@ -619,7 +623,7 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype if plot_files === nothing return nothing, Dict{String, String}() end - + try gh_cmd = get_gh_command() # Handle different plot_files formats @@ -631,17 +635,17 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype else return nothing, Dict{String, String}() end - + # Filter existing files existing_files = Dict(k => v for (k, v) in files_to_upload if isfile(v)) if isempty(existing_files) return nothing, Dict{String, String}() end - + # Create initial gist with a README timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + # Create README content readme_content = """ # LinearSolve.jl Benchmark Plots @@ -655,7 +659,7 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype for (name, _) in existing_files readme_content *= "- `$name`\n" end - + readme_content *= """ ## Viewing the Plots @@ -665,71 +669,71 @@ The PNG images can be viewed directly in the browser. Click on any `.png` file a --- *Generated automatically by LinearSolve.jl autotune system* """ - + # Create temporary file for README readme_file = tempname() * "_README.md" open(readme_file, "w") do f write(f, readme_content) end - + # Create initial gist with README out = Pipe() err = Pipe() - run(pipeline(`$gh_cmd gist create -d $gist_desc -p $readme_file`, stdout=out, stderr=err)) + run(pipeline(`$gh_cmd gist create -d $gist_desc -p $readme_file`, stdout = out, stderr = err)) close(out.in) close(err.in) - + gist_url = strip(read(out, String)) err_str = read(err, String) - + if !startswith(gist_url, "https://gist.github.com/") error("gh gist create did not return a valid URL. Output: $gist_url. 
Error: $err_str") end - + # Extract gist ID from URL gist_id = split(gist_url, "/")[end] - + # Clone the gist temp_dir = mktempdir() try # Clone the gist run(`$gh_cmd gist clone $gist_id $temp_dir`) - + # Copy all plot files to the gist directory for (name, filepath) in existing_files target_path = joinpath(temp_dir, name) - cp(filepath, target_path; force=true) + cp(filepath, target_path; force = true) end - + # Stage, commit and push the changes cd(temp_dir) do run(`git add .`) run(`git commit -m "Add benchmark plots"`) run(`git push`) end - + @info "✅ Successfully uploaded plots to gist: $gist_url" - + # Get username for constructing raw URLs username_out = Pipe() - run(pipeline(`$gh_cmd api user --jq .login`, stdout=username_out)) + run(pipeline(`$gh_cmd api user --jq .login`, stdout = username_out)) close(username_out.in) username = strip(read(username_out, String)) - + # Construct raw URLs for the uploaded files raw_urls = Dict{String, String}() for (name, _) in existing_files raw_urls[name] = "https://gist.githubusercontent.com/$username/$gist_id/raw/$name" end - + return gist_url, raw_urls - + finally # Clean up temporary directory - rm(temp_dir; recursive=true, force=true) - rm(readme_file; force=true) + rm(temp_dir; recursive = true, force = true) + rm(readme_file; force = true) end - + catch e @warn "Failed to upload plots to gist via gh CLI: $e" return nothing, Dict{String, String}() @@ -743,9 +747,9 @@ Add a comment to an existing GitHub issue using the GitHub API. """ function comment_on_issue_api(target_repo, issue_number, body, auth) try - repo_obj = GitHub.repo(target_repo; auth=auth) - issue = GitHub.issue(repo_obj, issue_number; auth=auth) - comment = GitHub.create_comment(repo_obj, issue, body; auth=auth) + repo_obj = GitHub.repo(target_repo; auth = auth) + issue = GitHub.issue(repo_obj, issue_number; auth = auth) + comment = GitHub.create_comment(repo_obj, issue, body; auth = auth) @info "✅ Added comment to issue #$(issue_number) via API" return "https://github.com/$(target_repo)/issues/$(issue_number)#issuecomment-$(comment.id)" catch e @@ -768,18 +772,18 @@ function comment_on_issue_gh(target_repo, issue_number, body) mktemp() do path, io write(io, body) flush(io) - + # Construct and run the gh command cmd = `$gh_cmd issue comment $issue_number --repo $target_repo --body-file $path` - + out = Pipe() err = Pipe() - run(pipeline(cmd, stdout=out, stderr=err)) + run(pipeline(cmd, stdout = out, stderr = err)) close(out) close(err) out_str = read(out, String) err_str = read(err, String) - + @info "✅ Added comment to issue #$(issue_number) via `gh` CLI" return "https://github.com/$(target_repo)/issues/$(issue_number)" end @@ -796,9 +800,9 @@ Create a GitHub issue using the GitHub.jl API. 
""" function create_benchmark_issue_api(target_repo, title, body, auth) try - repo_obj = GitHub.repo(target_repo; auth=auth) + repo_obj = GitHub.repo(target_repo; auth = auth) params = Dict("title" => title, "body" => body, "labels" => ["benchmark-data"]) - issue_result = GitHub.create_issue(repo_obj; params=params, auth=auth) + issue_result = GitHub.create_issue(repo_obj; params = params, auth = auth) @info "✅ Created benchmark results issue #$(issue_result.number) via API" return issue_result.html_url catch e @@ -821,22 +825,22 @@ function create_benchmark_issue_gh(target_repo, title, body) mktemp() do path, io write(io, body) flush(io) - + # Construct and run the gh command cmd = `$gh_cmd issue create --repo $target_repo --title $title --body-file $path --label benchmark-data` - + out = Pipe() err = Pipe() - run(pipeline(cmd, stdout=out, stderr=err)) + run(pipeline(cmd, stdout = out, stderr = err)) closewrite(out) closewrite(err) out_str = read(out, String) err_str = read(err, String) # Capture output to get the issue URL issue_url = strip(out_str) - + if !startswith(issue_url, "https://github.com/") - error("gh CLI command did not return a valid URL. Output: $issue_url. Error: $err_str") + error("gh CLI command did not return a valid URL. Output: $issue_url. Error: $err_str") end @info "✅ Created benchmark results issue via `gh` CLI" @@ -846,4 +850,4 @@ function create_benchmark_issue_gh(target_repo, title, body) @warn "Failed to create benchmark issue via `gh` CLI: $e" out_str err_str return nothing end -end \ No newline at end of file +end From 03e7799ae2b8980dcc29246ae72a308823dd90e3 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 9 Aug 2025 18:45:11 -0400 Subject: [PATCH 4/6] Delete format_autotune.jl --- format_autotune.jl | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 format_autotune.jl diff --git a/format_autotune.jl b/format_autotune.jl deleted file mode 100644 index 93736941e..000000000 --- a/format_autotune.jl +++ /dev/null @@ -1,10 +0,0 @@ -using Pkg -Pkg.add("JuliaFormatter") -using JuliaFormatter - -# Format only the changed files with SciMLStyle -format("lib/LinearSolveAutotune/src/gpu_detection.jl", SciMLStyle()) -format("lib/LinearSolveAutotune/src/telemetry.jl", SciMLStyle()) -format("lib/LinearSolveAutotune/src/benchmarking.jl", SciMLStyle()) - -println("Formatting complete!") \ No newline at end of file From 84cc26cc44c62dd1718c18cf3e66fdf03d91fa32 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 9 Aug 2025 18:45:26 -0400 Subject: [PATCH 5/6] Delete test_autotune_improvements.jl --- test_autotune_improvements.jl | 63 ----------------------------------- 1 file changed, 63 deletions(-) delete mode 100644 test_autotune_improvements.jl diff --git a/test_autotune_improvements.jl b/test_autotune_improvements.jl deleted file mode 100644 index d470ded7d..000000000 --- a/test_autotune_improvements.jl +++ /dev/null @@ -1,63 +0,0 @@ -using Pkg -Pkg.activate("lib/LinearSolveAutotune") -Pkg.instantiate() - -using LinearSolveAutotune -using LinearSolve -using Test - -# Test 1: Check that get_package_versions works -println("Test 1: Checking package version collection...") -sys_info = LinearSolveAutotune.get_system_info() -if haskey(sys_info, "package_versions") - println("✓ Package versions collected successfully:") - versions = sys_info["package_versions"] - for (pkg, ver) in versions - println(" - $pkg: $ver") - end -else - println("✗ Package versions not found in system info") -end - -# Test 2: Test correctness check with a 
small matrix -println("\nTest 2: Testing correctness check...") -using Random, LinearAlgebra -Random.seed!(123) - -n = 10 -A = rand(Float64, n, n) -b = rand(Float64, n) - -# Get reference solution -prob_ref = LinearProblem(A, b) -ref_sol = solve(prob_ref, LinearSolve.LUFactorization()) -println("Reference solution norm: ", norm(ref_sol.u)) - -# Test with a simple algorithm -prob_test = LinearProblem(A, b) -test_sol = solve(prob_test, LinearSolve.SimpleLUFactorization()) -rel_error = norm(test_sol.u - ref_sol.u) / norm(ref_sol.u) -println("SimpleLUFactorization relative error: ", rel_error) - -if rel_error < 1e-2 - println("✓ Correctness check would pass (error < 1e-2)") -else - println("✗ Correctness check would fail (error >= 1e-2)") -end - -println("\nTest 3: Running small benchmark with correctness checks...") -# Run a minimal benchmark to test the full integration -matrix_sizes = [5, 10] -algorithms = [LinearSolve.LUFactorization(), LinearSolve.SimpleLUFactorization()] -alg_names = ["LUFactorization", "SimpleLUFactorization"] -eltypes = [Float64] - -results = LinearSolveAutotune.benchmark_algorithms( - matrix_sizes, algorithms, alg_names, eltypes; - samples = 2, seconds = 0.1, check_correctness = true, correctness_tol = 1e-2 -) - -println("\nBenchmark results with correctness checks:") -println(results) - -println("\n✓ All tests completed successfully!") \ No newline at end of file From 5d564b69692afc2f9b13250fdd51aeaf798aa876 Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Sat, 9 Aug 2025 18:46:41 -0400 Subject: [PATCH 6/6] Revert "Apply JuliaFormatter to modified files" This reverts commit 875dd2285c9bceb2510a035a7b3939b01c674c50. --- lib/LinearSolveAutotune/src/benchmarking.jl | 95 ++++---- lib/LinearSolveAutotune/src/gpu_detection.jl | 126 +++++----- lib/LinearSolveAutotune/src/telemetry.jl | 236 +++++++++---------- 3 files changed, 216 insertions(+), 241 deletions(-) diff --git a/lib/LinearSolveAutotune/src/benchmarking.jl b/lib/LinearSolveAutotune/src/benchmarking.jl index f0bcdccb4..cbfb38ef9 100644 --- a/lib/LinearSolveAutotune/src/benchmarking.jl +++ b/lib/LinearSolveAutotune/src/benchmarking.jl @@ -13,17 +13,16 @@ Uses more strict rules for BLAS-dependent algorithms with non-standard types. 
function test_algorithm_compatibility(alg, eltype::Type, test_size::Int = 4) # Get algorithm name for type-specific compatibility rules alg_name = string(typeof(alg).name.name) - + # Define strict compatibility rules for BLAS-dependent algorithms - if !(eltype <: LinearAlgebra.BLAS.BlasFloat) && alg_name in [ - "BLISFactorization", "MKLLUFactorization", "AppleAccelerateLUFactorization"] + if !(eltype <: LinearAlgebra.BLAS.BlasFloat) && alg_name in ["BLISFactorization", "MKLLUFactorization", "AppleAccelerateLUFactorization"] return false # BLAS algorithms not compatible with non-standard types end if alg_name == "BLISLUFactorization" && Sys.isapple() return false # BLISLUFactorization has no Apple Silicon binary end - + # For standard types or algorithms that passed the strict check, test functionality try # Create a small test problem with the specified element type @@ -31,20 +30,20 @@ function test_algorithm_compatibility(alg, eltype::Type, test_size::Int = 4) A = rand(rng, eltype, test_size, test_size) b = rand(rng, eltype, test_size) u0 = rand(rng, eltype, test_size) - + prob = LinearProblem(A, b; u0 = u0) - + # Try to solve - if it works, the algorithm is compatible sol = solve(prob, alg) - + # Additional check: verify the solution is actually of the expected type if !isa(sol.u, AbstractVector{eltype}) @debug "Algorithm $alg_name returned wrong element type for $eltype" return false end - + return true - + catch e # Algorithm failed - not compatible with this element type @debug "Algorithm $alg_name failed for $eltype: $e" @@ -61,14 +60,14 @@ Returns filtered algorithms and names. function filter_compatible_algorithms(algorithms, alg_names, eltype::Type) compatible_algs = [] compatible_names = String[] - + for (alg, name) in zip(algorithms, alg_names) if test_algorithm_compatibility(alg, eltype) push!(compatible_algs, alg) push!(compatible_names, name) end end - + return compatible_algs, compatible_names end @@ -90,7 +89,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; # Initialize results DataFrame results_data = [] - + # Calculate total number of benchmarks for progress bar total_benchmarks = 0 for eltype in eltypes @@ -98,29 +97,28 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; test_algs, _ = filter_compatible_algorithms(algorithms, alg_names, eltype) total_benchmarks += length(matrix_sizes) * length(test_algs) end - + # Create progress bar - progress = Progress(total_benchmarks, desc = "Benchmarking: ", - barlen = 50, showspeed = true) + progress = Progress(total_benchmarks, desc="Benchmarking: ", + barlen=50, showspeed=true) try for eltype in eltypes # Filter algorithms for this element type - compatible_algs, - compatible_names = filter_compatible_algorithms(algorithms, alg_names, eltype) - + compatible_algs, compatible_names = filter_compatible_algorithms(algorithms, alg_names, eltype) + if isempty(compatible_algs) @warn "No algorithms compatible with $eltype, skipping..." 
continue end - + for n in matrix_sizes # Create test problem with specified element type rng = MersenneTwister(123) # Consistent seed for reproducibility A = rand(rng, eltype, n, n) b = rand(rng, eltype, n) u0 = rand(rng, eltype, n) - + # Compute reference solution with LUFactorization if correctness check is enabled reference_solution = nothing if check_correctness @@ -135,9 +133,9 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; for (alg, name) in zip(compatible_algs, compatible_names) # Update progress description - ProgressMeter.update!(progress, - desc = "Benchmarking $name on $(n)×$(n) $eltype matrix: ") - + ProgressMeter.update!(progress, + desc="Benchmarking $name on $(n)×$(n) $eltype matrix: ") + gflops = 0.0 success = true error_msg = "" @@ -151,13 +149,12 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; # Warmup run and correctness check warmup_sol = solve(prob, alg) - + # Check correctness if reference solution is available if check_correctness && reference_solution !== nothing # Compute relative error - rel_error = norm(warmup_sol.u - reference_solution.u) / - norm(reference_solution.u) - + rel_error = norm(warmup_sol.u - reference_solution.u) / norm(reference_solution.u) + if rel_error > correctness_tol passed_correctness = false @warn "Algorithm $name failed correctness check for size $n, eltype $eltype. " * @@ -167,7 +164,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; error_msg = "Failed correctness check (rel_error = $(round(rel_error, sigdigits=3)))" end end - + # Only benchmark if correctness check passed if passed_correctness # Actual benchmark @@ -198,7 +195,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes; success = success, error = error_msg )) - + # Update progress ProgressMeter.next!(progress) end @@ -219,16 +216,15 @@ end Get the matrix sizes to benchmark based on the requested size categories. Size categories: - - - `:tiny` - 5:5:20 (for very small problems) - - `:small` - 20:20:100 (for small problems) - - `:medium` - 100:50:300 (for typical problems) - - `:large` - 300:100:1000 (for larger problems) - - `:big` - vcat(1000:2000:10000, 10000:5000:20000) (for very large/GPU problems) +- `:tiny` - 5:5:20 (for very small problems) +- `:small` - 20:20:100 (for small problems) +- `:medium` - 100:50:300 (for typical problems) +- `:large` - 300:100:1000 (for larger problems) +- `:big` - vcat(1000:2000:10000, 10000:5000:20000) (for very large/GPU problems) """ function get_benchmark_sizes(size_categories::Vector{Symbol}) sizes = Int[] - + for category in size_categories if category == :tiny append!(sizes, 5:5:20) @@ -244,7 +240,7 @@ function get_benchmark_sizes(size_categories::Vector{Symbol}) @warn "Unknown size category: $category. Skipping." 
end end - + # Remove duplicates and sort return sort(unique(sizes)) end @@ -281,10 +277,10 @@ function categorize_results(df::DataFrame) for eltype in eltypes @info "Categorizing results for element type: $eltype" - + # Filter results for this element type eltype_df = filter(row -> row.eltype == eltype, successful_df) - + if nrow(eltype_df) == 0 continue end @@ -299,27 +295,24 @@ function categorize_results(df::DataFrame) # Calculate average GFLOPs for each algorithm in this range avg_results = combine(groupby(range_df, :algorithm), :gflops => mean => :avg_gflops) - + # Sort by performance - sort!(avg_results, :avg_gflops, rev = true) + sort!(avg_results, :avg_gflops, rev=true) # Find the best algorithm (for complex types, avoid RFLU if possible) if nrow(avg_results) > 0 best_alg = avg_results.algorithm[1] - + # For complex types, check if best is RFLU and we have alternatives - if (eltype == "ComplexF32" || eltype == "ComplexF64") && - (contains(best_alg, "RFLU") || - contains(best_alg, "RecursiveFactorization")) - + if (eltype == "ComplexF32" || eltype == "ComplexF64") && + (contains(best_alg, "RFLU") || contains(best_alg, "RecursiveFactorization")) + # Look for the best non-RFLU algorithm for i in 2:nrow(avg_results) alt_alg = avg_results.algorithm[i] - if !contains(alt_alg, "RFLU") && - !contains(alt_alg, "RecursiveFactorization") + if !contains(alt_alg, "RFLU") && !contains(alt_alg, "RecursiveFactorization") # Check if performance difference is not too large (within 20%) - perf_ratio = avg_results.avg_gflops[i] / - avg_results.avg_gflops[1] + perf_ratio = avg_results.avg_gflops[i] / avg_results.avg_gflops[1] if perf_ratio > 0.8 @info "Using $alt_alg instead of $best_alg for $eltype at $range_name ($(round(100*perf_ratio, digits=1))% of RFLU performance) to avoid complex number issues" best_alg = alt_alg @@ -330,7 +323,7 @@ function categorize_results(df::DataFrame) end end end - + category_key = "$(eltype)_$(range_name)" categories[category_key] = best_alg best_idx = findfirst(==(best_alg), avg_results.algorithm) diff --git a/lib/LinearSolveAutotune/src/gpu_detection.jl b/lib/LinearSolveAutotune/src/gpu_detection.jl index feb654edc..40a52e307 100644 --- a/lib/LinearSolveAutotune/src/gpu_detection.jl +++ b/lib/LinearSolveAutotune/src/gpu_detection.jl @@ -79,29 +79,28 @@ function get_system_info() info["julia_version"] = string(VERSION) info["os"] = string(Sys.KERNEL) - info["os_name"] = Sys.iswindows() ? "Windows" : - Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" + info["os_name"] = Sys.iswindows() ? "Windows" : Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" info["arch"] = string(Sys.ARCH) - + # Use CPUSummary where available, fallback to Sys otherwise try info["cpu_name"] = string(Sys.CPU_NAME) catch info["cpu_name"] = "Unknown" end - + # CPUSummary.num_cores() returns the physical cores (as Static.StaticInt) info["num_cores"] = Int(CPUSummary.num_cores()) info["num_logical_cores"] = Sys.CPU_THREADS info["num_threads"] = Threads.nthreads() - + # BLAS threads try info["blas_num_threads"] = LinearAlgebra.BLAS.get_num_threads() catch info["blas_num_threads"] = 1 end - + info["blas_vendor"] = string(LinearAlgebra.BLAS.vendor()) info["has_cuda"] = is_cuda_available() info["has_metal"] = is_metal_available() @@ -120,7 +119,7 @@ function get_system_info() # Add package versions info["package_versions"] = get_package_versions() - + return info end @@ -132,10 +131,10 @@ Returns a Dict with package names and versions. 
""" function get_package_versions() versions = Dict{String, String}() - + # Get the current project's dependencies deps = Pkg.dependencies() - + # List of packages we're interested in tracking important_packages = [ "LinearSolve", @@ -158,7 +157,7 @@ function get_package_versions() "KrylovKit", "LinearAlgebra" ] - + # Also track JLL packages for BLAS libraries jll_packages = [ "MKL_jll", @@ -168,9 +167,9 @@ function get_package_versions() "LAPACK_jll", "CompilerSupportLibraries_jll" ] - + all_packages = union(important_packages, jll_packages) - + # Iterate through dependencies and collect versions for (uuid, dep) in deps if dep.name in all_packages @@ -191,14 +190,14 @@ function get_package_versions() end end end - + # Try to get Julia's LinearAlgebra stdlib version try versions["LinearAlgebra"] = string(VERSION) # Stdlib version matches Julia catch versions["LinearAlgebra"] = "stdlib" end - + # Get BLAS configuration info try blas_config = LinearAlgebra.BLAS.get_config() @@ -215,7 +214,7 @@ function get_package_versions() # Fallback for older Julia versions versions["BLAS_vendor"] = string(LinearAlgebra.BLAS.vendor()) end - + return versions end @@ -228,89 +227,88 @@ Includes versioninfo() details and hardware-specific information for analysis. function get_detailed_system_info() # Basic system information system_data = Dict{String, Any}() - + # Julia and system basics - all with safe fallbacks try system_data["timestamp"] = string(Dates.now()) catch system_data["timestamp"] = "unknown" end - + try system_data["julia_version"] = string(VERSION) catch system_data["julia_version"] = "unknown" end - + try system_data["julia_commit"] = Base.GIT_VERSION_INFO.commit[1:10] # Short commit hash catch system_data["julia_commit"] = "unknown" end - + try - system_data["os_name"] = Sys.iswindows() ? "Windows" : - Sys.islinux() ? "Linux" : Sys.isapple() ? "macOS" : "Other" + system_data["os_name"] = Sys.iswindows() ? "Windows" : Sys.islinux() ? "Linux" : Sys.isapple() ? 
"macOS" : "Other" catch system_data["os_name"] = "unknown" end - + try system_data["os_version"] = string(Sys.KERNEL) catch system_data["os_version"] = "unknown" end - + try system_data["architecture"] = string(Sys.ARCH) catch system_data["architecture"] = "unknown" end - + try system_data["cpu_cores"] = Int(CPUSummary.num_cores()) catch system_data["cpu_cores"] = "unknown" end - + try system_data["cpu_logical_cores"] = Sys.CPU_THREADS catch system_data["cpu_logical_cores"] = "unknown" end - + try system_data["julia_threads"] = Threads.nthreads() catch system_data["julia_threads"] = "unknown" end - + try system_data["word_size"] = Sys.WORD_SIZE catch system_data["word_size"] = "unknown" end - + try system_data["machine"] = Sys.MACHINE catch system_data["machine"] = "unknown" end - + # CPU details try system_data["cpu_name"] = string(Sys.CPU_NAME) catch system_data["cpu_name"] = "unknown" end - + try # Architecture info from Sys system_data["cpu_architecture"] = string(Sys.ARCH) catch system_data["cpu_architecture"] = "unknown" end - + # Categorize CPU vendor for easy analysis try cpu_name_lower = lowercase(string(system_data["cpu_name"])) @@ -318,8 +316,7 @@ function get_detailed_system_info() system_data["cpu_vendor"] = "Intel" elseif contains(cpu_name_lower, "amd") system_data["cpu_vendor"] = "AMD" - elseif contains(cpu_name_lower, "apple") || contains(cpu_name_lower, "m1") || - contains(cpu_name_lower, "m2") || contains(cpu_name_lower, "m3") + elseif contains(cpu_name_lower, "apple") || contains(cpu_name_lower, "m1") || contains(cpu_name_lower, "m2") || contains(cpu_name_lower, "m3") system_data["cpu_vendor"] = "Apple" else system_data["cpu_vendor"] = "Other" @@ -327,14 +324,14 @@ function get_detailed_system_info() catch system_data["cpu_vendor"] = "unknown" end - + # BLAS and linear algebra libraries try system_data["blas_vendor"] = string(LinearAlgebra.BLAS.vendor()) catch system_data["blas_vendor"] = "unknown" end - + # LAPACK vendor detection (safe for different Julia versions) try system_data["lapack_vendor"] = string(LinearAlgebra.LAPACK.vendor()) @@ -342,58 +339,52 @@ function get_detailed_system_info() # Fallback: LAPACK vendor often matches BLAS vendor system_data["lapack_vendor"] = get(system_data, "blas_vendor", "unknown") end - + try system_data["blas_num_threads"] = LinearAlgebra.BLAS.get_num_threads() catch system_data["blas_num_threads"] = "unknown" end - + # LinearSolve-specific package availability try system_data["mkl_available"] = LinearSolve.usemkl catch system_data["mkl_available"] = false end - + try - system_data["mkl_used"] = system_data["mkl_available"] && - contains(lowercase(string(system_data["blas_vendor"])), "mkl") + system_data["mkl_used"] = system_data["mkl_available"] && contains(lowercase(string(system_data["blas_vendor"])), "mkl") catch system_data["mkl_used"] = false end - + try system_data["apple_accelerate_available"] = LinearSolve.appleaccelerate_isavailable() catch system_data["apple_accelerate_available"] = false end - + try - system_data["apple_accelerate_used"] = system_data["apple_accelerate_available"] && - contains( - lowercase(string(system_data["blas_vendor"])), "accelerate") + system_data["apple_accelerate_used"] = system_data["apple_accelerate_available"] && contains(lowercase(string(system_data["blas_vendor"])), "accelerate") catch system_data["apple_accelerate_used"] = false end - + # BLIS availability check - based on JLL packages system_data["blis_available"] = false system_data["blis_used"] = false system_data["blis_jll_loaded"] = 
false system_data["lapack_jll_loaded"] = false - + try # Check if BLIS_jll and LAPACK_jll are loaded - system_data["blis_jll_loaded"] = haskey(Base.loaded_modules, - Base.PkgId(Base.UUID("068f7417-6964-5086-9a5b-bc0c5b4f7fa6"), "BLIS_jll")) - system_data["lapack_jll_loaded"] = haskey(Base.loaded_modules, - Base.PkgId(Base.UUID("51474c39-65e3-53ba-86ba-03b1b862ec14"), "LAPACK_jll")) - + system_data["blis_jll_loaded"] = haskey(Base.loaded_modules, Base.PkgId(Base.UUID("068f7417-6964-5086-9a5b-bc0c5b4f7fa6"), "BLIS_jll")) + system_data["lapack_jll_loaded"] = haskey(Base.loaded_modules, Base.PkgId(Base.UUID("51474c39-65e3-53ba-86ba-03b1b862ec14"), "LAPACK_jll")) + # BLIS is available if JLL packages are loaded and BLISLUFactorization exists - if (system_data["blis_jll_loaded"] || system_data["lapack_jll_loaded"]) && - isdefined(LinearSolve, :BLISLUFactorization) && - hasmethod(LinearSolve.BLISLUFactorization, ()) + if (system_data["blis_jll_loaded"] || system_data["lapack_jll_loaded"]) && + isdefined(LinearSolve, :BLISLUFactorization) && hasmethod(LinearSolve.BLISLUFactorization, ()) system_data["blis_available"] = true # Check if BLIS is actually being used (contains "blis" in BLAS vendor) system_data["blis_used"] = contains(lowercase(string(system_data["blas_vendor"])), "blis") @@ -401,46 +392,42 @@ function get_detailed_system_info() catch # If there's any error checking BLIS JLL packages, leave as false end - + # GPU information try system_data["cuda_available"] = is_cuda_available() catch system_data["cuda_available"] = false end - + try system_data["metal_available"] = is_metal_available() catch system_data["metal_available"] = false end - + # Try to detect if CUDA/Metal packages are actually loaded system_data["cuda_loaded"] = false system_data["metal_loaded"] = false try # Check if CUDA algorithms are actually available if system_data["cuda_available"] - system_data["cuda_loaded"] = isdefined(Main, :CUDA) || - haskey(Base.loaded_modules, - Base.PkgId(Base.UUID("052768ef-5323-5732-b1bb-66c8b64840ba"), "CUDA")) + system_data["cuda_loaded"] = isdefined(Main, :CUDA) || haskey(Base.loaded_modules, Base.PkgId(Base.UUID("052768ef-5323-5732-b1bb-66c8b64840ba"), "CUDA")) end if system_data["metal_available"] - system_data["metal_loaded"] = isdefined(Main, :Metal) || - haskey(Base.loaded_modules, - Base.PkgId(Base.UUID("dde4c033-4e86-420c-a63e-0dd931031962"), "Metal")) + system_data["metal_loaded"] = isdefined(Main, :Metal) || haskey(Base.loaded_modules, Base.PkgId(Base.UUID("dde4c033-4e86-420c-a63e-0dd931031962"), "Metal")) end catch # If we can't detect, leave as false end - + # Environment information try system_data["libm"] = Base.libm_name catch system_data["libm"] = "unknown" end - + # libdl_name may not exist in all Julia versions try system_data["libdl"] = Base.libdl_name @@ -454,21 +441,20 @@ function get_detailed_system_info() meminfo = read(`cat /proc/meminfo`, String) mem_match = match(r"MemTotal:\s*(\d+)\s*kB", meminfo) if mem_match !== nothing - system_data["total_memory_gb"] = round( - parse(Int, mem_match.captures[1]) / 1024 / 1024, digits = 2) + system_data["total_memory_gb"] = round(parse(Int, mem_match.captures[1]) / 1024 / 1024, digits=2) else system_data["total_memory_gb"] = "unknown" end elseif Sys.isapple() mem_bytes = parse(Int, read(`sysctl -n hw.memsize`, String)) - system_data["total_memory_gb"] = round(mem_bytes / 1024^3, digits = 2) + system_data["total_memory_gb"] = round(mem_bytes / 1024^3, digits=2) else system_data["total_memory_gb"] = "unknown" end catch 
system_data["total_memory_gb"] = "unknown" end - + # Create DataFrame with single row return DataFrame([system_data]) end diff --git a/lib/LinearSolveAutotune/src/telemetry.jl b/lib/LinearSolveAutotune/src/telemetry.jl index acb9c1637..c1e16c79a 100644 --- a/lib/LinearSolveAutotune/src/telemetry.jl +++ b/lib/LinearSolveAutotune/src/telemetry.jl @@ -26,11 +26,11 @@ Returns an authentication method indicator if successful, nothing if setup fails function setup_github_authentication(; auto_login::Bool = true) # 1. Check for `gh` CLI (system or JLL) gh_cmd = get_gh_command() - + # First check if already authenticated try # Suppress output of gh auth status check - if success(pipeline(`$gh_cmd auth status`; stdout = devnull, stderr = devnull)) + if success(pipeline(`$gh_cmd auth status`; stdout=devnull, stderr=devnull)) # Check if logged in to github.com auth_status_output = read(`$gh_cmd auth status`, String) if contains(auth_status_output, "Logged in to github.com") @@ -58,18 +58,18 @@ function setup_github_authentication(; auto_login::Bool = true) println("\nWould you like to authenticate with GitHub now? (y/n)") print("> ") response = readline() - + if lowercase(strip(response)) in ["y", "yes"] println("\n📝 Starting GitHub authentication...") println(" This will open your browser to authenticate with GitHub.") println(" Please follow the prompts to complete authentication.\n") - + try # Run gh auth login interactively (using system gh or JLL) run(`$gh_cmd auth login`) - + # Check if authentication succeeded - if success(pipeline(`$gh_cmd auth status`; stdout = devnull, stderr = devnull)) + if success(pipeline(`$gh_cmd auth status`; stdout=devnull, stderr=devnull)) auth_status_output = read(`$gh_cmd auth status`, String) if contains(auth_status_output, "Logged in to github.com") println("\n✅ Authentication successful! 
You can now share results.") @@ -159,34 +159,29 @@ function format_system_info_markdown(system_info::Dict) os_kernel = get(system_info, "os_version", get(system_info, "os", "unknown")) push!(lines, "- **OS**: $os_display ($os_kernel)") # Handle both "arch" and "architecture" keys - push!(lines, - "- **Architecture**: $(get(system_info, "architecture", get(system_info, "arch", "unknown")))") + push!(lines, "- **Architecture**: $(get(system_info, "architecture", get(system_info, "arch", "unknown")))") push!(lines, "- **CPU**: $(get(system_info, "cpu_name", "unknown"))") # Handle both "num_cores" and "cpu_cores" keys push!(lines, "- **Cores**: $(get(system_info, "cpu_cores", get(system_info, "num_cores", "unknown")))") # Handle both "num_threads" and "julia_threads" keys - push!(lines, - "- **Threads**: $(get(system_info, "julia_threads", get(system_info, "num_threads", "unknown")))") + push!(lines, "- **Threads**: $(get(system_info, "julia_threads", get(system_info, "num_threads", "unknown")))") push!(lines, "- **BLAS**: $(get(system_info, "blas_vendor", "unknown"))") push!(lines, "- **MKL Available**: $(get(system_info, "mkl_available", false))") - push!(lines, - "- **Apple Accelerate Available**: $(get(system_info, "apple_accelerate_available", false))") + push!(lines, "- **Apple Accelerate Available**: $(get(system_info, "apple_accelerate_available", false))") # Handle both "has_cuda" and "cuda_available" keys - push!(lines, - "- **CUDA Available**: $(get(system_info, "cuda_available", get(system_info, "has_cuda", false)))") + push!(lines, "- **CUDA Available**: $(get(system_info, "cuda_available", get(system_info, "has_cuda", false)))") # Handle both "has_metal" and "metal_available" keys - push!(lines, - "- **Metal Available**: $(get(system_info, "metal_available", get(system_info, "has_metal", false)))") - + push!(lines, "- **Metal Available**: $(get(system_info, "metal_available", get(system_info, "has_metal", false)))") + # Add package versions section if haskey(system_info, "package_versions") push!(lines, "") push!(lines, "### Package Versions") pkg_versions = system_info["package_versions"] - + # Sort packages for consistent display sorted_packages = sort(collect(keys(pkg_versions))) - + for pkg_name in sorted_packages version = pkg_versions[pkg_name] push!(lines, "- **$pkg_name**: $version") @@ -207,14 +202,14 @@ function format_categories_markdown(categories::Dict{String, String}) end lines = String[] - + # Group categories by element type eltype_categories = Dict{String, Dict{String, String}}() - + for (key, algorithm) in categories # Parse key like "Float64_tiny (5-20)" -> eltype="Float64", range="tiny (5-20)" if contains(key, "_") - eltype, range = split(key, "_", limit = 2) + eltype, range = split(key, "_", limit=2) if !haskey(eltype_categories, eltype) eltype_categories[eltype] = Dict{String, String}() end @@ -227,11 +222,10 @@ function format_categories_markdown(categories::Dict{String, String}) eltype_categories["Mixed"][key] = algorithm end end - + # Define the proper order for size ranges - size_order = ["tiny (5-20)", "small (20-100)", "medium (100-300)", - "large (300-1000)", "big (10000+)"] - + size_order = ["tiny (5-20)", "small (20-100)", "medium (100-300)", "large (300-1000)", "big (10000+)"] + # Custom sort function for ranges function sort_ranges(ranges_dict) sorted_pairs = [] @@ -248,7 +242,7 @@ function format_categories_markdown(categories::Dict{String, String}) end return sorted_pairs end - + # Format each element type for (eltype, ranges) in 
sort(eltype_categories) push!(lines, "#### Recommendations for $eltype") @@ -273,28 +267,28 @@ Includes both summary statistics and raw performance data in collapsible section """ function format_detailed_results_markdown(df::DataFrame) lines = String[] - + # Get unique element types eltypes = unique(df.eltype) - + for eltype in eltypes push!(lines, "#### Results for $eltype") push!(lines, "") - + # Filter results for this element type eltype_df = filter(row -> row.eltype == eltype, df) - + if nrow(eltype_df) == 0 push!(lines, "No results for this element type.") push!(lines, "") continue end - + # Create a summary table with average performance per algorithm for this element type - summary = combine(groupby(eltype_df, :algorithm), - :gflops => mean => :avg_gflops, - :gflops => std => :std_gflops, - nrow => :num_tests) + summary = combine(groupby(eltype_df, :algorithm), + :gflops => mean => :avg_gflops, + :gflops => std => :std_gflops, + nrow => :num_tests) sort!(summary, :avg_gflops, rev = true) push!(lines, "##### Summary Statistics") @@ -307,38 +301,39 @@ function format_detailed_results_markdown(df::DataFrame) std_str = @sprintf("%.2f", row.std_gflops) push!(lines, "| $(row.algorithm) | $avg_str | $std_str | $(row.num_tests) |") end - + push!(lines, "") - + # Add raw performance data in collapsible details blocks for each algorithm push!(lines, "
") push!(lines, "Raw Performance Data") push!(lines, "") - + # Get unique algorithms for this element type algorithms = unique(eltype_df.algorithm) - + for algorithm in sort(algorithms) # Filter data for this algorithm algo_df = filter(row -> row.algorithm == algorithm, eltype_df) - + # Sort by size for better readability sort!(algo_df, :size) + push!(lines, "##### $algorithm") push!(lines, "") push!(lines, "| Matrix Size | GFLOPs | Status |") push!(lines, "|-------------|--------|--------|") - + for row in eachrow(algo_df) gflops_str = row.success ? @sprintf("%.3f", row.gflops) : "N/A" status = row.success ? "✅ Success" : "❌ Failed" push!(lines, "| $(row.size) | $gflops_str | $status |") end - + push!(lines, "") end - + push!(lines, "
") push!(lines, "") end @@ -354,7 +349,8 @@ Create a GitHub issue with benchmark results for community data collection. Note: plot_files parameter is kept for compatibility but not used. """ function upload_to_github(content::String, plot_files, auth_info::Tuple, - results_df::DataFrame, system_info::Dict, categories::Dict) + results_df::DataFrame, system_info::Dict, categories::Dict) + auth_method, auth_data = auth_info if auth_method === nothing @@ -367,21 +363,21 @@ function upload_to_github(content::String, plot_files, auth_info::Tuple, @info "📁 Results saved locally to $fallback_file" return end - + @info "📤 Preparing to upload benchmark results..." try target_repo = "SciML/LinearSolve.jl" issue_number = 669 # The existing issue for collecting autotune results - + # Construct comment body cpu_name = get(system_info, "cpu_name", "unknown") os_name = get(system_info, "os", "unknown") timestamp = Dates.format(Dates.now(), "yyyy-mm-dd HH:MM") - + comment_body = """ ## Benchmark Results: $cpu_name on $os_name ($timestamp) - + $content --- @@ -395,7 +391,7 @@ function upload_to_github(content::String, plot_files, auth_info::Tuple, """ @info "📝 Adding comment to issue #669..." - + issue_url = nothing if auth_method == :gh_cli issue_url = comment_on_issue_gh(target_repo, issue_number, comment_body) @@ -434,7 +430,7 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt if plot_files === nothing return nothing, Dict{String, String}() end - + try # Handle different plot_files formats files_to_upload = if isa(plot_files, Tuple) @@ -445,13 +441,13 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt else return nothing, Dict{String, String}() end - + # Filter existing files existing_files = Dict(k => v for (k, v) in files_to_upload if isfile(v)) if isempty(existing_files) return nothing, Dict{String, String}() end - + # Create README content readme_content = """ # LinearSolve.jl Benchmark Plots @@ -465,7 +461,7 @@ function upload_plots_to_gist(plot_files::Union{Nothing, Tuple, Dict}, auth, elt for (name, _) in existing_files readme_content *= "- `$name`\n" end - + readme_content *= """ ## Viewing the Plots @@ -475,66 +471,66 @@ The PNG images can be viewed directly in the browser. 
Click on any `.png` file a --- *Generated automatically by LinearSolve.jl autotune system* """ - + # Create initial gist with README timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + gist_files = Dict{String, Any}() gist_files["README.md"] = Dict("content" => readme_content) - + params = Dict( "description" => gist_desc, "public" => true, "files" => gist_files ) - + # Create the gist - gist = GitHub.create_gist(; params = params, auth = auth) + gist = GitHub.create_gist(; params=params, auth=auth) gist_url = gist.html_url gist_id = split(gist_url, "/")[end] - username = split(gist_url, "/")[end - 1] - + username = split(gist_url, "/")[end-1] + # Now clone the gist and add the binary files temp_dir = mktempdir() try # Clone using HTTPS with token authentication clone_url = "https://$(auth.token)@gist.github.com/$gist_id.git" run(`git clone $clone_url $temp_dir`) - + # Copy all plot files to the gist directory for (name, filepath) in existing_files target_path = joinpath(temp_dir, name) - cp(filepath, target_path; force = true) + cp(filepath, target_path; force=true) end - + # Configure git user for the commit cd(temp_dir) do # Set a generic user for the commit run(`git config user.email "linearsolve-autotune@example.com"`) run(`git config user.name "LinearSolve Autotune"`) - + # Stage, commit and push the changes run(`git add .`) run(`git commit -m "Add benchmark plots"`) run(`git push`) end - + @info "✅ Successfully uploaded plots to gist: $gist_url" - + # Construct raw URLs for the uploaded files raw_urls = Dict{String, String}() for (name, _) in existing_files raw_urls[name] = "https://gist.githubusercontent.com/$username/$gist_id/raw/$name" end - + return gist_url, raw_urls - + finally # Clean up temporary directory - rm(temp_dir; recursive = true, force = true) + rm(temp_dir; recursive=true, force=true) end - + catch e @warn "Failed to upload plots to gist via API: $e" # Fall back to HTML with embedded images @@ -566,7 +562,7 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String)

LinearSolve.jl Benchmark Plots

Element Type: $eltype_str

""" - + # Read files and embed as base64 for (name, filepath) in files if isfile(filepath) && endswith(filepath, ".png") @@ -574,7 +570,7 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String) binary_content = read(filepath) base64_content = base64encode(binary_content) data_uri = "data:image/png;base64,$base64_content" - + # Add to HTML html_content *= """
@@ -584,30 +580,30 @@ function upload_plots_to_gist_fallback(files::Dict, auth, eltype_str::String) """ end end - + html_content *= """ """ - + # Create gist with HTML file timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + gist_files = Dict{String, Any}() gist_files["plots.html"] = Dict("content" => html_content) - + params = Dict( "description" => gist_desc, "public" => true, "files" => gist_files ) - - gist = GitHub.create_gist(; params = params, auth = auth) - + + gist = GitHub.create_gist(; params=params, auth=auth) + @info "✅ Uploaded plots to gist (HTML fallback): $(gist.html_url)" return gist.html_url, Dict{String, String}() - + catch e @warn "Failed to upload plots to gist (fallback): $e" return nothing, Dict{String, String}() @@ -623,7 +619,7 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype if plot_files === nothing return nothing, Dict{String, String}() end - + try gh_cmd = get_gh_command() # Handle different plot_files formats @@ -635,17 +631,17 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype else return nothing, Dict{String, String}() end - + # Filter existing files existing_files = Dict(k => v for (k, v) in files_to_upload if isfile(v)) if isempty(existing_files) return nothing, Dict{String, String}() end - + # Create initial gist with a README timestamp = Dates.format(Dates.now(), "yyyy-mm-dd_HH-MM-SS") gist_desc = "LinearSolve.jl Benchmark Plots - $eltype_str - $timestamp" - + # Create README content readme_content = """ # LinearSolve.jl Benchmark Plots @@ -659,7 +655,7 @@ function upload_plots_to_gist_gh(plot_files::Union{Nothing, Tuple, Dict}, eltype for (name, _) in existing_files readme_content *= "- `$name`\n" end - + readme_content *= """ ## Viewing the Plots @@ -669,71 +665,71 @@ The PNG images can be viewed directly in the browser. Click on any `.png` file a --- *Generated automatically by LinearSolve.jl autotune system* """ - + # Create temporary file for README readme_file = tempname() * "_README.md" open(readme_file, "w") do f write(f, readme_content) end - + # Create initial gist with README out = Pipe() err = Pipe() - run(pipeline(`$gh_cmd gist create -d $gist_desc -p $readme_file`, stdout = out, stderr = err)) + run(pipeline(`$gh_cmd gist create -d $gist_desc -p $readme_file`, stdout=out, stderr=err)) close(out.in) close(err.in) - + gist_url = strip(read(out, String)) err_str = read(err, String) - + if !startswith(gist_url, "https://gist.github.com/") error("gh gist create did not return a valid URL. Output: $gist_url. 
Error: $err_str") end - + # Extract gist ID from URL gist_id = split(gist_url, "/")[end] - + # Clone the gist temp_dir = mktempdir() try # Clone the gist run(`$gh_cmd gist clone $gist_id $temp_dir`) - + # Copy all plot files to the gist directory for (name, filepath) in existing_files target_path = joinpath(temp_dir, name) - cp(filepath, target_path; force = true) + cp(filepath, target_path; force=true) end - + # Stage, commit and push the changes cd(temp_dir) do run(`git add .`) run(`git commit -m "Add benchmark plots"`) run(`git push`) end - + @info "✅ Successfully uploaded plots to gist: $gist_url" - + # Get username for constructing raw URLs username_out = Pipe() - run(pipeline(`$gh_cmd api user --jq .login`, stdout = username_out)) + run(pipeline(`$gh_cmd api user --jq .login`, stdout=username_out)) close(username_out.in) username = strip(read(username_out, String)) - + # Construct raw URLs for the uploaded files raw_urls = Dict{String, String}() for (name, _) in existing_files raw_urls[name] = "https://gist.githubusercontent.com/$username/$gist_id/raw/$name" end - + return gist_url, raw_urls - + finally # Clean up temporary directory - rm(temp_dir; recursive = true, force = true) - rm(readme_file; force = true) + rm(temp_dir; recursive=true, force=true) + rm(readme_file; force=true) end - + catch e @warn "Failed to upload plots to gist via gh CLI: $e" return nothing, Dict{String, String}() @@ -747,9 +743,9 @@ Add a comment to an existing GitHub issue using the GitHub API. """ function comment_on_issue_api(target_repo, issue_number, body, auth) try - repo_obj = GitHub.repo(target_repo; auth = auth) - issue = GitHub.issue(repo_obj, issue_number; auth = auth) - comment = GitHub.create_comment(repo_obj, issue, body; auth = auth) + repo_obj = GitHub.repo(target_repo; auth=auth) + issue = GitHub.issue(repo_obj, issue_number; auth=auth) + comment = GitHub.create_comment(repo_obj, issue, body; auth=auth) @info "✅ Added comment to issue #$(issue_number) via API" return "https://github.com/$(target_repo)/issues/$(issue_number)#issuecomment-$(comment.id)" catch e @@ -772,18 +768,18 @@ function comment_on_issue_gh(target_repo, issue_number, body) mktemp() do path, io write(io, body) flush(io) - + # Construct and run the gh command cmd = `$gh_cmd issue comment $issue_number --repo $target_repo --body-file $path` - + out = Pipe() err = Pipe() - run(pipeline(cmd, stdout = out, stderr = err)) + run(pipeline(cmd, stdout=out, stderr=err)) close(out) close(err) out_str = read(out, String) err_str = read(err, String) - + @info "✅ Added comment to issue #$(issue_number) via `gh` CLI" return "https://github.com/$(target_repo)/issues/$(issue_number)" end @@ -800,9 +796,9 @@ Create a GitHub issue using the GitHub.jl API. 
""" function create_benchmark_issue_api(target_repo, title, body, auth) try - repo_obj = GitHub.repo(target_repo; auth = auth) + repo_obj = GitHub.repo(target_repo; auth=auth) params = Dict("title" => title, "body" => body, "labels" => ["benchmark-data"]) - issue_result = GitHub.create_issue(repo_obj; params = params, auth = auth) + issue_result = GitHub.create_issue(repo_obj; params=params, auth=auth) @info "✅ Created benchmark results issue #$(issue_result.number) via API" return issue_result.html_url catch e @@ -825,22 +821,22 @@ function create_benchmark_issue_gh(target_repo, title, body) mktemp() do path, io write(io, body) flush(io) - + # Construct and run the gh command cmd = `$gh_cmd issue create --repo $target_repo --title $title --body-file $path --label benchmark-data` - + out = Pipe() err = Pipe() - run(pipeline(cmd, stdout = out, stderr = err)) + run(pipeline(cmd, stdout=out, stderr=err)) closewrite(out) closewrite(err) out_str = read(out, String) err_str = read(err, String) # Capture output to get the issue URL issue_url = strip(out_str) - + if !startswith(issue_url, "https://github.com/") - error("gh CLI command did not return a valid URL. Output: $issue_url. Error: $err_str") + error("gh CLI command did not return a valid URL. Output: $issue_url. Error: $err_str") end @info "✅ Created benchmark results issue via `gh` CLI" @@ -850,4 +846,4 @@ function create_benchmark_issue_gh(target_repo, title, body) @warn "Failed to create benchmark issue via `gh` CLI: $e" out_str err_str return nothing end -end +end \ No newline at end of file