Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
ba049d1
Bumping version to 0.1.310
fcostaoliveira Aug 8, 2025
93b63f5
Added gcc:8.5.0-arm64-debian-buster-default
fcostaoliveira Aug 8, 2025
a9fa9ea
Moved from gcc-8 to gcc-15. runner image using debian:bookworm
fcostaoliveira Aug 10, 2025
9682d56
bumping version
fcostaoliveira Aug 10, 2025
cb4404b
Merge branch 'main' into gcc-15-and-bookworm
fcostaoliveira Aug 10, 2025
033bedd
Removed reference to gcc-8 on tests
fcostaoliveira Aug 11, 2025
7af09ba
Fixed missing gcc-8 removal
fcostaoliveira Aug 11, 2025
7ba7797
renamed buster->bookworm
fcostaoliveira Aug 11, 2025
f140ef1
Merge remote-tracking branch 'origin/main' into gcc-15-and-bookworm
fcostaoliveira Aug 12, 2025
31d0788
Added extra session caching, RPUSH, and SET benchmarks
fcostaoliveira Aug 13, 2025
d62497b
working on box plot charts on compare tool
fcostaoliveira Aug 13, 2025
7a88add
Merge remote-tracking branch 'origin/main' into box.plot
fcostaoliveira Aug 13, 2025
c7259c3
Merge remote-tracking branch 'origin/main' into box.plot
fcostaoliveira Aug 13, 2025
387d5e5
Added command filter and an extra benchmark for 1:10 set/get 1KB benchmark
fcostaoliveira Aug 19, 2025
aae8699
Merge remote-tracking branch 'origin/main' into box.plot
fcostaoliveira Aug 19, 2025
31ddcaa
bumping version to 0.1.325
fcostaoliveira Aug 19, 2025
a3aabfa
added uri support for benchmark runner
fcostaoliveira Aug 22, 2025
8c20316
fixes for 0.1.334
fcostaoliveira Aug 25, 2025
10d63f1
bumping version
fcostaoliveira Aug 26, 2025
2378788
version 0.1.338
fcostaoliveira Aug 29, 2025
5a6f209
per arch streams
fcostaoliveira Aug 29, 2025
5102503
stash
fcostaoliveira Sep 24, 2025
a5d0874
Merge remote-tracking branch 'origin/main' into box.plot
fcostaoliveira Sep 24, 2025
0605fe8
Removed dump.rdb
fcostaoliveira Sep 24, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 22 additions & 1 deletion poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "redis-benchmarks-specification"
version = "0.1.323"
version = "0.2.11"
description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
authors = ["filipecosta90 <filipecosta.90@gmail.com>","Redis Performance Group <performance@redis.com>"]
readme = "Readme.md"
Expand All @@ -27,6 +27,7 @@ pandas = "^2.1.2"
numpy = "^2.0.0"
jsonpath-ng = "^1.6.1"

seaborn = "^0.13.2"
[tool.poetry.dev-dependencies]
click = "8.1.7"
black = "24.4.2"
Expand Down
277 changes: 261 additions & 16 deletions redis_benchmarks_specification/__builder__/builder.py

Large diffs are not rendered by default.

8 changes: 7 additions & 1 deletion redis_benchmarks_specification/__cli__/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def spec_cli_args(parser):
parser.add_argument("--gh_repo", type=str, default="redis")
parser.add_argument("--server_name", type=str, default=None)
parser.add_argument("--run_image", type=str, default="redis")
parser.add_argument("--build_arch", type=str, default=None)
parser.add_argument("--arch", type=str, default="amd64")
parser.add_argument("--id", type=str, default="dockerhub")
parser.add_argument("--mnt_point", type=str, default="")
parser.add_argument("--trigger-unstable-commits", type=bool, default=True)
Expand Down Expand Up @@ -217,4 +217,10 @@ def spec_cli_args(parser):
default=-1,
help="Wait x sections for build. If -1, waits forever.",
)
parser.add_argument(
"--command-regex",
type=str,
default=".*",
help="Filter tests by command using regex. Only tests that include commands matching this regex will be processed.",
)
return parser
22 changes: 16 additions & 6 deletions redis_benchmarks_specification/__cli__/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
STREAM_KEYNAME_GH_EVENTS_COMMIT,
STREAM_GH_EVENTS_COMMIT_BUILDERS_CG,
STREAM_KEYNAME_NEW_BUILD_EVENTS,
get_arch_specific_stream_name,
)
from redis_benchmarks_specification.__common__.package import (
get_version_string,
Expand Down Expand Up @@ -84,7 +85,7 @@ def trigger_tests_dockerhub_cli_command_logic(args, project_name, project_versio
args.id,
conn,
args.run_image,
args.build_arch,
args.arch,
testDetails,
"n/a",
[],
Expand All @@ -104,6 +105,12 @@ def trigger_tests_dockerhub_cli_command_logic(args, project_name, project_versio
0,
10000,
args.tests_regexp,
".*", # command_regexp
False, # use_git_timestamp
"redis", # server_name
"redis", # github_org
"redis", # github_repo
None, # existing_artifact_keys
)
build_stream_fields["github_repo"] = args.gh_repo
build_stream_fields["github_org"] = args.gh_org
Expand All @@ -118,9 +125,12 @@ def trigger_tests_dockerhub_cli_command_logic(args, project_name, project_versio
store_airgap_image_redis(conn, docker_client, args.run_image)

if result is True:
benchmark_stream_id = conn.xadd(
STREAM_KEYNAME_NEW_BUILD_EVENTS, build_stream_fields
# Use architecture-specific stream
arch_specific_stream = get_arch_specific_stream_name(args.arch)
logging.info(
f"CLI adding work to architecture-specific stream: {arch_specific_stream}"
)
benchmark_stream_id = conn.xadd(arch_specific_stream, build_stream_fields)
logging.info(
"sucessfully requested a new run {}. Stream id: {}".format(
build_stream_fields, benchmark_stream_id
Expand Down Expand Up @@ -432,9 +442,9 @@ def trigger_tests_cli_command_logic(args, project_name, project_version):
commit_dict["tests_groups_regexp"] = tests_groups_regexp
commit_dict["github_org"] = args.gh_org
commit_dict["github_repo"] = args.gh_repo
if args.build_arch is not None:
commit_dict["build_arch"] = args.build_arch
commit_dict["arch"] = args.build_arch
if args.arch is not None:
commit_dict["build_arch"] = args.arch
commit_dict["arch"] = args.arch
if args.server_name is not None and args.server_name != "":
commit_dict["server_name"] = args.server_name
if args.build_artifacts != "":
Expand Down
14 changes: 14 additions & 0 deletions redis_benchmarks_specification/__common__/env.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,20 @@
"STREAM_KEYNAME_NEW_BUILD_EVENTS", "oss:api:gh/redis/redis/builds"
)


# Function to get architecture-specific build events stream name
def get_arch_specific_stream_name(arch):
    """Get architecture-specific stream name for build events"""
    base_stream = STREAM_KEYNAME_NEW_BUILD_EVENTS
    # Map every accepted architecture alias onto its canonical stream suffix.
    alias_to_suffix = {
        "amd64": "amd64",
        "x86_64": "amd64",
        "arm64": "arm64",
        "aarch64": "arm64",
    }
    suffix = alias_to_suffix.get(arch)
    if suffix is None:
        # Unknown architectures fall back to the shared base stream.
        return base_stream
    return f"{base_stream}:{suffix}"


STREAM_GH_NEW_BUILD_RUNNERS_CG = os.getenv(
"STREAM_GH_NEW_BUILD_RUNNERS_CG", "runners-cg:redis/redis/commits"
)
Expand Down
11 changes: 3 additions & 8 deletions redis_benchmarks_specification/__common__/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def get_benchmark_specs(testsuites_folder, test="", test_regex=".*"):
for test_name in original_files:
match_obj = re.search(test_regexp_string, test_name)
if match_obj is None:
logging.info(
logging.debug(
"Skipping test file: {} given it does not match regex {}".format(
test_name, test_regexp_string
)
Expand Down Expand Up @@ -291,13 +291,7 @@ def export_redis_metrics(
metric_name,
metric_value,
) in overall_end_time_metrics.items():
tsname_metric = "{}/{}/{}/benchmark_end/{}/{}".format(
sprefix,
test_name,
by_variant,
setup_name,
metric_name,
)
tsname_metric = f"{sprefix}/{test_name}/{by_variant}/benchmark_end/{running_platform}/{setup_name}/{metric_name}"

logging.debug(
"Adding a redis server side metric collected at the end of benchmark."
Expand Down Expand Up @@ -404,6 +398,7 @@ def exporter_datasink_common(
running_platform,
None,
git_hash,
disable_target_tables=True,
)
if collect_memory_metrics:
logging.info("Collecting memory metrics")
Expand Down
20 changes: 20 additions & 0 deletions redis_benchmarks_specification/__common__/suppress_warnings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
"""
Warning suppression module that should be imported first to suppress known warnings.
"""

import warnings

# Suppress cryptography deprecation warnings from paramiko
warnings.filterwarnings("ignore", category=DeprecationWarning, module="paramiko")
warnings.filterwarnings("ignore", message=".*TripleDES.*", category=DeprecationWarning)
warnings.filterwarnings(
"ignore", message=".*cryptography.*", category=DeprecationWarning
)

# Also suppress the specific CryptographyDeprecationWarning if it exists
try:
from cryptography.utils import CryptographyDeprecationWarning

warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
except ImportError:
pass
90 changes: 60 additions & 30 deletions redis_benchmarks_specification/__common__/timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -838,34 +838,47 @@ def common_exporter_logic(
and artifact_version != ""
and artifact_version != "N/A"
):
# extract per-version datapoints
total_hs_ts = len(per_hash_time_series_dict.keys())
logging.info(
f"Extending the by.hash {git_hash} timeseries ({total_hs_ts}) with version info {artifact_version}"
)
for hash_timeserie in per_hash_time_series_dict.values():
hash_timeserie["labels"]["version"] = artifact_version
(
_,
per_version_time_series_dict,
version_target_tables,
) = extract_perversion_timeseries_from_results(
used_ts,
metrics,
results_dict,
artifact_version,
tf_github_org,
tf_github_repo,
deployment_name,
deployment_type,
test_name,
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
testcase_metric_context_paths,
)
total_break_by_added += 1
# Check if version 255.255.255 should only be pushed for unstable branch
should_push_version = True
if artifact_version == "255.255.255":
if tf_github_branch != "unstable":
logging.info(
f"Skipping version 255.255.255 data push for branch '{tf_github_branch}' "
f"(only pushing for 'unstable' branch)"
)
should_push_version = False
else:
logging.info(f"Pushing version 255.255.255 data for unstable branch")

if should_push_version:
# extract per-version datapoints
total_hs_ts = len(per_hash_time_series_dict.keys())
logging.info(
f"Extending the by.hash {git_hash} timeseries ({total_hs_ts}) with version info {artifact_version}"
)
for hash_timeserie in per_hash_time_series_dict.values():
hash_timeserie["labels"]["version"] = artifact_version
(
_,
per_version_time_series_dict,
version_target_tables,
) = extract_perversion_timeseries_from_results(
used_ts,
metrics,
results_dict,
artifact_version,
tf_github_org,
tf_github_repo,
deployment_name,
deployment_type,
test_name,
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
testcase_metric_context_paths,
)
total_break_by_added += 1
else:
logging.warning(
"there was no git VERSION information to push data brokedown by VERSION"
Expand Down Expand Up @@ -1054,6 +1067,9 @@ def add_standardized_metric_bybranch(
labels["deployment_name+branch"] = "{} {}".format(
deployment_name, tf_github_branch
)
labels["running_platform+branch"] = "{} {}".format(
running_platform, tf_github_branch
)
labels["test_name"] = str(test_name)
labels["metric"] = str(metric_name)
logging.info(
Expand Down Expand Up @@ -1118,11 +1134,15 @@ def add_standardized_metric_byversion(
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
labels["version"] = artifact_version
labels["deployment_name+version"] = "{} {}".format(
deployment_name, artifact_version
)
labels["running_platform+version"] = "{} {}".format(
running_platform, artifact_version
)
labels["test_name"] = str(test_name)
labels["metric"] = str(metric_name)
logging.info(
Expand Down Expand Up @@ -1169,6 +1189,7 @@ def timeseries_test_sucess_flow(
running_platform=None,
timeseries_dict=None,
git_hash=None,
disable_target_tables=False,
):
testcase_metric_context_paths = []
version_target_tables = None
Expand Down Expand Up @@ -1205,7 +1226,7 @@ def timeseries_test_sucess_flow(
)
)
push_data_to_redistimeseries(rts, timeseries_dict)
if version_target_tables is not None:
if not disable_target_tables and version_target_tables is not None:
logging.info(
"There are a total of {} distinct target tables by version".format(
len(version_target_tables.keys())
Expand All @@ -1225,7 +1246,12 @@ def timeseries_test_sucess_flow(
rts.hset(
version_target_table_keyname, None, None, version_target_table_dict
)
if branch_target_tables is not None:
elif disable_target_tables:
logging.info(
"Target tables disabled - skipping version target table creation"
)

if not disable_target_tables and branch_target_tables is not None:
logging.info(
"There are a total of {} distinct target tables by branch".format(
len(branch_target_tables.keys())
Expand All @@ -1246,6 +1272,10 @@ def timeseries_test_sucess_flow(
rts.hset(
branch_target_table_keyname, None, None, branch_target_table_dict
)
elif disable_target_tables:
logging.info(
"Target tables disabled - skipping branch target table creation"
)
if test_name is not None:
if type(test_name) is str:
update_secondary_result_keys(
Expand Down
38 changes: 38 additions & 0 deletions redis_benchmarks_specification/__compare__/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,30 @@ def create_compare_arguments(parser):
default="",
help="specify a test (or a comma separated list of tests) to use for comparison. If none is specified by default will use all of them.",
)
parser.add_argument(
"--extra-filters",
type=str,
default="",
help="specify extra filters to pass to baseline and comparison.",
)
parser.add_argument(
"--use-test-suites-folder",
action="store_true",
default=False,
help="Use test names from YAML files in test-suites folder instead of database",
)
parser.add_argument(
"--generate-boxplot",
action="store_true",
default=False,
help="Generate box plot showing performance change distribution per command",
)
parser.add_argument(
"--boxplot-output",
type=str,
default="command_performance_boxplot.png",
help="Output filename for the box plot (supports .png, .svg, .pdf)",
)
parser.add_argument(
"--defaults_filename",
type=str,
Expand Down Expand Up @@ -155,6 +179,20 @@ def create_compare_arguments(parser):
parser.add_argument("--simple-table", type=bool, default=False)
parser.add_argument("--use_metric_context_path", type=bool, default=False)
parser.add_argument("--testname_regex", type=str, default=".*", required=False)
parser.add_argument(
"--command-group-regex",
type=str,
default=".*",
required=False,
help="Filter commands by command group using regex. Only commands belonging to matching groups will be included in boxplot and summary.",
)
parser.add_argument(
"--command-regex",
type=str,
default=".*",
required=False,
help="Filter tests by command using regex. Only tests that include commands matching this regex will be processed.",
)
parser.add_argument(
"--regression_str", type=str, default="REGRESSION", required=False
)
Expand Down
Loading
Loading