
Commit b8bf90e

test: Add integration test for Firecracker with clippy-tracing instrumentation
This test verifies that Firecracker works correctly when instrumented with clippy-tracing and trace-level logs are enabled. It addresses the need for testing the log-instrument crate with the actual Firecracker binary rather than just example programs.

The test covers:
- Building Firecracker with tracing instrumentation using clippy-tracing
- Verifying basic API functionality works with instrumentation enabled
- Confirming trace-level logs are generated with meaningful function traces
- Testing runtime log level filtering capabilities
- Basic performance smoke testing to catch major regressions

Signed-off-by: demoncoder-crypto <dhillonprabhjitsingh@gmail.com>
1 parent e36e774 commit b8bf90e
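
For context, clippy-tracing wraps Firecracker functions with log-instrument entry/exit events, which the new test then looks for in the trace log. A minimal standalone sketch of that matching logic, using the same regular expression as the test, might look like this (the sample log lines are made up purely for illustration and are not real Firecracker output):

import re

# Same pattern the test uses: a thread id followed by a function entry (>>) or exit (<<) marker.
TRACE_PATTERN = r"ThreadId\(\d+\)(?:::[^>]*)?(?:>>|<<)\w+"

# Illustrative lines shaped like the "ThreadId(X)>>function_name" format the test describes.
sample_log = (
    "TRACE ThreadId(1)>>main\n"
    "TRACE ThreadId(1)>>some_instrumented_function\n"
    "TRACE ThreadId(1)<<some_instrumented_function\n"
    "TRACE ThreadId(1)<<main\n"
)

matches = re.findall(TRACE_PATTERN, sample_log)
assert any(">>" in m for m in matches)  # at least one function entry trace
assert any("<<" in m for m in matches)  # at least one function exit trace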

Lines changed: 240 additions & 0 deletions
@@ -0,0 +1,240 @@
# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

"""Test that Firecracker works correctly when instrumented with tracing and trace-level logs are enabled.

This test verifies that the log-instrument crate can successfully instrument the actual Firecracker
binary and that the instrumented Firecracker works correctly with trace-level logging.

Test Coverage:
1. Uses the clippy-tracing tool to add instrumentation to Firecracker source files
2. Builds Firecracker with the tracing feature enabled
3. Verifies basic functionality (API calls, VM lifecycle) works with instrumentation
4. Confirms trace-level logs are generated and contain meaningful information
5. Checks that the performance impact is within acceptable bounds
"""

import re
import tempfile
from pathlib import Path

import pytest

from framework import utils
from framework.microvm import MicroVMFactory
from host_tools.cargo_build import cargo, get_binary


def build_instrumented_firecracker():
    """Build Firecracker with tracing instrumentation enabled."""
    # First, add instrumentation using clippy-tracing
    clippy_tracing = get_binary("clippy-tracing")

    # Add instrumentation to a subset of files to avoid performance issues.
    # We instrument just the API server and main entry points for meaningful traces.
    instrument_args = [
        "--action", "fix",
        "--path", "./src/firecracker/src/main.rs",
        "--path", "./src/firecracker/src/api_server",
        "--path", "./src/vmm/src/lib.rs",
        "--path", "./src/vmm/src/builder.rs",
    ]

    utils.check_output(f"{clippy_tracing} {' '.join(instrument_args)}")

    # Build Firecracker with the tracing feature enabled
    cargo("build", "--features tracing --bin firecracker")

    return get_binary("firecracker")


def cleanup_instrumentation():
    """Remove instrumentation from source files."""
    clippy_tracing = get_binary("clippy-tracing")

    # Strip instrumentation from the files we modified
    strip_args = [
        "--action", "strip",
        "--path", "./src/firecracker/src/main.rs",
        "--path", "./src/firecracker/src/api_server",
        "--path", "./src/vmm/src/lib.rs",
        "--path", "./src/vmm/src/builder.rs",
    ]

    utils.check_output(f"{clippy_tracing} {' '.join(strip_args)}")


@pytest.fixture(scope="module")
def instrumented_firecracker_binary():
    """Fixture that builds an instrumented Firecracker binary and cleans up after tests."""
    try:
        binary_path = build_instrumented_firecracker()
        yield binary_path
    finally:
        cleanup_instrumentation()


def test_firecracker_tracing_basic_functionality(instrumented_firecracker_binary):
    """Test that instrumented Firecracker can start and handle basic API calls with trace logging."""
    # Create a temporary directory for this test
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create a MicroVM factory with the instrumented binary
        factory = MicroVMFactory(instrumented_firecracker_binary)

        # Build a microVM
        vm = factory.build()

        # Configure basic VM settings
        vm.basic_config(vcpu_count=1, mem_size_mib=128)

        # Spawn the VM with trace level logging
        vm.spawn(log_level="Trace", log_show_level=True, log_show_origin=True)

        try:
            # Wait for the API socket to be available
            vm._wait_for_api_socket()

            # Make some basic API calls to generate trace logs
            # Get instance info
            response = vm.api.describe_instance.get()
            assert response.status_code == 200

            # Get machine config
            response = vm.api.machine_config.get()
            assert response.status_code == 200

            # Set logger to trace level to ensure we capture instrumentation logs
            logger_config = {
                "level": "Trace"
            }
            response = vm.api.logger.put(**logger_config)
            assert response.status_code == 204

            # Make another API call after setting trace level
            response = vm.api.describe_instance.get()
            assert response.status_code == 200

            # The VM was only configured via the API and never booted, so it
            # should still report the "Not started" state
            assert vm.state == "Not started"

        finally:
            vm.kill()

        # Check the logs for instrumentation traces
        log_data = vm.log_data

        # Verify that trace level logs are present
        assert "TRACE" in log_data, "Expected TRACE level logs in output"

        # Look for log-instrument traces (function entry/exit).
        # These should have the format: ThreadId(X)>>function_name or ThreadId(X)<<function_name
        trace_pattern = r"ThreadId\(\d+\)(?:::[^>]*)?(?:>>|<<)\w+"
        trace_matches = re.findall(trace_pattern, log_data)

        assert len(trace_matches) > 0, f"Expected to find log-instrument traces in logs, but found none. Log data: {log_data[:1000]}..."

        # Verify we see function entry and exit traces
        entry_traces = [match for match in trace_matches if ">>" in match]
        exit_traces = [match for match in trace_matches if "<<" in match]

        assert len(entry_traces) > 0, "Expected to find function entry traces (>>)"
        assert len(exit_traces) > 0, "Expected to find function exit traces (<<)"

        # Verify that meaningful functions are being traced.
        # Look for traces from main, API handling, or VM management functions
        meaningful_functions = ["main", "api", "vmm", "request", "response"]
        found_meaningful = False

        for trace in trace_matches:
            for func in meaningful_functions:
                if func.lower() in trace.lower():
                    found_meaningful = True
                    break
            if found_meaningful:
                break

        assert found_meaningful, f"Expected to find traces from meaningful functions, but traces were: {trace_matches[:10]}"


def test_firecracker_tracing_performance_impact():
    """Test that instrumented Firecracker still performs reasonably (basic smoke test)."""
    import time

    # This is a basic performance smoke test to ensure tracing doesn't break functionality.
    # We're not doing detailed performance analysis, just ensuring it doesn't hang or crash.

    # Build instrumented binary
    instrumented_binary = build_instrumented_firecracker()

    try:
        factory = MicroVMFactory(instrumented_binary)
        vm = factory.build()

        # Time the basic configuration and startup
        start_time = time.time()

        vm.basic_config(vcpu_count=1, mem_size_mib=128, add_root_device=False)
        vm.spawn(log_level="Trace")

        # Make several API calls
        for _ in range(5):
            response = vm.api.describe_instance.get()
            assert response.status_code == 200

        elapsed = time.time() - start_time

        # Should complete within reasonable time (30 seconds is very generous).
        # This is just to catch major performance regressions or hangs.
        assert elapsed < 30, f"Instrumented Firecracker took too long to start and handle API calls: {elapsed}s"

        vm.kill()

    finally:
        cleanup_instrumentation()


def test_trace_log_filtering():
    """Test that trace log filtering works correctly with instrumented Firecracker."""
    instrumented_binary = build_instrumented_firecracker()

    try:
        factory = MicroVMFactory(instrumented_binary)
        vm = factory.build()

        vm.basic_config(vcpu_count=1, mem_size_mib=128, add_root_device=False)
        vm.spawn(log_level="Info")  # Start with Info level

        try:
            # Initially should not have trace logs
            initial_log_data = vm.log_data

            # Set logger to trace level
            logger_config = {"level": "Trace"}
            response = vm.api.logger.put(**logger_config)
            assert response.status_code == 204

            # Make API calls to generate traces
            for _ in range(3):
                response = vm.api.describe_instance.get()
                assert response.status_code == 200

            # Now should have trace logs
            final_log_data = vm.log_data

            # Verify no TRACE logs were present initially
            assert "TRACE" not in initial_log_data, "Expected no TRACE logs before setting log level to Trace"

            # The new log data should contain trace information
            new_log_data = final_log_data[len(initial_log_data):]
            assert "TRACE" in new_log_data, "Expected TRACE logs after setting log level to Trace"

        finally:
            vm.kill()

    finally:
        cleanup_instrumentation()
