Commit b500902

fix(vsock): pass correct index when triggering interrupts
We were confusing queue indexes with event indexes when passing the index of the queue that needs to be triggered after handling events. Fix the logic to pass the correct index. This also refactors the code slightly so that each event handler method signals the queues it touched. With MMIO we do not need to signal each queue independently (a single signal causes the guest to scan all queues), but with PCI we are using MSI-X, so each queue must be signalled independently.

Also, change the vsock functional integration tests to also run for PCI-enabled microVMs.

Signed-off-by: Babis Chalios <bchalios@amazon.es>
1 parent 4f39170 commit b500902
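
For context, the mix-up is between two unrelated sets of numbers: the virtio queue indexes that `signal_used_queue()` expects and the event-source IDs the event handler dispatches on. The sketch below illustrates the mismatch; the queue-index values (RXQ_INDEX = 0, TXQ_INDEX = 1, EVQ_INDEX = 2) and the RX/TX event-source values are assumptions based on the surrounding code and the usual Firecracker vsock layout, not part of this commit.

```rust
// Illustrative sketch only; constant values are assumed, not quoted from this commit.

// Virtio queue indexes: what `signal_used_queue(index)` expects.
const RXQ_INDEX: usize = 0;
const TXQ_INDEX: usize = 1;
const EVQ_INDEX: usize = 2;

// Event-source IDs used by the event handler (PROCESS_EVQ = 3 and
// PROCESS_NOTIFY_BACKEND = 4 are visible in the diff; RXQ/TXQ values are assumed).
const PROCESS_RXQ: u32 = 1;
const PROCESS_TXQ: u32 = 2;

fn main() {
    // Old behaviour: the handler passed the event-source ID as a queue index,
    // so an RX queue event (source 1) signalled queue 1, i.e. the TX queue.
    let source = PROCESS_RXQ;
    assert_eq!(source as usize, TXQ_INDEX); // wrong queue receives the interrupt
    assert_ne!(source as usize, RXQ_INDEX);

    // Similarly, device.rs signalled queue 0 (the RX queue) when it actually
    // meant the event queue; the fix passes EVQ_INDEX explicitly.
    assert_ne!(0, EVQ_INDEX);
    let _ = PROCESS_TXQ;
}
```

Under MMIO this slip was masked because any used-buffer notification makes the guest rescan all queues; with per-queue MSI-X vectors on PCI, signalling the wrong index means the interrupt for the queue that actually has used buffers never fires.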

File tree

src/vmm/src/devices/virtio/vsock/device.rs
src/vmm/src/devices/virtio/vsock/event_handler.rs
tests/integration_tests/functional/test_vsock.py

3 files changed: +51 -41 lines changed


src/vmm/src/devices/virtio/vsock/device.rs

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ where
             // `TRANSPORT_RESET_EVENT` event we sent during snapshot creation.
             if self.is_activated() {
                 info!("kick vsock {}.", self.id());
-                self.signal_used_queue(0).unwrap();
+                self.signal_used_queue(EVQ_INDEX).unwrap();
             }
         }
     }

src/vmm/src/devices/virtio/vsock/event_handler.rs

Lines changed: 38 additions & 32 deletions
@@ -46,75 +46,80 @@ where
     const PROCESS_EVQ: u32 = 3;
     const PROCESS_NOTIFY_BACKEND: u32 = 4;

-    pub fn handle_rxq_event(&mut self, evset: EventSet) -> bool {
+    pub fn handle_rxq_event(&mut self, evset: EventSet) {
         if evset != EventSet::IN {
             warn!("vsock: rxq unexpected event {:?}", evset);
             METRICS.rx_queue_event_fails.inc();
-            return false;
+            return;
         }

-        let mut raise_irq = false;
         if let Err(err) = self.queue_events[RXQ_INDEX].read() {
             error!("Failed to get vsock rx queue event: {:?}", err);
             METRICS.rx_queue_event_fails.inc();
         } else if self.backend.has_pending_rx() {
-            raise_irq |= self.process_rx();
+            if self.process_rx() {
+                self.signal_used_queue(RXQ_INDEX)
+                    .expect("vsock: Could not trigger device interrupt or RX queue");
+            }
             METRICS.rx_queue_event_count.inc();
         }
-        raise_irq
     }

-    pub fn handle_txq_event(&mut self, evset: EventSet) -> bool {
+    pub fn handle_txq_event(&mut self, evset: EventSet) {
         if evset != EventSet::IN {
             warn!("vsock: txq unexpected event {:?}", evset);
             METRICS.tx_queue_event_fails.inc();
-            return false;
+            return;
         }

-        let mut raise_irq = false;
         if let Err(err) = self.queue_events[TXQ_INDEX].read() {
             error!("Failed to get vsock tx queue event: {:?}", err);
             METRICS.tx_queue_event_fails.inc();
         } else {
-            raise_irq |= self.process_tx();
+            if self.process_tx() {
+                self.signal_used_queue(TXQ_INDEX)
+                    .expect("vsock: Could not trigger device interrupt or TX queue");
+            }
             METRICS.tx_queue_event_count.inc();
             // The backend may have queued up responses to the packets we sent during
             // TX queue processing. If that happened, we need to fetch those responses
             // and place them into RX buffers.
-            if self.backend.has_pending_rx() {
-                raise_irq |= self.process_rx();
+            if self.backend.has_pending_rx() && self.process_rx() {
+                self.signal_used_queue(RXQ_INDEX)
+                    .expect("vsock: Could not trigger device interrupt or RX queue");
             }
         }
-        raise_irq
     }

-    pub fn handle_evq_event(&mut self, evset: EventSet) -> bool {
+    pub fn handle_evq_event(&mut self, evset: EventSet) {
         if evset != EventSet::IN {
             warn!("vsock: evq unexpected event {:?}", evset);
             METRICS.ev_queue_event_fails.inc();
-            return false;
+            return;
         }

         if let Err(err) = self.queue_events[EVQ_INDEX].read() {
             error!("Failed to consume vsock evq event: {:?}", err);
             METRICS.ev_queue_event_fails.inc();
         }
-        false
     }

     /// Notify backend of new events.
-    pub fn notify_backend(&mut self, evset: EventSet) -> bool {
+    pub fn notify_backend(&mut self, evset: EventSet) {
         self.backend.notify(evset);
         // After the backend has been kicked, it might've freed up some resources, so we
         // can attempt to send it more data to process.
         // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
         // returning an error) at some point in the past, now is the time to try walking the
         // TX queue again.
-        let mut raise_irq = self.process_tx();
-        if self.backend.has_pending_rx() {
-            raise_irq |= self.process_rx();
+        if self.process_tx() {
+            self.signal_used_queue(TXQ_INDEX)
+                .expect("vsock: Could not trigger device interrupt or TX queue");
+        }
+        if self.backend.has_pending_rx() && self.process_rx() {
+            self.signal_used_queue(RXQ_INDEX)
+                .expect("vsock: Could not trigger device interrupt or RX queue");
         }
-        raise_irq
     }

     fn register_runtime_events(&self, ops: &mut EventOps) {
@@ -182,19 +187,14 @@ where
         let evset = event.event_set();

         if self.is_activated() {
-            let mut raise_irq = false;
             match source {
                 Self::PROCESS_ACTIVATE => self.handle_activate_event(ops),
-                Self::PROCESS_RXQ => raise_irq = self.handle_rxq_event(evset),
-                Self::PROCESS_TXQ => raise_irq = self.handle_txq_event(evset),
-                Self::PROCESS_EVQ => raise_irq = self.handle_evq_event(evset),
-                Self::PROCESS_NOTIFY_BACKEND => raise_irq = self.notify_backend(evset),
+                Self::PROCESS_RXQ => self.handle_rxq_event(evset),
+                Self::PROCESS_TXQ => self.handle_txq_event(evset),
+                Self::PROCESS_EVQ => self.handle_evq_event(evset),
+                Self::PROCESS_NOTIFY_BACKEND => self.notify_backend(evset),
                 _ => warn!("Unexpected vsock event received: {:?}", source),
             };
-            if raise_irq {
-                self.signal_used_queue(source as usize)
-                    .expect("vsock: Could not trigger device interrupt");
-            }
         } else {
             warn!(
                 "Vsock: The device is not yet activated. Spurious event received: {:?}",
@@ -302,7 +302,9 @@ mod tests {
         let mut ctx = test_ctx.create_event_handler_context();
         ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());

-        assert!(!ctx.device.handle_txq_event(EventSet::IN));
+        let metric_before = METRICS.tx_queue_event_fails.count();
+        ctx.device.handle_txq_event(EventSet::IN);
+        assert_eq!(metric_before + 1, METRICS.tx_queue_event_fails.count());
     }
 }

@@ -363,7 +365,9 @@ mod tests {
         let mut ctx = test_ctx.create_event_handler_context();
         ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
         ctx.device.backend.set_pending_rx(false);
-        assert!(!ctx.device.handle_rxq_event(EventSet::IN));
+        let metric_before = METRICS.rx_queue_event_fails.count();
+        ctx.device.handle_rxq_event(EventSet::IN);
+        assert_eq!(metric_before + 1, METRICS.rx_queue_event_fails.count());
     }
 }

@@ -374,7 +378,9 @@ mod tests {
         let test_ctx = TestContext::new();
         let mut ctx = test_ctx.create_event_handler_context();
         ctx.device.backend.set_pending_rx(false);
-        assert!(!ctx.device.handle_evq_event(EventSet::IN));
+        let metric_before = METRICS.ev_queue_event_fails.count();
+        ctx.device.handle_evq_event(EventSet::IN);
+        assert_eq!(metric_before + 1, METRICS.ev_queue_event_fails.count());
     }
 }
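
The refactor moves interrupt signalling into each handler so the queue that actually had work completed is named at the call site. Below is a minimal, self-contained sketch of that pattern; `MockVsock`, its fields, and this `signal_used_queue` are illustrative stand-ins, not the Firecracker types.

```rust
// Illustrative stand-in types; not the Firecracker implementation.
const RXQ_INDEX: usize = 0;
const TXQ_INDEX: usize = 1;

struct MockVsock {
    // With MSI-X each queue index maps to its own interrupt vector, so the
    // device has to raise a signal per queue rather than one shared kick.
    signalled: [bool; 3],
}

impl MockVsock {
    fn signal_used_queue(&mut self, qidx: usize) -> Result<(), String> {
        self.signalled
            .get_mut(qidx)
            .map(|s| *s = true)
            .ok_or_else(|| format!("no such queue: {qidx}"))
    }

    // Mirrors the refactored TX handler: signal TXQ for completed TX work and,
    // independently, RXQ if the backend queued up responses in the meantime.
    fn handle_txq_event(&mut self, tx_work_done: bool, rx_pending: bool) {
        if tx_work_done {
            self.signal_used_queue(TXQ_INDEX).expect("signal TX queue");
        }
        if rx_pending {
            self.signal_used_queue(RXQ_INDEX).expect("signal RX queue");
        }
    }
}

fn main() {
    let mut dev = MockVsock { signalled: [false; 3] };
    dev.handle_txq_event(true, true);
    // Both per-queue interrupts fire; on MMIO a single shared signal would
    // have been enough for the guest to rescan every queue.
    assert!(dev.signalled[TXQ_INDEX] && dev.signalled[RXQ_INDEX]);
    println!("signalled queues: {:?}", dev.signalled);
}
```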

tests/integration_tests/functional/test_vsock.py

Lines changed: 12 additions & 8 deletions
@@ -37,15 +37,15 @@
 TEST_WORKER_COUNT = 10


-def test_vsock(uvm_plain_any, bin_vsock_path, test_fc_session_root_path):
+def test_vsock(uvm_plain_any, pci_enabled, bin_vsock_path, test_fc_session_root_path):
     """
     Test guest and host vsock initiated connections.

     Check the module docstring for details on the setup.
     """

     vm = uvm_plain_any
-    vm.spawn()
+    vm.spawn(pci=pci_enabled)

     vm.basic_config()
     vm.add_net_iface()
@@ -102,12 +102,12 @@ def negative_test_host_connections(vm, blob_path, blob_hash):
     validate_fc_metrics(metrics)


-def test_vsock_epipe(uvm_plain, bin_vsock_path, test_fc_session_root_path):
+def test_vsock_epipe(uvm_plain, pci_enabled, bin_vsock_path, test_fc_session_root_path):
     """
     Vsock negative test to validate SIGPIPE/EPIPE handling.
     """
     vm = uvm_plain
-    vm.spawn()
+    vm.spawn(pci=pci_enabled)
     vm.basic_config()
     vm.add_net_iface()
     vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
@@ -129,7 +129,7 @@ def test_vsock_epipe(uvm_plain, bin_vsock_path, test_fc_session_root_path):


 def test_vsock_transport_reset_h2g(
-    uvm_nano, microvm_factory, bin_vsock_path, test_fc_session_root_path
+    uvm_plain, pci_enabled, microvm_factory, bin_vsock_path, test_fc_session_root_path
 ):
     """
     Vsock transport reset test.
@@ -146,7 +146,9 @@ def test_vsock_transport_reset_h2g(
     6. Close VM -> Load VM from Snapshot -> check that vsock
        device is still working.
     """
-    test_vm = uvm_nano
+    test_vm = uvm_plain
+    test_vm.spawn(pci=pci_enabled)
+    test_vm.basic_config(vcpu_count=2, mem_size_mib=256)
     test_vm.add_net_iface()
     test_vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
     test_vm.start()
@@ -213,11 +215,13 @@ def test_vsock_transport_reset_h2g(
     validate_fc_metrics(metrics)


-def test_vsock_transport_reset_g2h(uvm_nano, microvm_factory):
+def test_vsock_transport_reset_g2h(uvm_plain, pci_enabled, microvm_factory):
     """
     Vsock transport reset test.
     """
-    test_vm = uvm_nano
+    test_vm = uvm_plain
+    test_vm.spawn(pci=pci_enabled)
+    test_vm.basic_config(vcpu_count=2, mem_size_mib=256)
     test_vm.add_net_iface()
     test_vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
     test_vm.start()
