L4Re Operating System Framework
Interface and Usage Documentation
Loading...
Searching...
No Matches
virtio-block
1// vi:ft=cpp
2/* SPDX-License-Identifier: MIT */
3/*
4 * Copyright (C) 2017-2021 Kernkonzept GmbH.
5 * Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
6 *
7 */
8#pragma once
9
10#include <l4/cxx/unique_ptr>
11#include <l4/re/util/unique_cap>
12
13#include <climits>
14
15#include <l4/l4virtio/virtio.h>
16#include <l4/l4virtio/virtio_block.h>
17#include <l4/l4virtio/server/l4virtio>
18#include <l4/sys/cxx/ipc_epiface>
19
20namespace L4virtio { namespace Svr {
21
22template <typename Ds_data> class Block_dev_base;
23
27template<typename Ds_data>
29{
30 friend class Block_dev_base<Ds_data>;
31 enum { Header_size = sizeof(l4virtio_block_header_t) };
32
33public:
34 struct Data_block
35 {
39 void *addr;
41 l4_uint32_t len;
42
43 Data_block() = default;
44
45 Data_block(Driver_mem_region_t<Ds_data> *m, Virtqueue::Desc const &desc,
46 Request_processor const *)
47 : mem(m), addr(m->local(desc.addr)), len(desc.len)
48 {}
49 };
50
51
52
63 unsigned data_size() const
64 {
66 Data_block data;
67
68 rp.start(_mem_list, _request, &data);
69
70 unsigned total = data.len;
71
72 try
73 {
74 while (rp.has_more())
75 {
76 rp.next(_mem_list, &data);
77 total += data.len;
78 }
79 }
80 catch (Bad_descriptor const &e)
81 {
82 // need to convert the exception because e contains a raw pointer to rp
83 throw L4::Runtime_error(-L4_EIO, "bad virtio descriptor");
84 }
85
86 if (total < Header_size + 1)
87 throw L4::Runtime_error(-L4_EIO, "virtio request too short");
88
89 return total - Header_size - 1;
90 }
91
95 bool has_more()
96 {
97 // peek into the remaining data
98 while (_data.len == 0 && _rp.has_more())
99 _rp.next(_mem_list, &_data);
100
101 // there always must be one byte left for status
102 return (_data.len > 1 || _rp.has_more());
103 }
104
113 Data_block next_block()
114 {
115 Data_block out;
116
117 if (_data.len == 0)
118 {
119 if (!_rp.has_more())
121 "No more data blocks in virtio request");
122
123 if (_todo_blocks == 0)
125 --_todo_blocks;
126
127 _rp.next(_mem_list, &_data);
128 }
129
130 if (_data.len > _max_block_size)
132
133 out = _data;
134
135 if (!_rp.has_more())
136 {
137 --(out.len);
138 _data.len = 1;
139 _data.addr = static_cast<char *>(_data.addr) + out.len;
140 }
141 else
142 _data.len = 0; // is consumed
143
144 return out;
145 }
146
149 { return _header; }
150
151private:
152 Block_request(Virtqueue::Request req, Driver_mem_list_t<Ds_data> *mem_list,
153 unsigned max_blocks, l4_uint32_t max_block_size)
154 : _mem_list(mem_list),
155 _request(req),
156 _todo_blocks(max_blocks),
157 _max_block_size(max_block_size)
158 {
159 // read header which should be in the first block
160 _rp.start(mem_list, _request, &_data);
161 --_todo_blocks;
162
163 if (_data.len < Header_size)
165
166 _header = *(static_cast<l4virtio_block_header_t *>(_data.addr));
167
168 _data.addr = static_cast<char *>(_data.addr) + Header_size;
169 _data.len -= Header_size;
170
171 // if there is no space for status bit we cannot really recover
172 if (!_rp.has_more() && _data.len == 0)
174 }
175
176 int release_request(Virtqueue *queue, l4_uint8_t status, unsigned sz)
177 {
178 // write back status
179 // If there was an error on the way or the status byte is in its
180 // own block, fast-forward to the last block.
181 if (_rp.has_more())
182 {
183 while (_rp.next(_mem_list, &_data) && _todo_blocks > 0)
184 --_todo_blocks;
185
186 if (_todo_blocks > 0 && _data.len > 0)
187 *(static_cast<l4_uint8_t *>(_data.addr) + _data.len - 1) = status;
188 else
189 return -L4_EIO; // too many data blocks
190 }
191 else if (_data.len > 0)
192 *(static_cast<l4_uint8_t *>(_data.addr)) = status;
193 else
194 return -L4_EIO; // no space for final status byte
195
196 // now release the head
197 queue->consumed(_request, sz);
198
199 return L4_EOK;
200 }
201
207 Driver_mem_list_t<Ds_data> *_mem_list;
211 Request_processor _rp;
213 Data_block _data;
214
216 Virtqueue::Request _request;
218 unsigned _todo_blocks;
220 l4_uint32_t _max_block_size;
221};
222
223struct Block_features : public Dev_config::Features
224{
225 Block_features() = default;
226 Block_features(l4_uint32_t raw) : Dev_config::Features(raw) {}
227
229 CXX_BITFIELD_MEMBER( 1, 1, size_max, raw);
231 CXX_BITFIELD_MEMBER( 2, 2, seg_max, raw);
233 CXX_BITFIELD_MEMBER( 4, 4, geometry, raw);
235 CXX_BITFIELD_MEMBER( 5, 5, ro, raw);
237 CXX_BITFIELD_MEMBER( 6, 6, blk_size, raw);
239 CXX_BITFIELD_MEMBER( 9, 9, flush, raw);
241 CXX_BITFIELD_MEMBER(10, 10, topology, raw);
243 CXX_BITFIELD_MEMBER(11, 11, config_wce, raw);
245 CXX_BITFIELD_MEMBER(13, 13, discard, raw);
247 CXX_BITFIELD_MEMBER(14, 14, write_zeroes, raw);
248};
249
250
256template <typename Ds_data>
258{
259private:
260 L4Re::Util::Unique_cap<L4::Irq> _kick_guest_irq;
261 Virtqueue _queue;
262 unsigned _vq_max;
263 l4_uint32_t _max_block_size = UINT_MAX;
264 Dev_config_t<l4virtio_block_config_t> _dev_config;
265
266public:
267 typedef Block_request<Ds_data> Request;
268
269protected:
270 Block_features negotiated_features() const
271 { return _dev_config.negotiated_features(0); }
272
273 Block_features device_features() const
274 { return _dev_config.host_features(0); }
275
276 void set_device_features(Block_features df)
277 { _dev_config.host_features(0) = df.raw; }
278
289 {
290 _dev_config.priv_config()->size_max = sz;
291 Block_features df = device_features();
292 df.size_max() = true;
293 set_device_features(df);
294
295 _max_block_size = sz;
296 }
297
303 {
304 _dev_config.priv_config()->seg_max = sz;
305 Block_features df = device_features();
306 df.seg_max() = true;
307 set_device_features(df);
308 }
309
313 void set_geometry(l4_uint16_t cylinders, l4_uint8_t heads, l4_uint8_t sectors)
314 {
315 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
316 pc->geometry.cylinders = cylinders;
317 pc->geometry.heads = heads;
318 pc->geometry.sectors = sectors;
319 Block_features df = device_features();
320 df.geometry() = true;
321 set_device_features(df);
322 }
323
331 {
332 _dev_config.priv_config()->blk_size = sz;
333 Block_features df = device_features();
334 df.blk_size() = true;
335 set_device_features(df);
336 }
337
346 void set_topology(l4_uint8_t physical_block_exp,
347 l4_uint8_t alignment_offset,
348 l4_uint32_t min_io_size,
349 l4_uint32_t opt_io_size)
350 {
351 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
352 pc->topology.physical_block_exp = physical_block_exp;
353 pc->topology.alignment_offset = alignment_offset;
354 pc->topology.min_io_size = min_io_size;
355 pc->topology.opt_io_size = opt_io_size;
356 Block_features df = device_features();
357 df.topology() = true;
358 set_device_features(df);
359 }
360
363 {
364 Block_features df = device_features();
365 df.flush() = true;
366 set_device_features(df);
367 }
368
374 {
375 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
376 pc->writeback = writeback;
377 Block_features df = device_features();
378 df.config_wce() = true;
379 set_device_features(df);
380 }
381
387 {
388 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
389 return pc->writeback;
390 }
391
400 void set_discard(l4_uint32_t max_discard_sectors, l4_uint32_t max_discard_seg,
401 l4_uint32_t discard_sector_alignment)
402 {
403 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
404 pc->max_discard_sectors = max_discard_sectors;
405 pc->max_discard_seg = max_discard_seg;
406 pc->discard_sector_alignment = discard_sector_alignment;
407 Block_features df = device_features();
408 df.discard() = true;
409 set_device_features(df);
410 }
411
420 void set_write_zeroes(l4_uint32_t max_write_zeroes_sectors,
421 l4_uint32_t max_write_zeroes_seg,
422 l4_uint8_t write_zeroes_may_unmap)
423 {
424 l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
425 pc->max_write_zeroes_sectors = max_write_zeroes_sectors;
426 pc->max_write_zeroes_seg = max_write_zeroes_seg;
427 pc->write_zeroes_may_unmap = write_zeroes_may_unmap;
428 Block_features df = device_features();
429 df.write_zeroes() = true;
430 set_device_features(df);
431 }
432
433public:
442 Block_dev_base(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity,
443 bool read_only)
444 : L4virtio::Svr::Device_t<Ds_data>(&_dev_config),
445 _vq_max(queue_size),
446 _dev_config(vendor, L4VIRTIO_ID_BLOCK, 1)
447 {
448 this->reset_queue_config(0, queue_size);
449
450 Block_features df(0);
451 df.ring_indirect_desc() = true;
452 df.ro() = read_only;
453 set_device_features(df);
454
455 _dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);
456
457 _dev_config.priv_config()->capacity = capacity;
458 }
459
463 virtual void reset_device() = 0;
464
468 virtual bool queue_stopped() = 0;
469
481 void finalize_request(cxx::unique_ptr<Request> req, unsigned sz,
483 {
484 if (_dev_config.status().fail_state() || !_queue.ready())
485 return;
486
487 if (req->release_request(&_queue, status, sz) < 0)
488 this->device_error();
489
490 if (_queue.no_notify_guest())
491 return;
492
493 _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
494 _kick_guest_irq->trigger();
495
496 // Request can be dropped here.
497 }
498
499 int reconfig_queue(unsigned idx) override
500 {
501 if (idx == 0 && this->setup_queue(&_queue, 0, _vq_max))
502 return 0;
503
504 return -L4_EINVAL;
505 }
506
507 void reset() override
508 {
509 _queue.disable();
510 _dev_config.reset_queue(0, _vq_max);
511 _dev_config.reset_hdr();
512 reset_device();
513 }
514
515protected:
516 bool check_for_new_requests()
517 {
518 if (!_queue.ready() || queue_stopped())
519 return false;
520
521 if (_dev_config.status().fail_state())
522 return false;
523
524 return _queue.desc_avail();
525 }
526
528 cxx::unique_ptr<Request> get_request()
529 {
530 cxx::unique_ptr<Request> req;
531
532 if (!_queue.ready() || queue_stopped())
533 return req;
534
535 if (_dev_config.status().fail_state())
536 return req;
537
538 auto r = _queue.next_avail();
539 if (!r)
540 return req;
541
542 try
543 {
544 cxx::unique_ptr<Request> cur{
545 new Request(r, &(this->_mem_info), _vq_max, _max_block_size)};
546
547 req = cxx::move(cur);
548 }
549 catch (Bad_descriptor const &e)
550 {
551 this->device_error();
552 return req;
553 }
554
555 return req;
556 }
557
558private:
559 void register_single_driver_irq() override
560 {
561 _kick_guest_irq = L4Re::Util::Unique_cap<L4::Irq>(
562 L4Re::chkcap(this->server_iface()->template rcv_cap<L4::Irq>(0)));
563
564 L4Re::chksys(this->server_iface()->realloc_rcv_cap(0));
565 }
566
567 void trigger_driver_config_irq() override
568 {
569 _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
570 _kick_guest_irq->trigger();
571 }
572
573 bool check_queues() override
574 {
575 if (!_queue.ready())
576 {
577 reset();
578 return false;
579 }
580
581 return true;
582 }
583};
584
585template <typename Ds_data>
586struct Block_dev
587: Block_dev_base<Ds_data>,
588 L4::Epiface_t<Block_dev<Ds_data>, L4virtio::Device>
589{
590private:
591 class Irq_object : public L4::Irqep_t<Irq_object>
592 {
593 public:
594 Irq_object(Block_dev<Ds_data> *parent) : _parent(parent) {}
595
596 void handle_irq()
597 {
598 _parent->kick();
599 }
600
601 private:
602 Block_dev<Ds_data> *_parent;
603 };
604 Irq_object _irq_handler;
605
606protected:
607 L4::Epiface *irq_iface()
608 { return &_irq_handler; }
609
610public:
611 Block_dev(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity,
612 bool read_only)
613 : Block_dev_base<Ds_data>(vendor, queue_size, capacity, read_only),
614 _irq_handler(this)
615 {}
616
627 L4::Cap<void> register_obj(L4::Registry_iface *registry,
628 char const *service = 0)
629 {
630 L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));
631 L4::Cap<void> ret;
632 if (service)
633 ret = registry->register_obj(this, service);
634 else
635 ret = registry->register_obj(this);
636 L4Re::chkcap(ret);
637
638 return ret;
639 }
640
641 L4::Cap<void> register_obj(L4::Registry_iface *registry,
643 {
644 L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));
645
646 return L4Re::chkcap(registry->register_obj(this, ep));
647 }
648
649 typedef Block_request<Ds_data> Request;
664 virtual bool process_request(cxx::unique_ptr<Request> &&req) = 0;
665
666protected:
667 L4::Ipc_svr::Server_iface *server_iface() const override
668 {
669 return this->L4::Epiface::server_iface();
670 }
671
672 void kick()
673 {
674 for (;;)
675 {
676 auto req = this->get_request();
677 if (!req)
678 return;
679 if (!this->process_request(cxx::move(req)))
680 return;
681 }
682 }
683
684private:
685 L4::Cap<L4::Irq> device_notify_irq() const override
686 {
687 return L4::cap_cast<L4::Irq>(_irq_handler.obj_cap());
688 }
689};
690
691} }
C++ interface for capabilities.
Definition capability.h:219
Interface for server-loop related functions.
Definition ipc_epiface:48
Abstract interface for object registries.
Definition ipc_epiface:334
virtual L4::Cap< L4::Irq > register_irq_obj(L4::Epiface *o)=0
Register o as server-side object for asynchronous IRQs.
virtual L4::Cap< void > register_obj(L4::Epiface *o, char const *service)=0
Register an L4::Epiface for an IPC gate available in the application's environment under the given service name.
Exception for an abstract runtime error.
Definition exceptions:140
Base class for virtio block devices.
Definition virtio-block:258
void set_write_zeroes(l4_uint32_t max_write_zeroes_sectors, l4_uint32_t max_write_zeroes_seg, l4_uint8_t write_zeroes_may_unmap)
Sets constraints for and enables the write zeroes command.
Definition virtio-block:420
virtual bool queue_stopped()=0
Return true, if the queues should not be processed further.
l4_uint8_t get_writeback()
Get the writeback field from the configuration space.
Definition virtio-block:386
cxx::unique_ptr< Request > get_request()
Return one request if available.
Definition virtio-block:528
void set_blk_size(l4_uint32_t sz)
Sets block disk size to be reported to the client.
Definition virtio-block:330
void set_config_wce(l4_uint8_t writeback)
Sets cache mode and enables the writeback toggle.
Definition virtio-block:373
Block_dev_base(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity, bool read_only)
Create a new virtio block device.
Definition virtio-block:442
void set_flush()
Enables the flush command.
Definition virtio-block:362
void set_size_max(l4_uint32_t sz)
Sets the maximum size of any single segment reported to client.
Definition virtio-block:288
virtual void reset_device()=0
Reset the actual hardware device.
void set_discard(l4_uint32_t max_discard_sectors, l4_uint32_t max_discard_seg, l4_uint32_t discard_sector_alignment)
Sets constraints for and enables the discard command.
Definition virtio-block:400
void reset() override
reset callback, called for doing a device reset
Definition virtio-block:507
void set_geometry(l4_uint16_t cylinders, l4_uint8_t heads, l4_uint8_t sectors)
Set disk geometry that is reported to the client.
Definition virtio-block:313
void set_seg_max(l4_uint32_t sz)
Sets the maximum number of segments in a request that is reported to client.
Definition virtio-block:302
void set_topology(l4_uint8_t physical_block_exp, l4_uint8_t alignment_offset, l4_uint32_t min_io_size, l4_uint32_t opt_io_size)
Sets the I/O alignment information reported back to the client.
Definition virtio-block:346
void finalize_request(cxx::unique_ptr< Request > req, unsigned sz, l4_uint8_t status=L4VIRTIO_BLOCK_S_OK)
Releases resources related to a request and notifies the client.
Definition virtio-block:481
int reconfig_queue(unsigned idx) override
callback for client queue-config request
Definition virtio-block:499
A request to read or write data.
Definition virtio-block:29
unsigned data_size() const
Compute the total size of the data in the request.
Definition virtio-block:63
l4virtio_block_header_t const & header() const
Return the block request header.
Definition virtio-block:148
bool has_more()
Check if the request contains more data blocks.
Definition virtio-block:95
Data_block next_block()
Return next block in scatter-gather list.
Definition virtio-block:113
Server-side L4-VIRTIO device stub.
Definition l4virtio:796
void device_error()
Transition device into DEVICE_NEEDS_RESET state.
Definition l4virtio:1018
Mem_list _mem_info
Memory region list.
Definition l4virtio:801
bool setup_queue(Virtqueue *q, unsigned qn, unsigned num_max)
Enable/disable the specified queue.
Definition l4virtio:1041
virtual L4::Cap< L4::Irq > device_notify_irq(unsigned idx)
Callback to gather the device notification IRQ (multi IRQ).
Definition l4virtio:868
void reset_queue_config(unsigned idx, unsigned num_max, bool inc_generation=false)
Trigger reset for the configuration space for queue idx.
Definition l4virtio:996
List of driver memory regions assigned to a single L4-VIRTIO transport instance.
Definition l4virtio:630
Region of driver memory, that shall be managed locally.
Definition l4virtio:451
T * local(Ptr< T > p) const
Get the local address for driver address p.
Definition l4virtio:616
Encapsulate the state for processing a VIRTIO request.
Definition virtio:454
bool next(DESC_MAN *dm, ARGS... args)
Switch to the next descriptor in a descriptor chain.
Definition virtio:551
bool has_more() const
Are there more chained descriptors?
Definition virtio:534
void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
Start processing a new request.
Definition virtio:482
Virtqueue implementation for the device.
Definition virtio:88
bool desc_avail() const
Test for available descriptors.
Definition virtio:156
Request next_avail()
Get the next available descriptor from the available ring.
Definition virtio:138
Descriptor in the descriptor table.
Definition virtqueue:94
l4_uint32_t len
Length of described buffer.
Definition virtqueue:116
Ptr< void > addr
Address stored in descriptor.
Definition virtqueue:115
Low-level Virtqueue.
Definition virtqueue:88
void disable()
Completely disable the queue.
Definition virtqueue:230
bool no_notify_guest() const
Get the no IRQ flag of this queue.
Definition virtqueue:420
bool ready() const
Test if this queue is in working state.
Definition virtqueue:406
unsigned char l4_uint8_t
Unsigned 8bit value.
Definition l4int.h:36
unsigned int l4_uint32_t
Unsigned 32bit value.
Definition l4int.h:40
unsigned short int l4_uint16_t
Unsigned 16bit value.
Definition l4int.h:38
unsigned long long l4_uint64_t
Unsigned 64bit value.
Definition l4int.h:42
@ L4_EEXIST
Already exists.
Definition err.h:54
@ L4_EINVAL
Invalid argument.
Definition err.h:57
@ L4_EIO
I/O error.
Definition err.h:46
@ L4_EOK
Ok.
Definition err.h:43
@ L4VIRTIO_BLOCK_S_OK
Request finished successfully.
@ L4VIRTIO_FEATURE_VERSION_1
Virtio protocol version 1 supported. Must be 1 for L4virtio.
Definition virtio.h:98
@ L4VIRTIO_ID_BLOCK
General block device.
Definition virtio.h:64
@ L4VIRTIO_IRQ_STATUS_VRING
VRING IRQ pending flag.
Definition virtio.h:109
@ L4VIRTIO_IRQ_STATUS_CONFIG
CONFIG IRQ pending flag.
Definition virtio.h:110
L4::Detail::Unique_cap_impl< T, Smart_cap_auto< L4_FP_ALL_SPACES > > Unique_cap
Unique capability that implements automatic free and unmap of the capability selector.
Definition unique_cap:54
long chksys(long err, char const *extra="", long ret=0)
Generate C++ exception on error.
Definition error_helper:68
T chkcap(T &&cap, char const *extra="", long err=-L4_ENOMEM)
Check for valid capability or raise C++ exception.
Definition error_helper:145
L4-VIRTIO Transport C++ API.
Definition l4virtio:26
Epiface implementation for Kobject-based interface implementations.
Definition ipc_epiface:515
Base class for interface implementations.
Definition ipc_epiface:157
Server_iface * server_iface() const
Get pointer to server interface at which the object is currently registered.
Definition ipc_epiface:224
Epiface implementation for interrupt handlers.
Definition ipc_epiface:294
Exception used by Queue to indicate descriptor errors.
Definition virtio:379
@ Bad_size
Invalid size of memory block.
Definition virtio:387
l4_uint32_t raw
The raw value of the features bitmap.
Definition virtio:68
Device configuration for block devices.
Header structure of a request for a block device.
Unique_cap / Unique_del_cap.