L4Re Operating System Framework
Interface and Usage Documentation
virtio_client.h
1/*
2 * Copyright (C) 2018-2022 Kernkonzept GmbH.
3 * Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
4 *
5 * This file is distributed under the terms of the GNU General Public
6 * License, version 2. Please see the COPYING-GPL-2 file for details.
7 */
8#pragma once
9
10#include <l4/cxx/ref_ptr>
11#include <l4/cxx/unique_ptr>
12#include <l4/cxx/utils>
13#include <l4/sys/cache.h>
14
15#include <l4/sys/task>
16
17#include <l4/l4virtio/server/virtio-block>
18
19#include <l4/libblock-device/debug.h>
20#include <l4/libblock-device/device.h>
21#include <l4/libblock-device/types.h>
22#include <l4/libblock-device/request.h>
23
24namespace Block_device {
25
26template <typename DEV>
27class Virtio_client
28: public L4virtio::Svr::Block_dev_base<Mem_region_info>,
29 public L4::Epiface_t<Virtio_client<DEV>, L4virtio::Device>
30{
31protected:
32 class Generic_pending_request : public Pending_request
33 {
34 protected:
35 int check_error(int result)
36 {
37 if (result < 0 && result != -L4_EBUSY)
38 client->handle_request_error(result, this);
39
40 return result;
41 }
42
43 public:
44 explicit Generic_pending_request(Virtio_client *c, cxx::unique_ptr<Request> &&req)
45 : request(cxx::move(req)), client(c)
46 {}
47
48 void fail_request() override
49 {
50 client->finalize_request(cxx::move(request), 0, L4VIRTIO_BLOCK_S_IOERR);
51 }
52
53 cxx::unique_ptr<Request> request;
54 Virtio_client *client;
55 };
56
57 struct Pending_inout_request : public Generic_pending_request
58 {
59 Inout_block blocks;
60 L4Re::Dma_space::Direction dir;
61
62 explicit Pending_inout_request(Virtio_client *c,
63 cxx::unique_ptr<Request> &&req)
64 : Generic_pending_request(c, cxx::move(req))
65 {
66 dir = this->request->header().type == L4VIRTIO_BLOCK_T_OUT
67 ? L4Re::Dma_space::Direction::To_device
68 : L4Re::Dma_space::Direction::From_device;
69 }
70
71 ~Pending_inout_request() override
72 {
73 this->client->release_dma(this);
74 }
75
76 int handle_request() override
77 { return this->check_error(this->client->inout_request(this)); }
78 };
79
80 struct Pending_flush_request : public Generic_pending_request
81 {
82 using Generic_pending_request::Generic_pending_request;
83
84 int handle_request() override
85 { return this->check_error(this->client->flush_request(this)); }
86 };
87
88 struct Pending_cmd_request : public Generic_pending_request
89 {
90 Inout_block blocks;
91
92 using Generic_pending_request::Generic_pending_request;
93
94 int handle_request() override
95 {
96 return this->check_error(this->client->discard_cmd_request(this, 0));
97 }
98 };
99
100public:
101 using Device_type = DEV;
102
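// Create the virtio interface for a device. 'numds' limits the number of
// dataspaces the client may register for shared memory, 'readonly' forces
// read-only export even for writable devices. The capacity is announced to
// the client in 512-byte virtio sectors (capacity() >> 9).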
111 Virtio_client(cxx::Ref_ptr<Device_type> const &dev, unsigned numds, bool readonly)
112 : L4virtio::Svr::Block_dev_base<Mem_region_info>(L4VIRTIO_VENDOR_KK, 0x100,
113 dev->capacity() >> 9,
114 dev->is_read_only()
115 || readonly),
116 _client_invalidate_cb(nullptr),
117 _client_idle_cb(nullptr),
118 _numds(numds),
119 _device(dev),
120 _in_flight(0)
121 {
122 reset_client();
123 init_discard_info(0);
124 }
125
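// Reset the device on behalf of the client: invalidate any in-flight
// processing via the registered callback, reset the underlying device and
// drop the negotiated feature bits.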
129 void reset_device() override
130 {
131 if (_client_invalidate_cb)
132 _client_invalidate_cb(false);
133 _device->reset();
134 _negotiated_features.raw = 0;
135 }
136
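// Reinitialize the per-client state: dataspace bookkeeping, segment and size
// limits, flush support and write-through caching; the shutdown state goes
// back to Running.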
140 void reset_client()
141 {
142 init_mem_info(_numds);
143 set_seg_max(_device->max_segments());
144 set_size_max(_device->max_size());
145 set_flush();
146 set_config_wce(0); // starting in write-through mode
147 _shutdown_state = Shutdown_type::Running;
148 _negotiated_features.raw = 0;
149 }
150
151 bool queue_stopped() override
152 { return _shutdown_state == Shutdown_type::Client_gone; }
153
154 // make these interfaces public so that a request scheduler can invoke them
155 using L4virtio::Svr::Block_dev_base<Mem_region_info>::check_for_new_requests;
156 using L4virtio::Svr::Block_dev_base<Mem_region_info>::get_request;
157
158 // make it possible for the request scheduler to register a direct callback
159 void set_client_invalidate_cb(std::function<void(bool)> &&cb)
160 {
161 _client_invalidate_cb = cb;
162 }
163
164 void set_client_idle_cb(std::function<void()> &&cb)
165 {
166 _client_idle_cb = cb;
167 }
168
169 // make it possible for the request scheduler to register a device notify IRQ
170 void set_device_notify_irq(L4::Cap<L4::Irq> irq)
171 {
172 _device_notify_irq = irq;
173 }
174
175 L4::Cap<L4::Irq> device_notify_irq() const override
176 {
177 return _device_notify_irq;
178 }
179
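// Convert a virtio request into a pending request that can be submitted to
// the device. Returns nullptr if the request was already finalized (client
// shutting down, malformed or unsupported request); otherwise the caller
// submits it via Pending_request::handle_request().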
185 cxx::unique_ptr<Pending_request> start_request(cxx::unique_ptr<Request> &&req)
186 {
187 auto trace = Dbg::trace("virtio");
188
189 cxx::unique_ptr<Pending_request> pending;
190
191 if (_shutdown_state != Shutdown_type::Running)
192 {
193 trace.printf("Failing requests as the client is shutting down\n");
194 this->finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_IOERR);
195 return pending;
196 }
197
198 trace.printf("request received: type 0x%x, sector 0x%llx\n",
199 req->header().type, req->header().sector);
200 switch (req->header().type)
201 {
202 case L4VIRTIO_BLOCK_T_IN:
203 case L4VIRTIO_BLOCK_T_OUT:
204 {
205 auto p = cxx::make_unique<Pending_inout_request>(this, cxx::move(req));
206 int ret = build_inout_blocks(p.get());
207 if (ret == L4_EOK)
208 pending.reset(p.release());
209 else
210 handle_request_error(ret, p.get());
211 break;
212 }
213 case L4VIRTIO_BLOCK_T_FLUSH:
214 {
215 auto p = cxx::make_unique<Pending_flush_request>(this, cxx::move(req));
216 int ret = check_flush_request(p.get());
217 if (ret == L4_EOK)
218 pending.reset(p.release());
219 else
220 handle_request_error(ret, p.get());
221 break;
222 }
223 case L4VIRTIO_BLOCK_T_DISCARD:
224 case L4VIRTIO_BLOCK_T_WRITE_ZEROES:
225 {
226 auto p = cxx::make_unique<Pending_cmd_request>(this, cxx::move(req));
227 int ret = build_discard_cmd_blocks(p.get());
228 if (ret == L4_EOK)
229 pending.reset(p.release());
230 else
231 handle_request_error(ret, p.get());
232 break;
233 }
234 default:
235 finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_UNSUPP);
236 break;
237 }
238
239 return pending;
240 }
241
242 void task_finished(Generic_pending_request *preq, int error, l4_size_t sz)
243 {
244 _in_flight--;
245
246 // move on to the next request
247
248 // Only finalize if the client is still alive
249 if (_shutdown_state != Client_gone)
250 finalize_request(cxx::move(preq->request), sz, error);
251
252 // New requests might be schedulable
253 if (_client_idle_cb)
254 _client_idle_cb();
255
256 // pending request can be dropped
257 cxx::unique_ptr<Pending_request> ureq(preq);
258 }
259
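// Track life-cycle changes of the client connection or the system and bring
// the device and the virtio state machine into a consistent state for the
// new shutdown type.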
263 void shutdown_event(Shutdown_type type)
264 {
265 // If the client is already in the Client_gone state, it means that it was
266 // already shut down and this is another go at its removal. This situation
267 // can occur because at the time of its previous removal attempt there were
268 // still I/O requests in progress.
269 if (_shutdown_state == Client_gone)
270 return;
271
272 // Transitions from System_shutdown are also not allowed; the initiator
273 // should take care of graceful handling of this.
274 l4_assert(_shutdown_state != System_shutdown);
275 // If we are transitioning from System_suspend, it must be only to Running;
276 // the initiator should handle this gracefully.
277 l4_assert(_shutdown_state != System_suspend
278 || type == Shutdown_type::Running);
279
280 // Update shutdown state of the client
281 _shutdown_state = type;
282
283 if (type == Shutdown_type::Client_shutdown)
284 {
285 reset();
286 reset_client();
287 // Client_shutdown must transition to the Running state
288 l4_assert(_shutdown_state == Shutdown_type::Running);
289 }
290
291 if (type != Shutdown_type::Running)
292 {
293 if (_client_invalidate_cb)
294 _client_invalidate_cb(type != Shutdown_type::Client_gone);
295 _device->reset();
296 }
297 }
298
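// Attach the client to an object registry, either under the given 'service'
// name from the environment or via a newly allocated IPC gate.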
311 L4::Cap<void> register_obj(L4::Registry_iface *registry,
312 char const *service = 0)
313 {
314 L4::Cap<void> ret;
315 if (service)
316 ret = registry->register_obj(this, service);
317 else
318 ret = registry->register_obj(this);
319 L4Re::chkcap(ret);
320
321 return ret;
322 }
323
324 L4::Cap<void> register_obj(L4::Registry_iface *registry,
325 L4::Cap<L4::Rcv_endpoint> ep)
326 {
327 return L4Re::chkcap(registry->register_obj(this, ep));
328 }
329
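// Detach the client from the object registry it was registered with.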
335 void unregister_obj(L4::Registry_iface *registry)
336 {
337 registry->unregister_obj(this);
338 }
339
340 bool busy() const
341 {
342 return _in_flight != 0;
343 }
344
345 Notification_domain const *notification_domain() const
346 { return _device->notification_domain(); }
347
348protected:
349 L4::Ipc_svr::Server_iface *server_iface() const override
350 {
351 return this->L4::Epiface::server_iface();
352 }
353
354private:
355 void release_dma(Pending_inout_request *req)
356 {
357 // unmap DMA regions
358 Inout_block *cur = &req->blocks;
359 while (cur)
360 {
361 if (cur->num_sectors)
362 _device->dma_unmap(cur->dma_addr, cur->num_sectors, req->dir);
363 cur = cur->next.get();
364 }
365 }
366
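// Translate the scatter-gather list of a read/write request into a chain of
// Inout_blocks: virtio sectors are fixed at 512 bytes, so sector numbers are
// converted to the device's native sector size (sps = sector_size() >> 9),
// and every segment is bounds-checked and DMA-mapped before submission.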
367 int build_inout_blocks(Pending_inout_request *preq)
368 {
369 auto *req = preq->request.get();
370 l4_size_t sps = _device->sector_size() >> 9;
371 l4_uint64_t current_sector = req->header().sector / sps;
372 l4_uint64_t sectors = _device->capacity() / _device->sector_size();
373 auto dir = preq->dir;
374
375 l4_uint32_t flags = 0;
376 if (req->header().type == L4VIRTIO_BLOCK_T_OUT)
377 {
378 // If RO was offered, every write must fail
379 if (device_features().ro())
380 return -L4_EIO;
381
382 // Figure out whether the write has write-through or write-back semantics
383 if (_negotiated_features.config_wce())
384 {
385 if (get_writeback() == 1)
386 flags = Block_device::Inout_f_wb;
387 }
388 else if (_negotiated_features.flush())
389 flags = Block_device::Inout_f_wb;
390 }
391
392 // Check alignment of the first sector
393 if (current_sector * sps != req->header().sector)
394 return -L4_EIO;
395
396 Inout_block *last_blk = nullptr;
397
398 size_t seg = 0;
399
400 while (req->has_more())
401 {
402 Request::Data_block b;
403
404 if (++seg > _device->max_segments())
405 return -L4_EIO;
406
407 try
408 {
409 b = req->next_block();
410 }
411 catch (L4virtio::Svr::Bad_descriptor const &e)
412 {
413 Dbg::warn().printf("Descriptor error: %s\n", e.message());
414 return -L4_EIO;
415 }
416
417 l4_size_t off = b.mem->ds_offset() + (l4_addr_t) b.addr
418 - (l4_addr_t) b.mem->local_base();
419
420 l4_size_t sz = b.len / _device->sector_size();
421
422 if (sz * _device->sector_size() != b.len)
423 {
424 Dbg::warn().printf("Bad block size 0x%x\n", b.len);
425 return -L4_EIO;
426 };
427
428 // Check bounds
429 if (sz > sectors)
430 return -L4_EIO;
431 if (current_sector > sectors - sz)
432 return -L4_EIO;
433
434 Inout_block *blk;
435 if (last_blk)
436 {
437 last_blk->next = cxx::make_unique<Inout_block>();
438 blk = last_blk->next.get();
439 }
440 else
441 blk = &preq->blocks;
442
443 L4Re::Dma_space::Dma_addr phys;
444 long ret = _device->dma_map(b.mem, off, sz, dir, &phys);
445 if (ret < 0)
446 return ret;
447
448 blk->dma_addr = phys;
449 blk->virt_addr = (void *) ((l4_addr_t)b.mem->local_base() + off);
450 blk->num_sectors = sz;
451 current_sector += sz;
452 blk->flags = flags;
453
454 last_blk = blk;
455 }
456
457 return L4_EOK;
458 }
459
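// Cache maintenance around DMA transfers: clean the buffers before the
// device reads them and invalidate them after the device has written them,
// so that CPU caches and device DMA observe the same memory contents.
// Nothing to do for coherently connected devices (Dma_space::None).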
460 void maintain_cache_before_req(Pending_inout_request const *preq)
461 {
462 if (preq->dir == L4Re::Dma_space::None)
463 return;
464 for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
465 {
466 l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
467 if (vstart)
468 {
469 l4_size_t vsize = cur->num_sectors * _device->sector_size();
470 if (preq->dir == L4Re::Dma_space::From_device)
471 l4_cache_inv_data(vstart, vstart + vsize);
472 else if (preq->dir == L4Re::Dma_space::To_device)
473 l4_cache_clean_data(vstart, vstart + vsize);
474 else // L4Re::Dma_space::Bidirectional
475 l4_cache_flush_data(vstart, vstart + vsize);
476 }
477 }
478 }
479
480 void maintain_cache_after_req(Pending_inout_request const *preq)
481 {
482 if (preq->dir == L4Re::Dma_space::None)
483 return;
484 for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
485 {
486 l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
487 if (vstart)
488 {
489 l4_size_t vsize = cur->num_sectors * _device->sector_size();
490 if (preq->dir != L4Re::Dma_space::To_device)
491 l4_cache_inv_data(vstart, vstart + vsize);
492 }
493 }
494 }
495
496 int inout_request(Pending_inout_request *preq)
497 {
498 auto *req = preq->request.get();
499 l4_uint64_t sector = req->header().sector / (_device->sector_size() >> 9);
500
501 maintain_cache_before_req(preq);
502 int res = _device->inout_data(
503 sector, preq->blocks,
504 [this, preq](int error, l4_size_t sz) {
505 maintain_cache_after_req(preq);
506 task_finished(preq, error, sz);
507 },
508 preq->dir);
509
510 // request successfully submitted to device
511 if (res >= 0)
512 _in_flight++;
513
514 return res;
515 }
516
517 int check_flush_request(Pending_flush_request *preq)
518 {
519 if (!_negotiated_features.flush())
520 return -L4_ENOSYS;
521
522 auto *req = preq->request.get();
523
524 // sector must be zero for FLUSH
525 if (req->header().sector)
526 return -L4_ENOSYS;
527
528 return L4_EOK;
529 }
530
531 int flush_request(Pending_flush_request *preq)
532 {
533 int res = _device->flush([this, preq](int error, l4_size_t sz) {
534 task_finished(preq, error, sz);
535 });
536
537 // request successfully submitted to device
538 if (res >= 0)
539 _in_flight++;
540
541 return res;
542 }
543
544 bool check_features(void) override
545 {
546 _negotiated_features = negotiated_features();
547 return true;
548 }
549
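// Overload-resolution dispatch: init_discard_info(0) and
// discard_cmd_request(..., 0) prefer the 'int' overloads, which only take
// part in overload resolution if Device_type provides discard_info()
// (checked via the trailing decltype). Devices without discard support fall
// back to the 'long' overloads, leaving discard and write-zeroes disabled.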
550 template <typename T = Device_type>
551 void init_discard_info(long) {}
552
553 template <typename T = Device_type>
554 auto init_discard_info(int)
555 -> decltype(((T*)0)->discard_info(), void())
556 {
557 _di = _device->discard_info();
558
559 // Convert sector sizes to virtio 512-byte sectors.
560 size_t sps = _device->sector_size() >> 9;
561 if (_di.max_discard_sectors)
562 set_discard(_di.max_discard_sectors * sps, _di.max_discard_seg,
563 _di.discard_sector_alignment * sps);
564 if (_di.max_write_zeroes_sectors)
565 set_write_zeroes(_di.max_write_zeroes_sectors * sps,
566 _di.max_write_zeroes_seg, _di.write_zeroes_may_unmap);
567 }
568
569 int build_discard_cmd_blocks(Pending_cmd_request *preq)
570 {
571 auto *req = preq->request.get();
572 bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);
573
574 if (this->device_features().ro())
575 return -L4_EIO;
576
577 // sector is used only for inout requests; it must be zero for discard and write-zeroes
578 if (req->header().sector)
579 return -L4_ENOSYS;
580
581 if (discard)
582 {
583 if (!_negotiated_features.discard())
584 return -L4_ENOSYS;
585 }
586 else
587 {
588 if (!_negotiated_features.write_zeroes())
589 return -L4_ENOSYS;
590 }
591
592 auto *d = _device.get();
593
594 size_t seg = 0;
595 size_t max_seg = discard ? _di.max_discard_seg : _di.max_write_zeroes_seg;
596
597 l4_size_t sps = d->sector_size() >> 9;
598 l4_uint64_t sectors = d->capacity() / d->sector_size();
599
600 Inout_block *last_blk = nullptr;
601
602 while (req->has_more())
603 {
604 Request::Data_block b;
605
606 try
607 {
608 b = req->next_block();
609 }
610 catch (L4virtio::Svr::Bad_descriptor const &e)
611 {
612 Dbg::warn().printf("Descriptor error: %s\n", e.message());
613 return -L4_EIO;
614 }
615
616 auto *payload = reinterpret_cast<l4virtio_block_discard_t *>(b.addr);
617
618 size_t items = b.len / sizeof(payload[0]);
619 if (items * sizeof(payload[0]) != b.len)
620 return -L4_EIO;
621
622 if (seg + items > max_seg)
623 return -L4_EIO;
624 seg += items;
625
626 for (auto i = 0u; i < items; i++)
627 {
628 auto p = cxx::access_once<l4virtio_block_discard_t>(&payload[i]);
629
630 // Check sector size alignment. Discard sector alignment is not
631 // strictly enforced as it is merely a hint to the driver.
632 if (p.sector % sps != 0)
633 return -L4_EIO;
634 if (p.num_sectors % sps != 0)
635 return -L4_EIO;
636
637 // Convert to the device sector size
638 p.sector /= sps;
639 p.num_sectors /= sps;
640
641 // Check bounds
642 if (p.num_sectors > sectors)
643 return -L4_EIO;
644 if (p.sector > sectors - p.num_sectors)
645 return -L4_EIO;
646
647 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_RESERVED)
648 return -L4_ENOSYS;
649
650 Inout_block *blk;
651 if (last_blk)
652 {
653 last_blk->next = cxx::make_unique<Inout_block>();
654 blk = last_blk->next.get();
655 }
656 else
657 blk = &preq->blocks;
658
659 blk->sector = p.sector;
660 blk->num_sectors = p.num_sectors;
661
662 if (discard)
663 {
664 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
665 return -L4_ENOSYS;
666 if (p.num_sectors > _di.max_discard_sectors)
667 return -L4_EIO;
668 }
669 else
670 {
671 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP
672 && _di.write_zeroes_may_unmap)
673 blk->flags = Inout_f_unmap;
674 if (p.num_sectors > _di.max_write_zeroes_sectors)
675 return -L4_EIO;
676 }
677
678 last_blk = blk;
679 }
680 }
681
682 return L4_EOK;
683 }
684
685 template <typename T = Device_type>
686 int discard_cmd_request(Pending_cmd_request *, long)
687 { return -L4_EIO; }
688
689 template <typename T = Device_type>
690 auto discard_cmd_request(Pending_cmd_request *preq, int)
691 -> decltype(((T*)0)->discard_info(), int())
692 {
693 auto *req = preq->request.get();
694 bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);
695
696 int res = _device->discard(
697 0, preq->blocks,
698 [this, preq](int error, l4_size_t sz) { task_finished(preq, error, sz); },
699 discard);
700
701 // request successfully submitted to device
702 if (res >= 0)
703 _in_flight++;
704
705 return res;
706 }
707
708 // only use on errors that are not busy
709 void handle_request_error(int error, Generic_pending_request *pending)
710 {
711 auto trace = Dbg::trace("virtio");
712
713 if (error == -L4_ENOSYS)
714 {
715 trace.printf("Unsupported operation.\n");
716 finalize_request(cxx::move(pending->request), 0,
717 L4VIRTIO_BLOCK_S_UNSUPP);
718 }
719 else
720 {
721 trace.printf("Got IO error: %d\n", error);
722 finalize_request(cxx::move(pending->request), 0, L4VIRTIO_BLOCK_S_IOERR);
723 }
724 }
725
726protected:
727 L4::Cap<L4::Irq> _device_notify_irq;
728 std::function<void(bool)> _client_invalidate_cb;
729 std::function<void()> _client_idle_cb;
730 unsigned _numds;
731 Shutdown_type _shutdown_state;
732 cxx::Ref_ptr<Device_type> _device;
733 Device_discard_feature::Discard_info _di;
734
735 L4virtio::Svr::Block_features _negotiated_features;
736
737 unsigned _in_flight;
738};
739
740} // namespace Block_device
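A minimal usage sketch (not part of virtio_client.h): how a server might create a Virtio_client for a device and drain its request queue. My_device, export_device(), process_queue() and the way the header is included are assumptions for illustration; a real device type must implement the Block_device device interface used above (capacity(), sector_size(), inout_data(), dma_map(), ...), and a real scheduler would keep a busy request and retry it once the device signals idle (see set_client_idle_cb()) instead of failing it.

#include <l4/re/util/object_registry>
#include "virtio_client.h"              // this header; install path may differ

using Client = Block_device::Virtio_client<My_device>;  // My_device: hypothetical device type

// Export 'dev' as a virtio block device bound to the 'virtio' capability of this server.
static Client *
export_device(cxx::Ref_ptr<My_device> const &dev,
              L4Re::Util::Registry_server<> &server)
{
  auto *client = new Client(dev, 2 /* dataspaces */, false /* read-write */);
  client->register_obj(server.registry(), "virtio");
  return client;
}

// Drain the client's queue once, e.g. after a queue notification.
static void
process_queue(Client *c)
{
  while (auto req = c->get_request())
    {
      auto pending = c->start_request(cxx::move(req));
      if (!pending)
        continue;                  // already finalized (shutdown or bad request)

      int ret = pending->handle_request();
      if (ret == -L4_EBUSY)
        {
          pending->fail_request(); // simplification; a real scheduler requeues
          continue;
        }
      if (ret >= 0)
        pending.release();         // ownership passes to task_finished()
      // other errors: already finalized by handle_request_error()
    }
}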