L4Re Operating System Framework
Interface and Usage Documentation
Loading...
Searching...
No Matches
vfs_impl.h
1/*
2 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Björn Döbel <doebel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
6 *
7 * License: see LICENSE.spdx (in this directory or the directories above)
8 */
9
10#include "fd_store.h"
11#include "vcon_stream.h"
12#include "ns_fs.h"
13
14#include <l4/bid_config.h>
15#include <l4/re/env>
16#include <l4/re/rm>
17#include <l4/re/dataspace>
18#include <l4/sys/assert.h>
19#include <l4/cxx/hlist>
20#include <l4/cxx/pair>
21#include <l4/cxx/std_alloc>
22
23#include <l4/l4re_vfs/backend>
24#include <l4/re/shared_cap>
25
26#include <unistd.h>
27#include <stdarg.h>
28#include <errno.h>
29#include <sys/uio.h>
30#include <sys/mman.h>
31
32#if 0
33#include <l4/sys/kdebug.h>
34static int debug_mmap = 1;
35#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
36#else
37#define DEBUG_LOG(level, dbg...) do { } while (0)
38#endif
39
45#define USE_BIG_ANON_DS
46
47using L4Re::Rm;
48
49namespace {
50
51using cxx::Ref_ptr;
52
// Process-wide file-descriptor table. The constructor pre-wires the
// standard descriptors 0, 1 and 2 to a Vcon-backed stdio stream (see
// definition below).
class Fd_store : public L4Re::Core::Fd_store
{
public:
  Fd_store() noexcept;
};
58
59// for internal Vcon_streams we want to have a placement new operator, so
60// inherit and add one
class Std_stream : public L4Re::Core::Vcon_stream
{
public:
  // Wrap the given Vcon capability; used for the static stdio stream.
  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
};
66
67Fd_store::Fd_store() noexcept
68{
69 // use this strange way to prevent deletion of the stdio object
70 // this depends on Fd_store to being a singleton !!!
71 static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
72 Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
73 // make sure that we never delete the static io stream thing
74 s->add_ref();
75 set(0, cxx::ref_ptr(s)); // stdin
76 set(1, cxx::ref_ptr(s)); // stdout
77 set(2, cxx::ref_ptr(s)); // stderr
78}
79
// Root node of the VFS mount tree. It is embedded in the Vfs singleton,
// hence the no-op operator delete: the ref-counting machinery must never
// free it.
class Root_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}
};
86
87class Vfs : public L4Re::Vfs::Ops
88{
89private:
90 bool _early_oom;
91
92public:
93 Vfs()
94 : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
95 {
96 _root_mount.add_ref();
97 _root.add_ref();
98 _root_mount.mount(cxx::ref_ptr(&_root));
99 _cwd = cxx::ref_ptr(&_root);
100
101#if 0
102 Ref_ptr<L4Re::Vfs::File> rom;
103 _root.openat("rom", 0, 0, &rom);
104
105 _root_mount.create_tree("lib/foo", rom);
106
107 _root.openat("lib", 0, 0, &_cwd);
108
109#endif
110 }
111
112 int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
113 Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
114 Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
115 Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
116 void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
117 Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
118 cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
119 set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
120 override;
121
122 int mmap2(void *start, size_t len, int prot, int flags, int fd,
123 off_t offset, void **ptr) noexcept override;
124
125 int munmap(void *start, size_t len) noexcept override;
126 int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
127 void **new_addr) noexcept override;
128 int mprotect(const void *a, size_t sz, int prot) noexcept override;
129 int msync(void *addr, size_t len, int flags) noexcept override;
130 int madvise(void *addr, size_t len, int advice) noexcept override;
131
132 int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
133 int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
134 L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;
135 L4Re::Vfs::File_system_list file_system_list() noexcept override;
136
137 int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
138 int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
139 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
140 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
141 int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
142
143 void operator delete (void *) {}
144
145 void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
146 void free(void *m) noexcept override { Vfs_config::free(m); }
147
148private:
149 Root_mount_tree _root_mount;
150 L4Re::Core::Env_dir _root;
151 Ref_ptr<L4Re::Vfs::File> _cwd;
152 Fd_store fds;
153
154 L4Re::Vfs::File_system *_fs_registry;
155
156 struct File_factory_item : cxx::H_list_item_t<File_factory_item>
157 {
158 cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
159 explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
160 : f(f) {};
161
162 File_factory_item() = default;
163 File_factory_item(File_factory_item const &) = delete;
164 File_factory_item &operator = (File_factory_item const &) = delete;
165 };
166
167 cxx::H_list_t<File_factory_item> _file_factories;
168
169 l4_addr_t _anon_offset;
171
172 int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
173 int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
174 l4_addr_t *offset);
175
176 void align_mmap_start_and_length(void **start, size_t *length);
177 int munmap_regions(void *start, size_t len);
178
179 L4Re::Vfs::File_system *find_fs_from_type(char const *fstype) noexcept;
180};
181
// Compare two C strings for exact equality (strcmp(a, b) == 0).
static inline bool strequal(char const *a, char const *b)
{
  while (*a && *a == *b)
    {
      ++a;
      ++b;
    }
  return *a == *b;
}
188
189int
190Vfs::register_file_system(L4Re::Vfs::File_system *f) noexcept
191{
192 using L4Re::Vfs::File_system;
193
194 if (!f)
195 return -EINVAL;
196
197 for (File_system *c = _fs_registry; c; c = c->next())
198 if (strequal(c->type(), f->type()))
199 return -EEXIST;
200
201 f->next(_fs_registry);
202 _fs_registry = f;
203
204 return 0;
205}
206
207int
208Vfs::unregister_file_system(L4Re::Vfs::File_system *f) noexcept
209{
210 using L4Re::Vfs::File_system;
211
212 if (!f)
213 return -EINVAL;
214
215 File_system **p = &_fs_registry;
216
217 for (; *p; p = &(*p)->next())
218 if (*p == f)
219 {
220 *p = f->next();
221 f->next() = 0;
222 return 0;
223 }
224
225 return -ENOENT;
226}
227
228L4Re::Vfs::File_system *
229Vfs::find_fs_from_type(char const *fstype) noexcept
230{
231 L4Re::Vfs::File_system_list fsl(_fs_registry);
232 for (L4Re::Vfs::File_system_list::Iterator c = fsl.begin();
233 c != fsl.end(); ++c)
234 if (strequal(c->type(), fstype))
235 return *c;
236 return 0;
237}
238
// Return an iterable handle over all currently registered file systems.
L4Re::Vfs::File_system_list
Vfs::file_system_list() noexcept
{
  return L4Re::Vfs::File_system_list(_fs_registry);
}
244
245L4Re::Vfs::File_system *
246Vfs::get_file_system(char const *fstype) noexcept
247{
248 L4Re::Vfs::File_system *fs;
249 if ((fs = find_fs_from_type(fstype)))
250 return fs;
251
252 // Try to load a file system module dynamically
253 int res = Vfs_config::load_module(fstype);
254 if (res < 0)
255 return 0;
256
257 // Try again
258 return find_fs_from_type(fstype);
259}
260
261int
262Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
263{
264 if (!f)
265 return -EINVAL;
266
267 void *x = this->malloc(sizeof(File_factory_item));
268 if (!x)
269 return -ENOMEM;
270
271 auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
272 _file_factories.push_front(ff);
273 return 0;
274}
275
276int
277Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
278{
279 for (auto p: _file_factories)
280 {
281 if (p->f == f)
282 {
283 _file_factories.remove(p);
284 p->~File_factory_item();
285 this->free(p);
286 return 0;
287 }
288 }
289 return -ENOENT;
290}
291
292Ref_ptr<L4Re::Vfs::File_factory>
293Vfs::get_file_factory(int proto) noexcept
294{
295 for (auto p: _file_factories)
296 if (p->f->proto() == proto)
297 return p->f;
298
299 return Ref_ptr<L4Re::Vfs::File_factory>();
300}
301
302Ref_ptr<L4Re::Vfs::File_factory>
303Vfs::get_file_factory(char const *proto_name) noexcept
304{
305 for (auto p: _file_factories)
306 {
307 auto n = p->f->proto_name();
308 if (n)
309 {
310 char const *a = n;
311 char const *b = proto_name;
312 for (; *a && *b && *a == *b; ++a, ++b)
313 ;
314
315 if ((*a == 0) && (*b == 0))
316 return p->f;
317 }
318 }
319
320 return Ref_ptr<L4Re::Vfs::File_factory>();
321}
322
323int
324Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
325{
326 int fd = fds.alloc();
327 if (fd < 0)
328 return -EMFILE;
329
330 if (f)
331 fds.set(fd, f);
332
333 return fd;
334}
335
336Ref_ptr<L4Re::Vfs::File>
337Vfs::free_fd(int fd) noexcept
338{
339 Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
340
341 if (!f)
342 return Ref_ptr<>::Nil;
343
344 fds.free(fd);
345 return f;
346}
347
348
// Return the root directory of the VFS (the embedded environment dir).
Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() noexcept
{
  return cxx::ref_ptr(&_root);
}
354
// Return the current working directory (initially the root).
Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() noexcept
{
  return _cwd;
}
360
// Set the current working directory; a Nil pointer is silently ignored.
void
Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  // FIXME: check for is dir
  if (dir)
    _cwd = dir;
}
368
// Return the file bound to descriptor fd, or Nil if fd is unused/invalid.
Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) noexcept
{
  return fds.get(fd);
}
374
375cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
376Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
377{
378 if (!fds.check_fd(fd))
379 return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);
380
381 Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
382 fds.set(fd, f);
383 return cxx::pair(old, 0);
384}
385
386
387#define GET_FILE_DBG(fd, err) \
388 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
389 if (!fi) \
390 { \
391 return -err; \
392 }
393
394#define GET_FILE(fd, err) \
395 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
396 if (!fi) \
397 return -err;
398
399void
400Vfs::align_mmap_start_and_length(void **start, size_t *length)
401{
402 l4_addr_t const s = reinterpret_cast<l4_addr_t>(*start);
403 size_t const o = s & (L4_PAGESIZE - 1);
404
405 *start = reinterpret_cast<void *>(l4_trunc_page(s));
406 *length = l4_round_page(*length + o);
407}
408
/**
 * Detach all regions overlapping [start, start+len) from the region map.
 *
 * Loops because the range may consist of several attached regions;
 * Rm::detach() signals via Rm::Detach_again that more remain. Dataspace
 * references are adjusted per detach result: a full detach releases our
 * reference, a split region gains one.
 *
 * \return 0 on success, -EINVAL for an unaligned start address, or a
 *         negative error from Rm::detach (e.g. -ENOENT if no region
 *         exists in the range).
 */
int
Vfs::munmap_regions(void *start, size_t len)
{
  using namespace L4;
  using namespace L4Re;

  int err;
  Cap<Dataspace> ds;
  Cap<Rm> r = Env::env()->rm();

  // munmap only operates on page-aligned addresses.
  if (l4_addr_t(start) & (L4_PAGESIZE - 1))
    return -EINVAL;

  align_mmap_start_and_length(&start, &len);

  while (1)
    {
      DEBUG_LOG(debug_mmap, {
          l4_kd_outstring("DETACH: start = 0x");
          l4_kd_outstring(" len = 0x");
          l4_kd_outhex32(len);
          l4_kd_outstring("\n");
      });
      err = r->detach(l4_addr_t(start), len, &ds, This_task);
      if (err < 0)
        return err;

      switch (err & Rm::Detach_result_mask)
        {
        case Rm::Split_ds:
          // The region was split: the dataspace gained another user.
          if (ds.is_valid())
            L4Re::virt_cap_alloc->take(ds);
          return 0;
        case Rm::Detached_ds:
          // The dataspace is no longer mapped here; drop our reference.
          if (ds.is_valid())
            L4Re::virt_cap_alloc->release(ds);
          break;
        default:
          break;
        }

      // Continue while detach reports more regions in the range.
      if (!(err & Rm::Detach_again))
        return 0;
    }
}
455
456int
457Vfs::munmap(void *start, size_t len) L4_NOTHROW
458{
459 using namespace L4;
460 using namespace L4Re;
461
462 int err = 0;
463 Cap<Rm> r = Env::env()->rm();
464
465 // Fields for obtaining a list of areas for the calling process
466 long area_cnt = -1; // No. of areas in this process
467 Rm::Area const *area_array;
468 bool matches_area = false; // true if unmap parameters match an area
469
470 // First check if there are any areas matching the munmap request. Those
471 // might have been created by an mmap call using PROT_NONE as protection
472 // modifier.
473
474 area_cnt = r->get_areas((l4_addr_t) start, &area_array);
475
476 // It is enough to check for the very first entry, since get_areas will
477 // only return areas with a starting address equal or greater to <start>.
478 // However, we intend to unmap at most the area starting exactly at
479 // <start>.
480 if (area_cnt > 0)
481 {
482 size_t area_size = area_array[0].end - area_array[0].start + 1;
483
484 // Only free the area if the munmap parameters describe it exactly.
485 if (area_array[0].start == (l4_addr_t) start && area_size == len)
486 {
487 r->free_area((l4_addr_t) start);
488 matches_area = true;
489 }
490 }
491
492 // After clearing possible area reservations from PROT_NONE mappings, clear
493 // any regions in the address range specified. Note that errors shall be
494 // suppressed if an area was freed but no regions were found.
495 err = munmap_regions(start, len);
496 if (err == -ENOENT && matches_area)
497 return 0;
498
499 return err;
500}
501
502int
503Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
504{
505 *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
506
507 if (!ds->is_valid())
508 return -ENOMEM;
509
510 int err;
511 if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
512 return err;
513
514 DEBUG_LOG(debug_mmap, {
515 l4_kd_outstring("ANON DS ALLOCATED: size=");
516 l4_kd_outhex32(size);
517 l4_kd_outstring(" cap = 0x");
518 l4_kd_outhex32(ds->cap());
519 l4_kd_outstring("\n");
520 });
521
522 return 0;
523}
524
/**
 * Allocate `size` bytes of anonymous backing memory.
 *
 * Small requests are carved out of a shared pool dataspace (_anon_ds) at
 * increasing offsets; requests of at least ANON_MEM_MAX_SIZE get a
 * dedicated dataspace. With _early_oom set, the memory is committed
 * immediately so out-of-memory surfaces here rather than on first touch.
 *
 * \param size    Number of bytes (callers pass page-rounded sizes).
 * \param ds      Out: dataspace backing the allocation.
 * \param offset  Out: start offset of the allocation within *ds.
 * \return 0 on success, negative errno otherwise.
 */
int
Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
                    l4_addr_t *offset)
{
#if !defined(CONFIG_MMU)
  // Small values for !MMU systems. These platforms do not have much memory
  // typically and the memory must be instantly allocated.
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 10, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 32UL << 10,      // request size from which on a dedicated dataspace is used
  };
#elif defined(USE_BIG_ANON_DS)
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 32UL << 20,      // request size from which on a dedicated dataspace is used
  };
#else
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 0UL << 20,       // 0: every request gets a dedicated dataspace
  };
#endif

  // Large request: give it a dataspace of its own.
  if (size >= ANON_MEM_MAX_SIZE)
    {
      int err;
      if ((err = alloc_ds(size, ds)) < 0)
        return err;

      *offset = 0;

      if (!_early_oom)
        return err;

      // Commit the memory now so OOM is reported at mmap time.
      return (*ds)->allocate(0, size);
    }

  // Small request: carve it out of the pool. Start a fresh pool dataspace
  // when there is none yet or the current one is exhausted.
  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    {
      int err;
      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
        return err;

      _anon_offset = 0;
      _anon_ds = *ds;
    }
  else
    *ds = _anon_ds;

  // Commit the chunk eagerly if configured to do so.
  if (_early_oom)
    {
      if (int err = (*ds)->allocate(_anon_offset, size))
        return err;
    }

  *offset = _anon_offset;
  _anon_offset += size;
  return 0;
}
587
/**
 * Backend for mmap() with a file offset in units of 4k pages.
 *
 * Handles three cases:
 *  - pure address-space reservation (internal flag 0x1000000 or PROT_NONE),
 *  - anonymous and/or private memory, backed by anon dataspace memory,
 *  - shared file mappings, attaching the file's dataspace directly.
 *
 * \param start          Requested address (must be page aligned for
 *                       MAP_FIXED).
 * \param len            Length of the mapping in bytes.
 * \param prot           PROT_* protection bits.
 * \param flags          MAP_* flags.
 * \param fd             File descriptor (ignored for MAP_ANONYMOUS).
 * \param page4k_offset  File offset in 4k-page units.
 * \param resptr         Out: resulting address of the mapping.
 * \return 0 on success, negative errno otherwise.
 *
 * NOTE(review): this listing appears to have lost a few lines (the
 * declaration of `ds`, the `src`/`dst` regions, and some attach-flag
 * arguments); verify against the pristine source before building.
 */
int
Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_offset,
           void **resptr) L4_NOTHROW
{
  DEBUG_LOG(debug_mmap, {
    l4_kd_outstring("MMAP params: ");
    l4_kd_outstring("start = 0x");
    l4_kd_outstring(", len = 0x");
    l4_kd_outhex32(len);
    l4_kd_outstring(", prot = 0x");
    l4_kd_outhex32(prot);
    l4_kd_outstring(", flags = 0x");
    l4_kd_outhex32(flags);
    l4_kd_outstring(", offset = 0x");
    l4_kd_outhex32(page4k_offset);
    l4_kd_outstring("\n");
  });

  using namespace L4Re;
  // Convert the 4k-page-granular offset into a byte offset.
  off64_t offset = l4_trunc_page(page4k_offset << 12);

  // MAP_FIXED requires a page-aligned target address.
  if (flags & MAP_FIXED)
    if (l4_addr_t(start) & (L4_PAGESIZE - 1))
      return -EINVAL;

  align_mmap_start_and_length(&start, &len);

  // special code to just reserve an area of the virtual address space
  // Same behavior should be exposed when mapping with PROT_NONE. Mind that
  // PROT_NONE can only be specified exclusively, since it is defined to 0x0.
  if ((flags & 0x1000000) || (prot == PROT_NONE))
    {
      int err;
      L4::Cap<Rm> r = Env::env()->rm();
      l4_addr_t area = reinterpret_cast<l4_addr_t>(start);
      err = r->reserve_area(&area, len, L4Re::Rm::F::Search_addr);
      if (err < 0)
        return err;

      *resptr = reinterpret_cast<void*>(area);

      DEBUG_LOG(debug_mmap, {
        l4_kd_outstring(" MMAP reserved area: 0x");
        l4_kd_outhex32(area);
        l4_kd_outstring(" length= 0x");
        l4_kd_outhex32(len);
        l4_kd_outstring("\n");
      });

      return 0;
    }

  // NOTE(review): the declaration of the backing dataspace `ds`
  // (presumably L4Re::Shared_cap<L4Re::Dataspace>) is missing in this
  // listing.
  l4_addr_t anon_offset = 0;
  L4Re::Rm::Flags rm_flags(0);

  // Anonymous or private mappings need their own (copy-)memory.
  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
    {
      // Anon memory may be freed page-wise when detached.
      rm_flags |= L4Re::Rm::F::Detach_free;

      int err = alloc_anon_mem(len, &ds, &anon_offset);
      if (err)
        return err;

      DEBUG_LOG(debug_mmap, {
        l4_kd_outstring(" USE ANON MEM: 0x");
        l4_kd_outhex32(ds.cap());
        l4_kd_outstring(" offs = 0x");
        l4_kd_outhex32(anon_offset);
        l4_kd_outstring("\n");
      });
    }

  char const *region_name = "[unknown]";
  l4_addr_t file_offset = 0;
  if (!(flags & MAP_ANONYMOUS))
    {
      // File-backed mapping: look up the open file behind fd.
      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
      if (!fi)
        return -EBADF;

      region_name = fi->path();

      L4::Cap<L4Re::Dataspace> fds = fi->data_space();

      if (!fds.is_valid())
        return -EINVAL;

      // The requested window must lie inside the file's dataspace.
      if (len + offset > l4_round_page(fds->size()))
        return -EINVAL;

      if (flags & MAP_PRIVATE)
        {
          DEBUG_LOG(debug_mmap, l4_kd_outstring("COW\n"););
          // First try a server-side copy into the anon memory ...
          int err = ds->copy_in(anon_offset, fds, offset, len);
          file_offset = offset;
          if (err == -L4_EINVAL)
            {
              // ... and fall back to mapping both dataspaces locally and
              // copying by hand if the server cannot copy_in.
              L4::Cap<Rm> r = Env::env()->rm();
              // NOTE(review): the declarations of `src`/`dst` (presumably
              // Rm::Unique_region) and the attach flag arguments are
              // missing in this listing.
              err = r->attach(&src, len,
                              fds, offset);
              if (err < 0)
                return err;

              err = r->attach(&dst, len,
                              ds.get(), anon_offset);
              if (err < 0)
                return err;

              memcpy(dst.get(), src.get(), len);

              region_name = "[mmap-private]";
              file_offset = (unsigned long)dst.get();
            }
          else if (err)
            return err;

          // The mapping is now backed by the anon copy.
          offset = anon_offset;
        }
      else
        {
          // Shared mapping: take a reference and attach the file's
          // dataspace directly.
          L4Re::virt_cap_alloc->take(fds);
          ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
        }
    }
  else
    {
      offset = anon_offset;
      region_name = "[anon]";
      file_offset = offset;
    }


  // Never attach at address zero unless MAP_FIXED demands it.
  if (!(flags & MAP_FIXED) && start == 0)
    start = reinterpret_cast<void*>(L4_PAGESIZE);

  char *data = static_cast<char *>(start);
  L4::Cap<Rm> r = Env::env()->rm();
  l4_addr_t overmap_area = L4_INVALID_ADDR;

  int err;
  if (flags & MAP_FIXED)
    {
      // Reserve the target range so nothing else can claim it while the
      // old mappings are being cleared out.
      overmap_area = l4_addr_t(start);

      err = r->reserve_area(&overmap_area, len);
      if (err < 0)
        overmap_area = L4_INVALID_ADDR;

      rm_flags |= Rm::F::In_area;

      // Make sure to remove old mappings residing at the respective address
      // range. If none exists, we are fine as well, allowing us to ignore
      // ENOENT here.
      err = munmap_regions(start, len);
      if (err && err != -ENOENT)
        return err;
    }

  // Translate POSIX protection bits into region-map flags.
  if (!(flags & MAP_FIXED))
    rm_flags |= Rm::F::Search_addr;
  if (prot & PROT_READ)
    rm_flags |= Rm::F::R;
  if (prot & PROT_WRITE)
    rm_flags |= Rm::F::W;
  if (prot & PROT_EXEC)
    rm_flags |= Rm::F::X;

  // NOTE(review): the tail of this attach call (completion of the rights
  // expression) is missing in this listing.
  err = r->attach(&data, len, rm_flags,
                  L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
                  region_name, file_offset);

  DEBUG_LOG(debug_mmap, {
    l4_kd_outstring(" MAPPED: 0x");
    l4_kd_outhex32(ds.cap());
    l4_kd_outstring(" addr: 0x");
    l4_kd_outstring(" bytes: 0x");
    l4_kd_outhex32(len);
    l4_kd_outstring(" offset: 0x");
    l4_kd_outhex32(offset);
    l4_kd_outstring(" err = ");
    l4_kd_outdec(err);
    l4_kd_outstring("\n");
  });


  // Release the reservation that guarded the MAP_FIXED range.
  if (overmap_area != L4_INVALID_ADDR)
    r->free_area(overmap_area);

  if (err < 0)
    return err;

  l4_assert (!(start && !data));

  // release ownership of the attached DS
  ds.release();
  *resptr = data;

  return 0;
}
797
798namespace {
799 class Auto_area
800 {
801 public:
802 L4::Cap<L4Re::Rm> r;
803 l4_addr_t a;
804
805 explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
806 : r(r), a(a) {}
807
808 int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
809 {
810 free();
811 a = _a;
812 int e = r->reserve_area(&a, sz, flags);
813 if (e)
814 a = L4_INVALID_ADDR;
815 return e;
816 }
817
818 void free()
819 {
820 if (is_valid())
821 {
822 r->free_area(a);
823 a = L4_INVALID_ADDR;
824 }
825 }
826
827 bool is_valid() const { return a != L4_INVALID_ADDR; }
828
829 ~Auto_area() { free(); }
830 };
831}
832
/**
 * Backend for mremap(): shrink, grow, or move a mapping.
 *
 * Shrinking and size-preserving calls (without MREMAP_FIXED) are handled
 * trivially via munmap / no-op. Growing either extends the mapping in
 * place when the adjacent range is free, or — with MREMAP_MAYMOVE —
 * relocates all regions of the old range into a freshly reserved area.
 * Added space is backed by fresh anonymous memory.
 *
 * NOTE(review): some lines (the detach-flag argument, the declaration of
 * `tds`, attach flags) appear to be missing from this listing; verify
 * against the pristine source before building.
 */
int
Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
            void **new_addr) L4_NOTHROW
{
  using namespace L4Re;

  DEBUG_LOG(debug_mmap, {
    l4_kd_outstring("Mremap: addr = 0x");
    l4_kd_outhex32((l4_umword_t)old_addr);
    l4_kd_outstring(" old_size = 0x");
    l4_kd_outhex32(old_size);
    l4_kd_outstring(" new_size = 0x");
    l4_kd_outhex32(new_size);
    l4_kd_outstring("\n");
  });

  // MREMAP_FIXED is only valid together with MREMAP_MAYMOVE.
  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
    return -EINVAL;

  // The old address must be page aligned.
  l4_addr_t oa = l4_trunc_page(reinterpret_cast<l4_addr_t>(old_addr));
  if (oa != reinterpret_cast<l4_addr_t>(old_addr))
    return -EINVAL;

  bool const fixed = flags & MREMAP_FIXED;
  bool const maymove = flags & MREMAP_MAYMOVE;

  L4::Cap<Rm> r = Env::env()->rm();

  // sanitize input parameters to multiples of pages
  old_size = l4_round_page(old_size);
  new_size = l4_round_page(new_size);

  if (!fixed)
    {
      // Shrinking: simply unmap the tail.
      if (new_size < old_size)
        {
          *new_addr = old_addr;
          return munmap(reinterpret_cast<void*>(oa + new_size),
                        old_size - new_size);
        }

      // Same size: nothing to do.
      if (new_size == old_size)
        {
          *new_addr = old_addr;
          return 0;
        }
    }

  // Reserve the old range so its regions cannot change underneath us.
  Auto_area old_area(r);
  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
  if (err < 0)
    return -EINVAL;

  l4_addr_t pad_addr;   // where the anonymous padding will be attached
  Auto_area new_area(r);
  if (fixed)
    {
      // The target address must be page aligned as well.
      l4_addr_t na = l4_trunc_page(reinterpret_cast<l4_addr_t>(*new_addr));
      if (na != reinterpret_cast<l4_addr_t>(*new_addr))
        return -EINVAL;

      // check if the current virtual memory area can be expanded
      int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
      if (err < 0)
        return err;

      pad_addr = na;
      // unmap all stuff and remap ours ....
    }
  else
    {
      l4_addr_t ta = oa + old_size;
      unsigned long ts = new_size - old_size;
      // check if the current virtual memory area can be expanded
      long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
      if (!maymove && err)
        return -ENOMEM;

      L4Re::Rm::Offset toffs;
      L4Re::Rm::Flags tflags;
      L4::Cap<L4Re::Dataspace> tds;

      err = r->find(&ta, &ts, &toffs, &tflags, &tds);

      // there is enough space to expand the mapping in place
      if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
        {
          old_area.free(); // pad at the original address
          pad_addr = oa + old_size;
          *new_addr = old_addr;
        }
      else if (!maymove)
        return -ENOMEM;
      else
        {
          // search for a new area to remap
          err = new_area.reserve(0, new_size, Rm::F::Search_addr);
          if (err < 0)
            return -ENOMEM;

          pad_addr = new_area.a + old_size;
          *new_addr = reinterpret_cast<void *>(new_area.a);
        }
    }

  // Relocation: move every region of the old range into the new area.
  if (old_area.is_valid())
    {
      unsigned long size = old_size;

      l4_addr_t a = old_area.a;
      unsigned long s = 1;
      L4Re::Rm::Offset o;
      L4Re::Rm::Flags f;
      L4::Cap<L4Re::Dataspace> ds;

      while (r->find(&a, &s, &o, &f, &ds) >= 0 && !(f & Rm::F::In_area))
        {
          // Clip the found region to the old range.
          if (a < old_area.a)
            {
              auto d = old_area.a - a;
              a = old_area.a;
              s -= d;
              o += d;
            }

          if (a + s > old_area.a + old_size)
            s = old_area.a + old_size - a;

          // Attach the same dataspace window at the equivalent position
          // in the new area ...
          l4_addr_t x = a - old_area.a + new_area.a;

          int err = r->attach(&x, s, Rm::F::In_area | f,
                              L4::Ipc::make_cap(ds, f.cap_rights()), o);
          if (err < 0)
            return err;

          // count the new attached ds reference
          L4Re::virt_cap_alloc->take(ds);

          // ... then detach it from the old location.
          // NOTE(review): the trailing detach-flag argument of this call
          // is missing in this listing.
          err = r->detach(a, s, &ds, This_task,
          if (err < 0)
            return err;

          switch (err & Rm::Detach_result_mask)
            {
            case Rm::Split_ds:
              // add a reference as we split up a mapping
              if (ds.is_valid())
                L4Re::virt_cap_alloc->take(ds);
              break;
            case Rm::Detached_ds:
              if (ds.is_valid())
                L4Re::virt_cap_alloc->release(ds);
              break;
            default:
              break;
            }

          if (size <= s)
            break;
          a += s;
          size -= s;
          s = 1;
        }

      old_area.free();
    }

  // Growing: back the added tail with fresh anonymous memory.
  if (old_size < new_size)
    {
      l4_addr_t const pad_sz = new_size - old_size;
      l4_addr_t toffs;
      // NOTE(review): the declaration of `tds` (presumably
      // L4Re::Shared_cap<L4Re::Dataspace>) is missing in this listing.
      int err = alloc_anon_mem(pad_sz, &tds, &toffs);
      if (err)
        return err;

      // FIXME: must get the protection rights from the old
      // mapping and use the same here, for now just use RWX
      // NOTE(review): the flags argument of this attach call is missing
      // in this listing.
      err = r->attach(&pad_addr, pad_sz,
                      L4::Ipc::make_cap_rw(tds.get()), toffs);
      if (err < 0)
        return err;

      // release ownership of tds, the region map is now the new owner
      tds.release();
    }

  return 0;
}
1024
1025int
1026Vfs::mprotect(const void * /* a */, size_t /* sz */, int prot) L4_NOTHROW
1027{
1028 return (prot & PROT_WRITE) ? -ENOSYS : 0;
1029}
1030
// Backend for msync(): no-op, always reports success.
int
Vfs::msync(void *, size_t, int) L4_NOTHROW
{ return 0; }
1034
// Backend for madvise(): advice is ignored, always reports success.
int
Vfs::madvise(void *, size_t, int) L4_NOTHROW
{ return 0; }
1038
1039}
1040
// Process-global pointer to the VFS operations table, plus its public
// alias. NOTE(review): the initializer is not visible in this file —
// presumably set during startup; verify in the backend init code.
L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
1043
namespace {
  // Dynamically created mount-tree node. Allocation is routed through the
  // global VFS ops malloc/free so nodes created at mount time can be
  // freed through the same allocator.
  class Real_mount_tree : public L4Re::Vfs::Mount_tree
  {
  public:
    explicit Real_mount_tree(char *n) : Mount_tree(n) {}

    void *operator new (size_t size)
    { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

    void operator delete (void *mem)
    { __rtld_l4re_env_posix_vfs_ops->free(mem); }
  };
}
1057
/**
 * Mount `dir` at `path` in the global mount tree.
 *
 * Walks the mount tree along `path`, creating intermediate nodes
 * (Real_mount_tree) for components that do not exist yet, and mounts
 * `dir` on the final component.
 *
 * \return 0 on success, -EEXIST if the full path already exists,
 *         -ENOMEM on allocation failure, -EINVAL otherwise.
 */
int
Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  using L4Re::Vfs::File;
  using L4Re::Vfs::Mount_tree;
  using L4Re::Vfs::Path;

  cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
  if (!root)
    return -EINVAL;

  // NOTE(review): the declaration of `base` (presumably
  // cxx::Ref_ptr<Mount_tree>) is missing in this listing.
  Path p = root->lookup(Path(path), &base);

  // Create the remaining, not-yet-existing path components one by one.
  while (!p.empty())
    {
      Path f = p.strip_first();

      if (f.empty())
        return -EEXIST;

      // Duplicate the component name; the node takes ownership of it.
      char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
      if (!name)
        return -ENOMEM;

      auto nt = cxx::make_ref_obj<Real_mount_tree>(name);
      if (!nt)
        {
          // Node creation failed; give the duplicated name back.
          __rtld_l4re_env_posix_vfs_ops->free(name);
          return -ENOMEM;
        }

      base->add_child_node(nt);
      base = nt;

      // Last component reached: mount the directory here.
      if (p.empty())
        {
          nt->mount(dir);
          return 0;
        }
    }

  return -EINVAL;
}
1103
1104#undef DEBUG_LOG
1105#undef GET_FILE_DBG
1106#undef GET_FILE
static Env const * env() noexcept
Returns the initial environment for the current task.
Definition env:96
Unique region.
Definition rm:435
Region map.
Definition rm:84
@ Detached_ds
Detached data space.
Definition rm:91
@ Detach_again
Detached data space, more to do.
Definition rm:96
@ Split_ds
Split data space, and done.
Definition rm:93
@ Detach_exact
Do an unmap of the exact region given.
Definition rm:226
@ Detach_keep
Do not free the detached data space, ignore the F::Detach_free.
Definition rm:246
The basic interface for an open POSIX file.
Definition vfs.h:461
Interface for the POSIX backends of an application.
Definition vfs.h:1110
l4_cap_idx_t cap() const noexcept
Return capability selector.
Definition capability.h:49
bool is_valid() const noexcept
Test whether the capability is a valid capability index (i.e., not L4_INVALID_CAP).
Definition capability.h:57
@ Invalid
Invalid capability selector.
Definition capability.h:42
T get() const noexcept
Return the address.
Definition rm:508
A reference-counting pointer with automatic cleanup.
Definition ref_ptr:71
Dataspace interface.
Environment interface.
unsigned int l4_size_t
Unsigned size type.
Definition l4int.h:24
unsigned long l4_umword_t
Unsigned machine word.
Definition l4int.h:40
unsigned long l4_addr_t
Address type.
Definition l4int.h:34
@ L4_EINVAL
Invalid argument.
Definition err.h:46
@ L4_CAP_FPAGE_RO
Read right for capability flexpages.
Definition __l4_fpage.h:176
@ L4_CAP_FPAGE_RW
Read and interface specific 'W' right for capability flexpages.
Definition __l4_fpage.h:192
l4_addr_t l4_trunc_page(l4_addr_t address) L4_NOTHROW
Round an address down to the next lower page boundary.
Definition consts.h:448
l4_addr_t l4_round_page(l4_addr_t address) L4_NOTHROW
Round address up to the next page.
Definition consts.h:473
#define L4_PAGESIZE
Minimal page size (in bytes).
Definition consts.h:391
#define L4_PAGESHIFT
Size of a page, log2-based.
Definition consts.h:26
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:505
#define L4_NOTHROW
Mark a function declaration and definition as never throwing an exception.
Definition compiler.h:161
Functionality for invoking the kernel debugger.
void l4_kd_outdec(l4_mword_t number)
Output a decimal unsigned machine word via the kernel debugger.
Definition kdebug.h:334
void l4_kd_outhex32(l4_uint32_t number)
Output a 32-bit unsigned hexadecimal number via the kernel debugger.
Definition kdebug.h:284
void l4_kd_outstring(char const *text)
Output a string via the kernel debugger.
Definition kdebug.h:237
Shared_cap< T > make_shared_cap(L4Re::Cap_alloc *ca)
Allocate a capability slot and wrap it in a Shared_cap.
Definition shared_cap:49
L4::Detail::Shared_cap_impl< T, Smart_count_cap< L4_FP_ALL_SPACES > > Shared_cap
Shared capability that implements automatic free and unmap of the capability selector.
Definition shared_cap:33
Cap< T > make_cap(L4::Cap< T > cap, unsigned rights) noexcept
Make an L4::Ipc::Cap<T> for the given capability and rights.
Definition ipc_types:785
Cap< T > make_cap_rw(L4::Cap< T > cap) noexcept
Make an L4::Ipc::Cap<T> for the given capability with L4_CAP_FPAGE_RW rights.
Definition ipc_types:795
Pair implementation.
Region mapper interface.
Shared_cap / Shared_del_cap.
An area is a range of virtual addresses which is reserved, see L4Re::Rm::reserve_area().
Definition rm:699
@ RWX
Readable, writable and executable region.
Definition rm:143
@ RW
Readable and writable region.
Definition rm:139
@ X
Executable region.
Definition rm:137
@ R
Readable region.
Definition rm:133
@ Detach_free
Free the portion of the data space after detach.
Definition rm:148
@ W
Writable region.
Definition rm:135
@ Search_addr
Search for a suitable address range.
Definition rm:113
@ In_area
Search only in area, or map into area.
Definition rm:115
Low-level assert implementation.
#define l4_assert(expr)
Low-level assert.
Definition assert.h:32