L4Re Operating System Framework
Interface and Usage Documentation
Loading...
Searching...
No Matches
vfs_impl.h
1/*
2 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Björn Döbel <doebel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
6 *
7 * This file is part of TUD:OS and distributed under the terms of the
8 * GNU General Public License 2.
9 * Please see the COPYING-GPL-2 file for details.
10 *
11 * As a special exception, you may use this file as part of a free software
12 * library without restriction. Specifically, if other files instantiate
13 * templates or use macros or inline functions from this file, or you compile
14 * this file and link it with other files to produce an executable, this
15 * file does not by itself cause the resulting executable to be covered by
16 * the GNU General Public License. This exception does not however
17 * invalidate any other reasons why the executable file might be covered by
18 * the GNU General Public License.
19 */
20
21#include "fd_store.h"
22#include "vcon_stream.h"
23#include "ns_fs.h"
24
25#include <l4/bid_config.h>
26#include <l4/re/env>
27#include <l4/re/rm>
28#include <l4/re/dataspace>
29#include <l4/sys/assert.h>
30#include <l4/cxx/hlist>
31#include <l4/cxx/pair>
32#include <l4/cxx/std_alloc>
33
34#include <l4/l4re_vfs/backend>
35#include <l4/re/shared_cap>
36
37#include <unistd.h>
38#include <cstdarg>
39#include <errno.h>
40#include <sys/uio.h>
41
42#if 0
43#include <l4/sys/kdebug.h>
44static int debug_mmap = 1;
45#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
46#else
47#define DEBUG_LOG(level, dbg...) do { } while (0)
48#endif
49
55#define USE_BIG_ANON_DS
56
57using L4Re::Rm;
58
59namespace {
60
61using cxx::Ref_ptr;
62
// Process-local file-descriptor table. The default constructor (defined
// below) pre-wires descriptors 0-2 to the L4 log Vcon stream.
63class Fd_store : public L4Re::Core::Fd_store
64{
65public:
66 Fd_store() noexcept;
67};
68
69// for internal Vcon_streams we want to have a placement new operator, so
70// inherit and add one
71class Std_stream : public L4Re::Core::Vcon_stream
72{
73public:
 // Forward the Vcon capability to the base stream; this subclass exists
 // only so internal code can placement-new a Vcon_stream (see comment above).
74 Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
75};
76
77Fd_store::Fd_store() noexcept
78{
79 // use this strange way to prevent deletion of the stdio object
80 // this depends on Fd_store to being a singleton !!!
81 static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
82 Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
83 // make sure that we never delete the static io stream thing
84 s->add_ref();
85 set(0, cxx::ref_ptr(s)); // stdin
86 set(1, cxx::ref_ptr(s)); // stdout
87 set(2, cxx::ref_ptr(s)); // stderr
88}
89
// Root node of the VFS mount tree. It is allocated statically inside the
// Vfs singleton, so operator delete is stubbed out to a no-op.
90class Root_mount_tree : public L4Re::Vfs::Mount_tree
91{
92public:
93 Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
94 void operator delete (void *) {}
95};
96
// The process-wide POSIX backend: implements L4Re::Vfs::Ops (fd table,
// mmap family, file-system and file-factory registries, mount).
// NOTE(review): this extracted listing is missing original lines 168 and 180
// (by the jumps in the embedded numbering); judging from the uses below these
// declared the File_factory_item::f member and the _anon_ds pool dataspace —
// confirm against the original source.
97class Vfs : public L4Re::Vfs::Ops
98{
99private:
 // When true, anonymous memory is populated eagerly at allocation time so
 // out-of-memory surfaces immediately instead of on first touch.
100 bool _early_oom;
101
102public:
 // Wire the root mount tree to the environment directory and make the
 // root directory the initial cwd.
103 Vfs()
104 : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
105 {
106 _root_mount.add_ref();
107 _root.add_ref();
108 _root_mount.mount(cxx::ref_ptr(&_root));
109 _cwd = cxx::ref_ptr(&_root);
110
111#if 0
112 Ref_ptr<L4Re::Vfs::File> rom;
113 _root.openat("rom", 0, 0, &rom);
114
115 _root_mount.create_tree("lib/foo", rom);
116
117 _root.openat("lib", 0, 0, &_cwd);
118
119#endif
120 }
121
 // --- fd table -----------------------------------------------------------
122 int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
123 Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
124 Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
125 Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
126 void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
127 Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
128 cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
129 set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
130 override;
131
 // --- memory mapping backend ---------------------------------------------
132 int mmap2(void *start, size_t len, int prot, int flags, int fd,
133 off_t offset, void **ptr) noexcept override;
134
135 int munmap(void *start, size_t len) noexcept override;
136 int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
137 void **new_addr) noexcept override;
138 int mprotect(const void *a, size_t sz, int prot) noexcept override;
139 int msync(void *addr, size_t len, int flags) noexcept override;
140 int madvise(void *addr, size_t len, int advice) noexcept override;
141
 // --- file-system / file-factory registries ------------------------------
142 int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
143 int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
144 L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;
145 L4Re::Vfs::File_system_list file_system_list() noexcept override;
146
147 int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
148 int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
149 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
150 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
151 int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
152
 // The Vfs object itself is a static singleton; deletion is a no-op.
153 void operator delete (void *) {}
154
 // Heap services routed through the Vfs_config backend allocator.
155 void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
156 void free(void *m) noexcept override { Vfs_config::free(m); }
157
158private:
159 Root_mount_tree _root_mount;
160 L4Re::Core::Env_dir _root;
161 Ref_ptr<L4Re::Vfs::File> _cwd;
162 Fd_store fds;
163
 // Singly linked list of registered file systems (linked via next()).
164 L4Re::Vfs::File_system *_fs_registry;
165
 // List node wrapping a registered file factory.
166 struct File_factory_item : cxx::H_list_item_t<File_factory_item>
167 {
169 explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
170 : f(f) {};
171
172 File_factory_item() = default;
173 File_factory_item(File_factory_item const &) = delete;
174 File_factory_item &operator = (File_factory_item const &) = delete;
175 };
176
177 cxx::H_list_t<File_factory_item> _file_factories;
178
 // Next free offset inside the current anonymous-memory pool dataspace.
179 l4_addr_t _anon_offset;
181
182 int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
183 int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
184 l4_addr_t *offset);
185
186 void align_mmap_start_and_length(void **start, size_t *length);
187 int munmap_regions(void *start, size_t len);
188
189 L4Re::Vfs::File_system *find_fs_from_type(char const *fstype) noexcept;
190};
191
/// Compare two C strings for equality.
/// Returns true iff both strings have identical contents.
static inline bool strequal(char const *a, char const *b)
{
  while (*a && *a == *b)
    {
      ++a;
      ++b;
    }
  return *a == *b;
}
198
// Register a file system in the global registry.
// Returns 0 on success, -EINVAL for a null pointer, -EEXIST if a file
// system with the same type string is already registered.
// NOTE(review): original line 202 is missing from this extracted listing;
// the body uses the unqualified name File_system, presumably brought into
// scope by a using-declaration on that line — confirm against the source.
199int
200Vfs::register_file_system(L4Re::Vfs::File_system *f) noexcept
201{
203
204 if (!f)
205 return -EINVAL;
206
 // Reject duplicate type strings.
207 for (File_system *c = _fs_registry; c; c = c->next())
208 if (strequal(c->type(), f->type()))
209 return -EEXIST;
210
 // Push onto the intrusive singly linked registry list.
211 f->next(_fs_registry);
212 _fs_registry = f;
213
214 return 0;
215}
216
// Remove a file system from the registry.
// Returns 0 on success, -EINVAL for a null pointer, -ENOENT if it was
// not registered.
// NOTE(review): original line 220 is missing from this extracted listing
// (likely the same using-declaration as in register_file_system) — confirm.
217int
218Vfs::unregister_file_system(L4Re::Vfs::File_system *f) noexcept
219{
221
222 if (!f)
223 return -EINVAL;
224
 // Walk the list via a pointer-to-link so unlinking needs no special case
 // for the head element.
225 File_system **p = &_fs_registry;
226
227 for (; *p; p = &(*p)->next())
228 if (*p == f)
229 {
230 *p = f->next();
231 f->next() = 0;
232 return 0;
233 }
234
235 return -ENOENT;
236}
237
// Linear search of the registry for a file system whose type string
// matches fstype; returns 0 (null) if none is registered.
// NOTE(review): the return-type line (original line 238,
// L4Re::Vfs::File_system *) is missing from this extracted listing.
239Vfs::find_fs_from_type(char const *fstype) noexcept
240{
241 L4Re::Vfs::File_system_list fsl(_fs_registry);
242 for (L4Re::Vfs::File_system_list::Iterator c = fsl.begin();
243 c != fsl.end(); ++c)
244 if (strequal(c->type(), fstype))
245 return *c;
246 return 0;
247}
248
// Return an iterable view over all registered file systems.
249L4Re::Vfs::File_system_list
250Vfs::file_system_list() noexcept
251{
252 return L4Re::Vfs::File_system_list(_fs_registry);
253}
254
// Look up a file system by type, attempting to dynamically load a module
// of that name (via Vfs_config::load_module) if it is not yet registered.
// Returns 0 (null) if the type is unknown and loading failed.
// NOTE(review): original lines 255 and 258 are missing from this extracted
// listing — by the uses below they were the return type
// (L4Re::Vfs::File_system *) and the declaration of the local `fs` — confirm.
256Vfs::get_file_system(char const *fstype) noexcept
257{
259 if ((fs = find_fs_from_type(fstype)))
260 return fs;
261
262 // Try to load a file system module dynamically
263 int res = Vfs_config::load_module(fstype);
264 if (res < 0)
265 return 0;
266
267 // Try again
268 return find_fs_from_type(fstype);
269}
270
271int
272Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
273{
274 if (!f)
275 return -EINVAL;
276
277 void *x = this->malloc(sizeof(File_factory_item));
278 if (!x)
279 return -ENOMEM;
280
281 auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
282 _file_factories.push_front(ff);
283 return 0;
284}
285
286int
287Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
288{
289 for (auto p: _file_factories)
290 {
291 if (p->f == f)
292 {
293 _file_factories.remove(p);
294 p->~File_factory_item();
295 this->free(p);
296 return 0;
297 }
298 }
299 return -ENOENT;
300}
301
302Ref_ptr<L4Re::Vfs::File_factory>
303Vfs::get_file_factory(int proto) noexcept
304{
305 for (auto p: _file_factories)
306 if (p->f->proto() == proto)
307 return p->f;
308
309 return Ref_ptr<L4Re::Vfs::File_factory>();
310}
311
312Ref_ptr<L4Re::Vfs::File_factory>
313Vfs::get_file_factory(char const *proto_name) noexcept
314{
315 for (auto p: _file_factories)
316 {
317 auto n = p->f->proto_name();
318 if (n)
319 {
320 char const *a = n;
321 char const *b = proto_name;
322 for (; *a && *b && *a == *b; ++a, ++b)
323 ;
324
325 if ((*a == 0) && (*b == 0))
326 return p->f;
327 }
328 }
329
330 return Ref_ptr<L4Re::Vfs::File_factory>();
331}
332
333int
334Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
335{
336 int fd = fds.alloc();
337 if (fd < 0)
338 return -EMFILE;
339
340 if (f)
341 fds.set(fd, f);
342
343 return fd;
344}
345
346Ref_ptr<L4Re::Vfs::File>
347Vfs::free_fd(int fd) noexcept
348{
349 Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
350
351 if (!f)
352 return Ref_ptr<>::Nil;
353
354 fds.free(fd);
355 return f;
356}
357
358
// Return the root directory of the VFS (the environment directory).
359Ref_ptr<L4Re::Vfs::File>
360Vfs::get_root() noexcept
361{
362 return cxx::ref_ptr(&_root);
363}
364
// Return the current working directory.
365Ref_ptr<L4Re::Vfs::File>
366Vfs::get_cwd() noexcept
367{
368 return _cwd;
369}
370
// Set the current working directory; silently ignores a Nil argument.
371void
372Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
373{
374 // FIXME: check for is dir
375 if (dir)
376 _cwd = dir;
377}
378
// Return the file bound to descriptor fd (Nil if unused).
379Ref_ptr<L4Re::Vfs::File>
380Vfs::get_file(int fd) noexcept
381{
382 return fds.get(fd);
383}
384
// Bind file f to descriptor fd (dup2-style), returning the previously
// bound file and an error code: (Nil, EBADF) for an out-of-range fd,
// (old file, 0) on success.
// NOTE(review): the return-type line (original line 385,
// cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>) is missing from this listing.
386Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
387{
388 if (!fds.check_fd(fd))
389 return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);
390
391 Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
392 fds.set(fd, f);
393 return cxx::pair(old, 0);
394}
395
396
// Look up `fd` into a local `fi` and bail out of the enclosing function
// with -err if the descriptor is unused (braced variant).
397#define GET_FILE_DBG(fd, err) \
398 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
399 if (!fi) \
400 { \
401 return -err; \
402 }
403
// Same as GET_FILE_DBG without the braces around the early return.
404#define GET_FILE(fd, err) \
405 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
406 if (!fi) \
407 return -err;
408
409void
410Vfs::align_mmap_start_and_length(void **start, size_t *length)
411{
412 l4_addr_t const s = reinterpret_cast<l4_addr_t>(*start);
413 size_t const o = s & (L4_PAGESIZE - 1);
414
415 *start = reinterpret_cast<void *>(l4_trunc_page(s));
416 *length = l4_round_page(*length + o);
417}
418
// Detach all regions overlapping [start, start+len) from the region map,
// releasing dataspace references for fully detached regions and taking an
// extra reference when a mapping is split. Returns 0 on success or a
// negative error from Rm::detach (notably -ENOENT if nothing is mapped).
// `start` must be page aligned.
// NOTE(review): original line 426 is missing from this extracted listing;
// by the uses below it declared the local dataspace cap `ds`
// (Cap<Dataspace> ds;) — confirm against the source.
419int
420Vfs::munmap_regions(void *start, size_t len)
421{
422 using namespace L4;
423 using namespace L4Re;
424
425 int err;
427 Cap<Rm> r = Env::env()->rm();
428
429 if (l4_addr_t(start) & (L4_PAGESIZE - 1))
430 return -EINVAL;
431
432 align_mmap_start_and_length(&start, &len);
433
 // Rm::detach may only process part of the range per call; loop until it
 // stops reporting Detach_again.
434 while (1)
435 {
436 DEBUG_LOG(debug_mmap, {
437 outstring("DETACH: start = 0x");
438 outhex32(l4_addr_t(start));
439 outstring(" len = 0x");
440 outhex32(len);
441 outstring("\n");
442 });
443 err = r->detach(l4_addr_t(start), len, &ds, This_task);
444 if (err < 0)
445 return err;
446
447 switch (err & Rm::Detach_result_mask)
448 {
449 case Rm::Split_ds:
 // A region was split in two; the region map keeps a second
 // reference to the dataspace, so account for it.
450 if (ds.is_valid())
451 L4Re::virt_cap_alloc->take(ds);
452 return 0;
453 case Rm::Detached_ds:
 // Region fully gone; drop the region map's reference.
454 if (ds.is_valid())
455 L4Re::virt_cap_alloc->release(ds);
456 break;
457 default:
458 break;
459 }
460
461 if (!(err & Rm::Detach_again))
462 return 0;
463 }
464}
465
466int
467Vfs::munmap(void *start, size_t len) L4_NOTHROW
468{
469 using namespace L4;
470 using namespace L4Re;
471
472 int err = 0;
473 Cap<Rm> r = Env::env()->rm();
474
475 // Fields for obtaining a list of areas for the calling process
476 long area_cnt = -1; // No. of areas in this process
477 Rm::Area const *area_array;
478 bool matches_area = false; // true if unmap parameters match an area
479
480 // First check if there are any areas matching the munmap request. Those
481 // might have been created by an mmap call using PROT_NONE as protection
482 // modifier.
483
484 area_cnt = r->get_areas((l4_addr_t) start, &area_array);
485
486 // It is enough to check for the very first entry, since get_areas will
487 // only return areas with a starting address equal or greater to <start>.
488 // However, we intend to unmap at most the area starting exactly at
489 // <start>.
490 if (area_cnt > 0)
491 {
492 size_t area_size = area_array[0].end - area_array[0].start + 1;
493
494 // Only free the area if the munmap parameters describe it exactly.
495 if (area_array[0].start == (l4_addr_t) start && area_size == len)
496 {
497 r->free_area((l4_addr_t) start);
498 matches_area = true;
499 }
500 }
501
502 // After clearing possible area reservations from PROT_NONE mappings, clear
503 // any regions in the address range specified. Note that errors shall be
504 // suppressed if an area was freed but no regions were found.
505 err = munmap_regions(start, len);
506 if (err == -ENOENT && matches_area)
507 return 0;
508
509 return err;
510}
511
512int
513Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
514{
515 *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
516
517 if (!ds->is_valid())
518 return -ENOMEM;
519
520 int err;
521 if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
522 return err;
523
524 DEBUG_LOG(debug_mmap, {
525 outstring("ANON DS ALLOCATED: size=");
526 outhex32(size);
527 outstring(" cap = 0x");
528 outhex32(ds->cap());
529 outstring("\n");
530 });
531
532 return 0;
533}
534
// Provide anonymous memory backing of `size` bytes: large requests get a
// dedicated dataspace, smaller ones are carved out of a shared pool
// dataspace (_anon_ds) at increasing offsets. On success *ds and *offset
// describe where the memory lives. With _early_oom set, backing memory is
// allocated eagerly so ENOMEM is reported here rather than on first touch.
535int
536Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
537 l4_addr_t *offset)
538{
539#if !defined(CONFIG_MMU)
540 // Small values for !MMU systems. These platforms do not have much memory
541 // typically and the memory must be instantly allocated.
542 enum
543 {
544 ANON_MEM_DS_POOL_SIZE = 256UL << 10, // size of a pool dataspace used for anon memory
545 ANON_MEM_MAX_SIZE = 32UL << 10, // requests at or above this size get a dedicated dataspace
546 };
547#elif defined(USE_BIG_ANON_DS)
548 enum
549 {
550 ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
551 ANON_MEM_MAX_SIZE = 32UL << 20, // requests at or above this size get a dedicated dataspace
552 };
553#else
554 enum
555 {
556 ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
557 ANON_MEM_MAX_SIZE = 0UL << 20, // 0: every request gets a dedicated dataspace
558 };
559#endif
560
 // Large request: dedicated dataspace, offset 0.
561 if (size >= ANON_MEM_MAX_SIZE)
562 {
563 int err;
564 if ((err = alloc_ds(size, ds)) < 0)
565 return err;
566
567 *offset = 0;
568
569 if (!_early_oom)
570 return err;
571
572 return (*ds)->allocate(0, size);
573 }
574
 // Small request: start a new pool if there is none or the current pool
 // cannot fit the request.
575 if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
576 {
577 int err;
578 if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
579 return err;
580
581 _anon_offset = 0;
582 _anon_ds = *ds;
583 }
584 else
585 *ds = _anon_ds;
586
587 if (_early_oom)
588 {
589 if (int err = (*ds)->allocate(_anon_offset, size))
590 return err;
591 }
592
 // Hand out the current pool slot and bump the allocation cursor.
593 *offset = _anon_offset;
594 _anon_offset += size;
595 return 0;
596}
597
// POSIX mmap backend (mmap2 flavor: the offset is in 4KiB units).
// Handles: pure address-space reservations (flag 0x1000000 or PROT_NONE),
// anonymous memory via alloc_anon_mem, file mappings via the file's
// dataspace, MAP_PRIVATE copy-in, and MAP_FIXED overmapping. On success
// *resptr receives the mapped address.
// NOTE(review): this extracted listing is missing original lines 651,
// 693-694, 696, 702 and 761-762 (see jumps in the embedded numbering);
// by the uses below these declared the Shared_cap `ds`, the Unique_region
// locals `src`/`dst`, the Rm flag arguments of the two copy attaches, and
// the rights expression of the final make_cap — confirm against the source.
598int
599Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_offset,
600 void **resptr) L4_NOTHROW
601{
602 DEBUG_LOG(debug_mmap, {
603 outstring("MMAP params: ");
604 outstring("start = 0x");
605 outhex32(l4_addr_t(start));
606 outstring(", len = 0x");
607 outhex32(len);
608 outstring(", prot = 0x");
609 outhex32(prot);
610 outstring(", flags = 0x");
611 outhex32(flags);
612 outstring(", offset = 0x");
613 outhex32(page4k_offset);
614 outstring("\n");
615 });
616
617 using namespace L4Re;
 // Convert the 4KiB-unit offset into a byte offset.
618 off64_t offset = l4_trunc_page(page4k_offset << 12);
619
620 if (flags & MAP_FIXED)
621 if (l4_addr_t(start) & (L4_PAGESIZE - 1))
622 return -EINVAL;
623
624 align_mmap_start_and_length(&start, &len);
625
626 // special code to just reserve an area of the virtual address space
627 // Same behavior should be exposed when mapping with PROT_NONE. Mind that
628 // PROT_NONE can only be specified exclusively, since it is defined to 0x0.
629 if ((flags & 0x1000000) || (prot == PROT_NONE))
630 {
631 int err;
632 L4::Cap<Rm> r = Env::env()->rm();
633 l4_addr_t area = reinterpret_cast<l4_addr_t>(start);
634 err = r->reserve_area(&area, len, L4Re::Rm::F::Search_addr);
635 if (err < 0)
636 return err;
637
638 *resptr = reinterpret_cast<void*>(area);
639
640 DEBUG_LOG(debug_mmap, {
641 outstring(" MMAP reserved area: 0x");
642 outhex32(area);
643 outstring(" length= 0x");
644 outhex32(len);
645 outstring("\n");
646 });
647
648 return 0;
649 }
650
652 l4_addr_t anon_offset = 0;
653 L4Re::Rm::Flags rm_flags(0);
654
 // Anonymous and private mappings need freshly allocated backing memory.
655 if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
656 {
657 rm_flags |= L4Re::Rm::F::Detach_free;
658
659 int err = alloc_anon_mem(len, &ds, &anon_offset);
660 if (err)
661 return err;
662
663 DEBUG_LOG(debug_mmap, {
664 outstring(" USE ANON MEM: 0x");
665 outhex32(ds.cap());
666 outstring(" offs = 0x");
667 outhex32(anon_offset);
668 outstring("\n");
669 });
670 }
671
 // File-backed mapping: fetch the file's dataspace.
 // Note: the local `fds` (the file's dataspace cap) shadows the fd table
 // member of the same name for the rest of this scope.
672 if (!(flags & MAP_ANONYMOUS))
673 {
674 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
675 if (!fi)
676 return -EBADF;
677
678 L4::Cap<L4Re::Dataspace> fds = fi->data_space();
679
680 if (!fds.is_valid())
681 return -EINVAL;
682
683 if (len + offset > l4_round_page(fds->size()))
684 return -EINVAL;
685
686 if (flags & MAP_PRIVATE)
687 {
688 DEBUG_LOG(debug_mmap, outstring("COW\n"););
 // Prefer a server-side copy; fall back to mapping both
 // dataspaces and copying manually if the server refuses.
689 int err = ds->copy_in(anon_offset, fds, offset, len);
690 if (err == -L4_EINVAL)
691 {
692 L4::Cap<Rm> r = Env::env()->rm();
695 err = r->attach(&src, len,
697 fds, offset);
698 if (err < 0)
699 return err;
700
701 err = r->attach(&dst, len,
703 ds.get(), anon_offset);
704 if (err < 0)
705 return err;
706
707 memcpy(dst.get(), src.get(), len);
708 }
709 else if (err)
710 return err;
711
712 offset = anon_offset;
713 }
714 else
715 {
 // Shared mapping: reference the file's dataspace directly.
716 L4Re::virt_cap_alloc->take(fds);
717 ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
718 }
719 }
720 else
721 offset = anon_offset;
722
723
 // Never map at address 0; start the search at the first page instead.
724 if (!(flags & MAP_FIXED) && start == 0)
725 start = reinterpret_cast<void*>(L4_PAGESIZE);
726
727 char *data = static_cast<char *>(start);
728 L4::Cap<Rm> r = Env::env()->rm();
729 l4_addr_t overmap_area = L4_INVALID_ADDR;
730
731 int err;
732 if (flags & MAP_FIXED)
733 {
 // Temporarily reserve the target range so the attach below cannot
 // race with other allocations, then clear existing mappings.
734 overmap_area = l4_addr_t(start);
735
736 err = r->reserve_area(&overmap_area, len);
737 if (err < 0)
738 overmap_area = L4_INVALID_ADDR;
739
740 rm_flags |= Rm::F::In_area;
741
742 // Make sure to remove old mappings residing at the respective address
743 // range. If none exists, we are fine as well, allowing us to ignore
744 // ENOENT here.
745 err = munmap_regions(start, len);
746 if (err && err != -ENOENT)
747 return err;
748 }
749
 // Translate POSIX protection bits into region-map flags.
750 if (!(flags & MAP_FIXED))
751 rm_flags |= Rm::F::Search_addr;
752 if (prot & PROT_READ)
753 rm_flags |= Rm::F::R;
754 if (prot & PROT_WRITE)
755 rm_flags |= Rm::F::W;
756 if (prot & PROT_EXEC)
757 rm_flags |= Rm::F::X;
758
759 err = r->attach(&data, len, rm_flags,
760 L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
763 offset);
764
765 DEBUG_LOG(debug_mmap, {
766 outstring(" MAPPED: 0x");
767 outhex32(ds.cap());
768 outstring(" addr: 0x");
769 outhex32(l4_addr_t(data));
770 outstring(" bytes: 0x");
771 outhex32(len);
772 outstring(" offset: 0x");
773 outhex32(offset);
774 outstring(" err = ");
775 outdec(err);
776 outstring("\n");
777 });
778
779
 // Drop the temporary MAP_FIXED reservation, if any.
780 if (overmap_area != L4_INVALID_ADDR)
781 r->free_area(overmap_area);
782
783 if (err < 0)
784 return err;
785
786 l4_assert (!(start && !data));
787
788 // release ownership of the attached DS
789 ds.release();
790 *resptr = data;
791
792 return 0;
793}
794
795namespace {
 // RAII helper for a region-map area reservation: frees the reserved
 // area automatically when the object goes out of scope.
 // NOTE(review): original line 799 is missing from this extracted listing;
 // by the uses below it declared the region-map capability member `r`
 // (L4::Cap<L4Re::Rm> r;) — confirm against the source.
796 class Auto_area
797 {
798 public:
 // Start address of the reserved area, or L4_INVALID_ADDR if none held.
800 l4_addr_t a;
801
802 explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
803 : r(r), a(a) {}
804
 // Reserve an area of `sz` bytes at `_a` (releasing any currently held
 // area first); on failure the object holds no area. Returns the
 // reserve_area error code.
805 int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
806 {
807 free();
808 a = _a;
809 int e = r->reserve_area(&a, sz, flags);
810 if (e)
811 a = L4_INVALID_ADDR;
812 return e;
813 }
814
 // Release the held area reservation, if any.
815 void free()
816 {
817 if (is_valid())
818 {
819 r->free_area(a);
820 a = L4_INVALID_ADDR;
821 }
822 }
823
824 bool is_valid() const { return a != L4_INVALID_ADDR; }
825
826 ~Auto_area() { free(); }
827 };
828}
829
// POSIX mremap backend: shrink in place, grow in place when the space
// behind the mapping is free, or move the mapping when MREMAP_MAYMOVE is
// set. Grown portions are padded with fresh anonymous memory. On success
// *new_addr receives the (possibly moved) mapping address.
// NOTE(review): this extracted listing is missing original lines 910, 943
// and 1002; by the uses below they declared the dataspace capability
// locals `tds` and `ds` used with r->find()/alloc_anon_mem — confirm
// against the source.
830int
831Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
832 void **new_addr) L4_NOTHROW
833{
834 using namespace L4Re;
835
836 DEBUG_LOG(debug_mmap, {
837 outstring("Mremap: addr = 0x");
838 outhex32((l4_umword_t)old_addr);
839 outstring(" old_size = 0x");
840 outhex32(old_size);
841 outstring(" new_size = 0x");
842 outhex32(new_size);
843 outstring("\n");
844 });
845
 // MREMAP_FIXED is only valid together with MREMAP_MAYMOVE (Linux rule).
846 if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
847 return -EINVAL;
848
 // The old address must be page aligned.
849 l4_addr_t oa = l4_trunc_page(reinterpret_cast<l4_addr_t>(old_addr));
850 if (oa != reinterpret_cast<l4_addr_t>(old_addr))
851 return -EINVAL;
852
853 bool const fixed = flags & MREMAP_FIXED;
854 bool const maymove = flags & MREMAP_MAYMOVE;
855
856 L4::Cap<Rm> r = Env::env()->rm();
857
858 // sanitize input parameters to multiples of pages
859 old_size = l4_round_page(old_size);
860 new_size = l4_round_page(new_size);
861
 // Shrinking or keeping the size needs no relocation.
862 if (!fixed)
863 {
864 if (new_size < old_size)
865 {
866 *new_addr = old_addr;
867 return munmap(reinterpret_cast<void*>(oa + new_size),
868 old_size - new_size);
869 }
870
871 if (new_size == old_size)
872 {
873 *new_addr = old_addr;
874 return 0;
875 }
876 }
877
 // Reserve the old range so its regions cannot change underneath us.
878 Auto_area old_area(r);
879 int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
880 if (err < 0)
881 return -EINVAL;
882
 // pad_addr: where the newly grown portion gets anonymous backing.
883 l4_addr_t pad_addr;
884 Auto_area new_area(r);
885 if (fixed)
886 {
887 l4_addr_t na = l4_trunc_page(reinterpret_cast<l4_addr_t>(*new_addr));
888 if (na != reinterpret_cast<l4_addr_t>(*new_addr))
889 return -EINVAL;
890
891 // check if the current virtual memory area can be expanded
892 int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
893 if (err < 0)
894 return err;
895
896 pad_addr = na;
897 // unmap all stuff and remap ours ....
898 }
899 else
900 {
 // Try to grow in place: reserve the range right behind the mapping.
901 l4_addr_t ta = oa + old_size;
902 unsigned long ts = new_size - old_size;
903 // check if the current virtual memory area can be expanded
904 long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
905 if (!maymove && err)
906 return -ENOMEM;
907
908 L4Re::Rm::Offset toffs;
909 L4Re::Rm::Flags tflags;
911
912 err = r->find(&ta, &ts, &toffs, &tflags, &tds);
913
914 // there is enough space to expand the mapping in place
915 if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
916 {
917 old_area.free(); // pad at the original address
918 pad_addr = oa + old_size;
919 *new_addr = old_addr;
920 }
921 else if (!maymove)
922 return -ENOMEM;
923 else
924 {
925 // search for a new area to remap
926 err = new_area.reserve(0, new_size, Rm::F::Search_addr);
927 if (err < 0)
928 return -ENOMEM;
929
930 pad_addr = new_area.a + old_size;
931 *new_addr = reinterpret_cast<void *>(new_area.a);
932 }
933 }
934
 // Relocation: walk all regions of the old range and re-attach each at
 // the corresponding offset inside the new area before detaching it.
935 if (old_area.is_valid())
936 {
937 unsigned long size = old_size;
938
939 l4_addr_t a = old_area.a;
940 unsigned long s = 1;
941 L4Re::Rm::Offset o;
942 L4Re::Rm::Flags f;
944
945 while (r->find(&a, &s, &o, &f, &ds) >= 0 && !(f & Rm::F::In_area))
946 {
 // Clamp the found region to the old range's bounds.
947 if (a < old_area.a)
948 {
949 auto d = old_area.a - a;
950 a = old_area.a;
951 s -= d;
952 o += d;
953 }
954
955 if (a + s > old_area.a + old_size)
956 s = old_area.a + old_size - a;
957
958 l4_addr_t x = a - old_area.a + new_area.a;
959
960 int err = r->attach(&x, s, Rm::F::In_area | f,
961 L4::Ipc::make_cap(ds, f.cap_rights()), o);
962 if (err < 0)
963 return err;
964
965 // count the new attached ds reference
966 L4Re::virt_cap_alloc->take(ds);
967
968 err = r->detach(a, s, &ds, This_task,
969 Rm::Detach_exact | Rm::Detach_keep);
970 if (err < 0)
971 return err;
972
973 switch (err & Rm::Detach_result_mask)
974 {
975 case Rm::Split_ds:
976 // add a reference as we split up a mapping
977 if (ds.is_valid())
978 L4Re::virt_cap_alloc->take(ds);
979 break;
980 case Rm::Detached_ds:
981 if (ds.is_valid())
982 L4Re::virt_cap_alloc->release(ds);
983 break;
984 default:
985 break;
986 }
987
988 if (size <= s)
989 break;
990 a += s;
991 size -= s;
992 s = 1;
993 }
994
995 old_area.free();
996 }
997
 // Back the grown portion with fresh anonymous memory.
998 if (old_size < new_size)
999 {
1000 l4_addr_t const pad_sz = new_size - old_size;
1001 l4_addr_t toffs;
1003 int err = alloc_anon_mem(pad_sz, &tds, &toffs);
1004 if (err)
1005 return err;
1006
1007 // FIXME: must get the protection rights from the old
1008 // mapping and use the same here, for now just use RWX
1009 err = r->attach(&pad_addr, pad_sz,
1010 Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
1011 L4::Ipc::make_cap_rw(tds.get()), toffs);
1012 if (err < 0)
1013 return err;
1014
1015 // release ownership of tds, the region map is now the new owner
1016 tds.release();
1017 }
1018
1019 return 0;
1020}
1021
1022int
1023Vfs::mprotect(const void * /* a */, size_t /* sz */, int prot) L4_NOTHROW
1024{
1025 return (prot & PROT_WRITE) ? -1 : 0;
1026}
1027
// msync is a no-op in this backend; always reports success.
1028int
1029Vfs::msync(void *, size_t, int) L4_NOTHROW
1030{ return 0; }
1031
// madvise is a no-op in this backend; always reports success.
1032int
1033Vfs::madvise(void *, size_t, int) L4_NOTHROW
1034{ return 0; }
1035
1036}
1037
// Global pointer to the VFS backend used by the POSIX layer; the public
// symbol l4re_env_posix_vfs_ops is an alias for it.
1038L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
1039extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
1040
1041namespace {
 // Mount-tree node allocated dynamically (unlike the static root); its
 // operators new/delete route through the VFS backend allocator.
1042 class Real_mount_tree : public L4Re::Vfs::Mount_tree
1043 {
1044 public:
1045 explicit Real_mount_tree(char *n) : Mount_tree(n) {}
1046
1047 void *operator new (size_t size)
1048 { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }
1049
1050 void operator delete (void *mem)
1051 { __rtld_l4re_env_posix_vfs_ops->free(mem); }
1052 };
1053}
1054
// Mount `dir` at `path`: resolve as much of the path as already exists in
// the mount tree, create intermediate nodes for the remaining components,
// and mount `dir` on the final one. Returns 0 on success, -EEXIST if the
// full path already exists, -ENOMEM on allocation failure, -EINVAL on a
// degenerate path or missing root.
// NOTE(review): original lines 1055 and 1067 are missing from this
// extracted listing; by the uses below line 1067 declared the lookup
// cursor `base` (cxx::Ref_ptr<Mount_tree> base;) — confirm against the
// source.
1056int
1057Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
1058{
1059 using L4Re::Vfs::File;
1060 using L4Re::Vfs::Mount_tree;
1061 using L4Re::Vfs::Path;
1062
1063 cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
1064 if (!root)
1065 return -EINVAL;
1066
 // `p` is the unresolved remainder of the path; `base` the deepest
 // existing node.
1068 Path p = root->lookup(Path(path), &base);
1069
1070 while (!p.empty())
1071 {
1072 Path f = p.strip_first();
1073
1074 if (f.empty())
1075 return -EEXIST;
1076
 // Duplicate the component name; the new tree node takes ownership.
1077 char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
1078 if (!name)
1079 return -ENOMEM;
1080
1081 auto nt = cxx::make_ref_obj<Real_mount_tree>(name);
1082 if (!nt)
1083 {
1084 __rtld_l4re_env_posix_vfs_ops->free(name);
1085 return -ENOMEM;
1086 }
1087
1088 base->add_child_node(nt);
1089 base = nt;
1090
 // Last component reached: perform the actual mount.
1091 if (p.empty())
1092 {
1093 nt->mount(dir);
1094 return 0;
1095 }
1096 }
1097
1098 return -EINVAL;
1099}
1100
1101#undef DEBUG_LOG
1102#undef GET_FILE_DBG
1103#undef GET_FILE
static Env const * env() noexcept
Returns the initial environment for the current task.
Definition env:103
L4::Cap< Log > log() const noexcept
Object-capability to the logging service.
Definition env:133
Unique region.
Definition rm:435
T get() const noexcept
Return the address.
Definition rm:508
Region map.
Definition rm:95
Basic interface for an L4Re::Vfs file system.
Definition vfs.h:832
The basic interface for an open POSIX file.
Definition vfs.h:441
Interface for the POSIX backends of an application.
Definition vfs.h:1084
bool is_valid() const noexcept
Test whether the capability is a valid capability index (i.e., not L4_INVALID_CAP).
Definition capability.h:57
C++ interface for capabilities.
Definition capability.h:219
Basic element type for a double-linked H_list.
Definition hlist:34
Helper type to distinguish the operator new version that does not throw exceptions.
Definition std_alloc:30
A reference-counting pointer with automatic cleanup.
Definition ref_ptr:82
Dataspace interface.
Environment interface.
unsigned int l4_size_t
Unsigned size type.
Definition l4int.h:35
unsigned long l4_umword_t
Unsigned machine word.
Definition l4int.h:51
unsigned long l4_addr_t
Address type.
Definition l4int.h:45
@ L4_EINVAL
Invalid argument.
Definition err.h:57
@ L4_CAP_FPAGE_RO
Read right for capability flex-pages.
Definition __l4_fpage.h:179
@ L4_CAP_FPAGE_RW
Read and interface specific 'W' right for capability flex-pages.
Definition __l4_fpage.h:195
l4_addr_t l4_trunc_page(l4_addr_t address) L4_NOTHROW
Round an address down to the next lower page boundary.
Definition consts.h:437
l4_addr_t l4_round_page(l4_addr_t address) L4_NOTHROW
Round address up to the next page.
Definition consts.h:462
#define L4_PAGESIZE
Minimal page size (in bytes).
Definition consts.h:380
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:494
#define L4_NOTHROW
Mark a function declaration and definition as never throwing an exception.
Definition compiler.h:188
Functionality for invoking the kernel debugger.
void outhex32(l4_uint32_t number)
Output a 32-bit unsigned hexadecimal number via the kernel debugger.
Definition kdebug.h:282
void outstring(char const *text)
Output a string via the kernel debugger.
Definition kdebug.h:235
void outdec(l4_mword_t number)
Output a decimal unsigned machine word via the kernel debugger.
Definition kdebug.h:332
L4Re C++ Interfaces.
Definition l4re.dox:17
L4::Detail::Shared_cap_impl< T, Smart_count_cap< L4_FP_ALL_SPACES > > Shared_cap
Shared capability that implements automatic free and unmap of the capability selector.
Definition shared_cap:44
Cap< T > make_cap(L4::Cap< T > cap, unsigned rights) noexcept
Make an L4::Ipc::Cap<T> for the given capability and rights.
Definition ipc_types:649
Cap< T > make_cap_rw(L4::Cap< T > cap) noexcept
Make an L4::Ipc::Cap<T> for the given capability with L4_CAP_FPAGE_RW rights.
Definition ipc_types:659
L4 low-level kernel interface.
Definition io_regblock.h:19
Our C++ library.
Definition arith:22
Pair implementation.
Region mapper interface.
Shared_cap / Shared_del_cap.
@ RW
Readable and writable region.
Definition rm:150
@ R
Readable region.
Definition rm:144
@ Detach_free
Free the portion of the data space after detach.
Definition rm:157
@ Search_addr
Search for a suitable address range.
Definition rm:125
A range of virtual addresses.
Definition rm:681
l4_addr_t start
First address of the range.
Definition rm:683
l4_addr_t end
Last address of the range.
Definition rm:685
Double-linked list of typed H_list_item_t elements.
Definition hlist:260
Pair of two values.
Definition pair:39
Low-level assert implementation.
#define l4_assert(expr)
Low-level assert.
Definition assert.h:43