L4Re Operating System Framework
Interface and Usage Documentation
region_mapping
// -*- Mode: C++ -*-
// vim:ft=cpp
/*
 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
 *               Alexander Warg <warg@os.inf.tu-dresden.de>,
 *               Björn Döbel <doebel@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */

#pragma once

#include <l4/cxx/avl_map>
#include <l4/sys/types.h>
#include <l4/re/rm>

namespace L4Re { namespace Util {
class Region
{
private:
  l4_addr_t _start, _end;

public:
  Region() noexcept : _start(~0UL), _end(~0UL) {}
  Region(l4_addr_t addr) noexcept : _start(addr), _end(addr) {}
  Region(l4_addr_t start, l4_addr_t end) noexcept
  : _start(start), _end(end) {}
  l4_addr_t start() const noexcept { return _start; }
  l4_addr_t end() const noexcept { return _end; }
  unsigned long size() const noexcept { return end() - start() + 1; }
  bool invalid() const noexcept { return _start == ~0UL && _end == ~0UL; }
  bool operator < (Region const &o) const noexcept
  { return end() < o.start(); }
  bool contains(Region const &o) const noexcept
  { return o.start() >= start() && o.end() <= end(); }
  bool operator == (Region const &o) const noexcept
  { return o.start() == start() && o.end() == end(); }
  ~Region() noexcept {}
};
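// Note: Region's operator < orders regions so that two regions compare
// "equivalent" exactly when they overlap (neither lies entirely below the
// other). The AVL maps used by Region_map below rely on this property:
// looking up a single-address Region (or any overlapping range) with
// find_node() yields a region that intersects it, which is what find() and
// find_free() need.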

template< typename DS, typename OPS >
class Region_handler
{
private:
  L4Re::Rm::Offset _offs;
  DS _mem;
  l4_cap_idx_t _client_cap = L4_INVALID_CAP;
  L4Re::Rm::Region_flags _flags = L4Re::Rm::Region_flags(0);

public:
  typedef DS Dataspace;
  typedef OPS Ops;
  typedef typename OPS::Map_result Map_result;

  Region_handler() noexcept : _offs(0), _mem(), _flags() {}
  Region_handler(Dataspace const &mem, l4_cap_idx_t client_cap,
                 L4Re::Rm::Offset offset = 0,
                 L4Re::Rm::Region_flags flags = L4Re::Rm::Region_flags(0)) noexcept
  : _offs(offset), _mem(mem), _client_cap(client_cap), _flags(flags)
  {}

  Dataspace const &memory() const noexcept
  {
    return _mem;
  }

  l4_cap_idx_t client_cap_idx() const noexcept
  {
    return _client_cap;
  }

  L4Re::Rm::Offset offset() const noexcept
  {
    return _offs;
  }

  constexpr bool is_ro() const noexcept
  {
    return !(_flags & L4Re::Rm::F::W);
  }

  L4Re::Rm::Region_flags caching() const noexcept
  {
    return _flags & L4Re::Rm::F::Caching_mask;
  }

  L4Re::Rm::Region_flags flags() const noexcept
  {
    return _flags;
  }

  Region_handler operator + (l4_int64_t offset) const noexcept
  {
    Region_handler n = *this; n._offs += offset; return n;
  }

  void free(l4_addr_t start, unsigned long size) const noexcept
  {
    Ops::free(this, start, size);
  }

  int map(l4_addr_t addr, Region const &r, bool writable,
          Map_result *result) const
  {
    return Ops::map(this, addr, r, writable, result);
  }

  int map_info(l4_addr_t *start_addr, l4_addr_t *end_addr) const
  {
    return Ops::map_info(this, start_addr, end_addr);
  }

};
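// The OPS policy passed to Region_handler is expected to provide, besides the
// Map_result type, three static functions that the wrappers above forward to:
//
//   static int  map(Region_handler const *h, l4_addr_t addr, Region const &r,
//                   bool writable, Map_result *result);
//   static void free(Region_handler const *h, l4_addr_t start, unsigned long size);
//   static int  map_info(Region_handler const *h, l4_addr_t *start_addr,
//                        l4_addr_t *end_addr);
//
// map_info() returns a positive value if the dataspace dictates where it has
// to be mapped, 0 if it imposes no constraint, and a negative error code on
// failure (see Region_map::attach() below for how the result is interpreted).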


template< typename Hdlr, template<typename T> class Alloc >
class Region_map
{
protected:
  typedef cxx::Avl_map< Region, Hdlr, cxx::Lt_functor, Alloc > Tree;
  Tree _rm; ///< Map of attached regions
  Tree _am; ///< Map of reserved areas

private:
  l4_addr_t _start;
  l4_addr_t _end;

protected:
  void set_limits(l4_addr_t start, l4_addr_t end) noexcept
  {
    _start = start;
    _end = end;
  }

public:
  typedef typename Tree::Item_type Item;
  typedef typename Tree::Node Node;
  typedef typename Tree::Key_type Key_type;
  typedef Hdlr Region_handler;

  typedef typename Tree::Iterator Iterator;
  typedef typename Tree::Const_iterator Const_iterator;
  typedef typename Tree::Rev_iterator Rev_iterator;
  typedef typename Tree::Const_rev_iterator Const_rev_iterator;

  Iterator begin() noexcept { return _rm.begin(); }
  Const_iterator begin() const noexcept { return _rm.begin(); }
  Iterator end() noexcept { return _rm.end(); }
  Const_iterator end() const noexcept { return _rm.end(); }

  Iterator area_begin() noexcept { return _am.begin(); }
  Const_iterator area_begin() const noexcept { return _am.begin(); }
  Iterator area_end() noexcept { return _am.end(); }
  Const_iterator area_end() const noexcept { return _am.end(); }
  Node area_find(Key_type const &c) const noexcept { return _am.find_node(c); }

  l4_addr_t min_addr() const noexcept { return _start; }
  l4_addr_t max_addr() const noexcept { return _end; }

  Region_map(l4_addr_t start, l4_addr_t end) noexcept : _start(start), _end(end) {}

  Node find(Key_type const &key) const noexcept
  {
    Node n = _rm.find_node(key);
    if (!n)
      return Node();

    // 'find' should find any region overlapping with the searched one, the
    // caller should check for further requirements
    if (0)
      if (!n->first.contains(key))
        return Node();

    return n;
  }

  Node lower_bound(Key_type const &key) const noexcept
  {
    Node n = _rm.lower_bound_node(key);
    return n;
  }

  Node lower_bound_area(Key_type const &key) const noexcept
  {
    Node n = _am.lower_bound_node(key);
    return n;
  }

  l4_addr_t attach_area(l4_addr_t addr, unsigned long size,
                        L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
                        unsigned char align = L4_PAGESHIFT) noexcept
  {
    if (size < 2)
      return L4_INVALID_ADDR;

    Region c;

    if (!(flags & L4Re::Rm::F::Search_addr))
      {
        c = Region(addr, addr + size - 1);
        Node r = _am.find_node(c);
        if (r)
          return L4_INVALID_ADDR;
      }

    while (flags & L4Re::Rm::F::Search_addr)
      {
        if (addr < min_addr() || (addr + size - 1) > max_addr())
          addr = min_addr();
        addr = find_free(addr, max_addr(), size, align, flags);
        if (addr == L4_INVALID_ADDR)
          return L4_INVALID_ADDR;

        c = Region(addr, addr + size - 1);
        Node r = _am.find_node(c);
        if (!r)
          break;

        if (r->first.end() >= max_addr())
          return L4_INVALID_ADDR;

        addr = r->first.end() + 1;
      }

    if (_am.insert(c, Hdlr(typename Hdlr::Dataspace(), 0, 0, flags.region_flags())).second == 0)
      return addr;

    return L4_INVALID_ADDR;
  }

  bool detach_area(l4_addr_t addr) noexcept
  {
    if (_am.remove(addr))
      return false;

    return true;
  }

  void *attach(void *addr, unsigned long size, Hdlr const &hdlr,
               L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
               unsigned char align = L4_PAGESHIFT) noexcept
  {
    if (size < 2)
      return L4_INVALID_PTR;

    l4_addr_t beg, end;
    int err = hdlr.map_info(&beg, &end);
    if (err > 0)
      {
        // Mapping address determined by underlying dataspace. Make sure we
        // prevent any additional alignment. We already know the place!
        beg += hdlr.offset();
        end = beg + size - 1U;
        align = L4_PAGESHIFT;

        // In case of exact mappings, the supplied address must match because
        // we cannot remap.
        if (!(flags & L4Re::Rm::F::Search_addr)
            && reinterpret_cast<l4_addr_t>(addr) != beg)
          return L4_INVALID_PTR;

        // When searching for a suitable address, the start must cover the
        // dataspace beginning to "find" the right spot.
        if ((flags & L4Re::Rm::F::Search_addr)
            && reinterpret_cast<l4_addr_t>(addr) > beg)
          return L4_INVALID_PTR;
      }
    else if (err == 0)
      {
        beg = reinterpret_cast<l4_addr_t>(addr);
        end = max_addr();
      }
    else if (err < 0)
      return L4_INVALID_PTR;

    if (flags & L4Re::Rm::F::In_area)
      {
        Node r = _am.find_node(Region(beg, beg + size - 1));
        if (!r || (r->second.flags() & L4Re::Rm::F::Reserved))
          return L4_INVALID_PTR;

        end = r->first.end();
      }

    if (flags & L4Re::Rm::F::Search_addr)
      {
        beg = find_free(beg, end, size, align, flags);
        if (beg == L4_INVALID_ADDR)
          return L4_INVALID_PTR;
      }

    if (!(flags & L4Re::Rm::F::In_area)
        && _am.find_node(Region(beg, beg + size - 1)))
      return L4_INVALID_PTR;

    if (beg < min_addr() || beg + size - 1 > end)
      return L4_INVALID_PTR;

    if (_rm.insert(Region(beg, beg + size - 1), hdlr).second == 0)
      return reinterpret_cast<void*>(beg);

    return L4_INVALID_PTR;
  }

  int detach(void *addr, unsigned long sz, unsigned flags,
             Region *reg, Hdlr *hdlr) noexcept
  {
    l4_addr_t a = reinterpret_cast<l4_addr_t>(addr);
    Region dr(a, a + sz - 1);
    Region res(~0UL, 0);

    Node r = find(dr);
    if (!r)
      return -L4_ENOENT;

    Region g = r->first;
    Hdlr const &h = r->second;

    if (flags & L4Re::Rm::Detach_overlap || dr.contains(g))
      {
        // successful removal of the AVL tree item also frees the node
        Hdlr h_copy = h;

        if (_rm.remove(g))
          return -L4_ENOENT;

        if (!(flags & L4Re::Rm::Detach_keep) && (h_copy.flags() & L4Re::Rm::F::Detach_free))
          h_copy.free(0, g.size());

        if (hdlr)
          *hdlr = h_copy;
        if (reg)
          *reg = g;

        if (find(dr))
          return Rm::Detached_ds | Rm::Detach_again;
        else
          return Rm::Detached_ds;
      }
    else if (dr.start() <= g.start())
      {
        // move the start of a region

        if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
          h.free(0, dr.end() + 1 - g.start());

        unsigned long sz = dr.end() + 1 - g.start();
        Item &cn = const_cast<Item &>(*r);
        cn.first = Region(dr.end() + 1, g.end());
        cn.second = cn.second + sz;
        if (hdlr)
          *hdlr = Hdlr();
        if (reg)
          *reg = Region(g.start(), dr.end());
        if (find(dr))
          return Rm::Kept_ds | Rm::Detach_again;
        else
          return Rm::Kept_ds;
      }
    else if (dr.end() >= g.end())
      {
        // move the end of a region

        if (!(flags & L4Re::Rm::Detach_keep)
            && (h.flags() & L4Re::Rm::F::Detach_free))
          h.free(dr.start() - g.start(), g.end() + 1 - dr.start());

        Item &cn = const_cast<Item &>(*r);
        cn.first = Region(g.start(), dr.start() - 1);
        if (hdlr)
          *hdlr = Hdlr();
        if (reg)
          *reg = Region(dr.start(), g.end());

        if (find(dr))
          return Rm::Kept_ds | Rm::Detach_again;
        else
          return Rm::Kept_ds;
      }
    else if (g.contains(dr))
      {
        // split a single region that contains the new region

        if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
          h.free(dr.start() - g.start(), dr.size());

        // first move the end of the existing region before the new one
        Item &cn = const_cast<Item &>(*r);
        cn.first = Region(g.start(), dr.start() - 1);

        int err;

        // insert a second region for the remaining tail of
        // the old existing region
        err = _rm.insert(Region(dr.end() + 1, g.end()),
                         h + (dr.end() + 1 - g.start())).second;

        if (err)
          return err;

        if (hdlr)
          *hdlr = h;
        if (reg)
          *reg = dr;
        return Rm::Split_ds;
      }
    return -L4_ENOENT;
  }

  l4_addr_t find_free(l4_addr_t start, l4_addr_t end, l4_addr_t size,
                      unsigned char align, L4Re::Rm::Flags flags) const noexcept;

};
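// detach() returns one of Rm::Detached_ds, Rm::Kept_ds or Rm::Split_ds,
// depending on whether the matching region was removed completely, shrunk at
// its start or end, or split into two regions. Rm::Detach_again is OR'ed into
// the result when further regions still overlap the requested range, i.e. the
// caller should call detach() again.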

template< typename Hdlr, template<typename T> class Alloc >
l4_addr_t
Region_map<Hdlr, Alloc>::find_free(l4_addr_t start, l4_addr_t end,
                                   unsigned long size, unsigned char align,
                                   L4Re::Rm::Flags flags) const noexcept
{
  l4_addr_t addr = start;

  if (addr == ~0UL || addr < min_addr() || addr >= end)
    addr = min_addr();

  addr = l4_round_size(addr, align);
  Node r;

  for (;;)
    {
      if (addr > 0 && addr - 1 > end - size)
        return L4_INVALID_ADDR;

      Region c(addr, addr + size - 1);
      r = _rm.find_node(c);

      if (!r)
        {
          if (!(flags & L4Re::Rm::F::In_area) && (r = _am.find_node(c)))
            {
              if (r->first.end() > end - size)
                return L4_INVALID_ADDR;

              addr = l4_round_size(r->first.end() + 1, align);
              continue;
            }
          break;
        }
      else if (r->first.end() > end - size)
        return L4_INVALID_ADDR;

      addr = l4_round_size(r->first.end() + 1, align);
    }

  if (!r)
    return addr;

  return L4_INVALID_ADDR;
}

}}
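
The classes above are policy templates: Region_handler couples a dataspace type DS with a paging policy OPS, and Region_map keeps such handlers in two AVL maps, one for attached regions (_rm) and one for reserved areas (_am). The following sketch shows one way the pieces could be instantiated and exercised. It is not part of the header: Example_ds, Example_ops, Example_handler, Example_rmap, example() and all address constants are made-up names for illustration; the header is assumed to be installed as <l4/re/util/region_mapping>, and cxx::New_allocator from <l4/cxx/std_alloc> is used as the node allocator.

#include <l4/re/util/region_mapping>
#include <l4/cxx/std_alloc>

struct Example_ds {};            // stand-in for a dataspace (capability) type
struct Example_ops;              // paging policy, defined below

typedef L4Re::Util::Region_handler<Example_ds, Example_ops> Example_handler;
typedef L4Re::Util::Region_map<Example_handler, cxx::New_allocator> Example_rmap;

struct Example_ops
{
  typedef void *Map_result;      // value handed back by map() on a page fault

  static int map(Example_handler const *, l4_addr_t, L4Re::Util::Region const &,
                 bool, Map_result *)
  { return 0; }                  // a real policy would establish the mapping here

  static void free(Example_handler const *, l4_addr_t, unsigned long)
  {}                             // a real policy would release backing memory here

  static int map_info(Example_handler const *, l4_addr_t *, l4_addr_t *)
  { return 0; }                  // 0: the dataspace does not dictate an address
};

int example()
{
  // Manage the virtual address window [0x10000, 0xbfffffff].
  Example_rmap rm(0x10000, 0xbfffffff);

  // Reserve a 1 MiB area somewhere in the managed window ...
  l4_addr_t area = rm.attach_area(0, 0x100000, L4Re::Rm::F::Search_addr);
  if (area == L4_INVALID_ADDR)
    return -1;

  // ... and attach a 16 KiB piece of a (dummy) dataspace inside that area.
  Example_handler h(Example_ds(), L4_INVALID_CAP, 0, L4Re::Rm::F::RW);
  void *va = rm.attach(reinterpret_cast<void *>(area), 0x4000, h,
                       L4Re::Rm::F::Search_addr | L4Re::Rm::F::In_area);
  if (va == L4_INVALID_PTR)
    return -1;

  // Detach everything that overlaps the attached range again.
  L4Re::Util::Region r;
  Example_handler out;
  return rm.detach(va, 0x4000, L4Re::Rm::Detach_overlap, &r, &out);
}
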
Referenced definitions:
<l4/cxx/avl_map>: AVL map.
Detached_ds: Detached data space. (rm:102)
Detach_again: Detached data space, more to do. (rm:107)
Split_ds: Split data space, and done. (rm:104)
Kept_ds: Kept data space. (rm:103)
Detach_overlap: Do an unmap of all overlapping regions. (rm:250)
Detach_keep: Do not free the detached data space, ignore F::Detach_free. (rm:259)
Key_type (Region): Type of the key values. (avl_map:70)
Node (Base_type::Node): Return type for find. (avl_map:74)
Item_type (ITEM_TYPE): Type of the items stored in the set. (avl_set:152)
l4_addr_t (unsigned long): Address type. (l4int.h:45)
l4_int64_t (signed long long): Signed 64-bit value. (l4int.h:41)
l4_cap_idx_t (unsigned long): Capability selector type. (types.h:359)
L4_INVALID_CAP: Invalid capability selector. (consts.h:168)
L4_ENOENT: No such entity. (err.h:45)
L4_INVALID_PTR: Invalid address as pointer type. (consts.h:501)
L4_PAGESHIFT: Size of a page, log2-based. (consts.h:37)
l4_round_size(value, bits): Round value up to the next alignment with bits size. (consts.h:473)
L4_INVALID_ADDR: Invalid address. (consts.h:494)
<l4/sys/types.h>: Common L4 ABI Data Types.
L4Re: L4Re C++ Interfaces. (l4re.dox:17)
<l4/re/rm>: Region mapper interface.
Region_flags: Region flags (permissions, cacheability, special). (rm:140)
Reserved: Region is reserved (blocked). (rm:161)
Detach_free: Free the portion of the data space after detach. (rm:157)
W: Writable region. (rm:146)
Caching_mask: Mask of all Rm cache bits. (rm:165)
Search_addr: Search for a suitable address range. (rm:125)
In_area: Search only in area, or map into area. (rm:127)