author     Andy Wingo <wingo@pobox.com>  2017-03-08 22:39:29 +0100
committer  Andy Wingo <wingo@pobox.com>  2017-03-08 22:49:24 +0100
commit     c62f0b025649eadc28cb1cb1afd1be183414b9b0 (patch)
tree       d44e2a055a4bdcb2f1ba28c7e73dc765e34e22d0
parent     8157c2a3acc61b561903957f69e7e83163d5a1b5 (diff)
download   guile-c62f0b025649eadc28cb1cb1afd1be183414b9b0.tar.gz
64KB segment alignment
* module/system/vm/linker.scm (*lcm-page-size*): Rename from
  *page-size*, change to 64 KB.
* libguile/loader.c (load_thunk_from_memory): Only require page size
  alignment, knowing that although Guile might emit ELF with 64 KB
  alignment, it only really needs page alignment.
-rw-r--r--  libguile/loader.c            15
-rw-r--r--  module/system/vm/linker.scm  39
2 files changed, 50 insertions(+), 4 deletions(-)
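
The heart of the loader change is the relaxed base-alignment check in the
first hunk below. The following is a minimal, self-contained sketch of that
logic, assuming (as loader.c does) that IS_ALIGNED tests divisibility by a
power-of-two alignment; the base_ok helper and the example address are
inventions for illustration, not Guile's API.

#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment test, in the style of loader.c's IS_ALIGNED. */
#define IS_ALIGNED(x, align)  ((((uintptr_t) (x)) & ((align) - 1)) == 0)

/* base_ok: hypothetical helper mirroring the patched check.  The base
   passes either when it satisfies the full segment alignment, or when
   the segment alignment is a whole number of pages -- in which case an
   mmap'd base is known to be page-aligned already. */
static int
base_ok (uintptr_t base, size_t alignment, size_t page_size)
{
  return IS_ALIGNED (base, alignment)
    || IS_ALIGNED (alignment, page_size);
}

int
main (void)
{
  /* A page-aligned mmap result with 4 KB pages and a 64 KB image
     alignment: the old check (the first disjunct alone) would abort,
     the new one proceeds.  Prints 1. */
  printf ("%d\n", base_ok (0x7f0000003000, (size_t) 1 << 16, 4096));
  return 0;
}
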
diff --git a/libguile/loader.c b/libguile/loader.c
index 558a722ea..7b1adc9c9 100644
--- a/libguile/loader.c
+++ b/libguile/loader.c
@@ -420,7 +420,18 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
if (dynamic_segment < 0)
ABORT ("no PT_DYNAMIC segment");
- if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
+ /* The ELF images that Guile currently emits have segments that are
+ aligned on 64 KB boundaries, which might be larger than the actual
+ page size (usually 4 KB). However Guile doesn't actually use the
+ absolute addresses at all. All Guile needs is for the loaded image
+ to be able to make the data section writable (for the mmap path),
+ and for that the segment just needs to be page-aligned, and a page
+ is always bigger than Guile's minimum alignment. Since we know
+ (for the mmap path) that the base _is_ page-aligned, we proceed
+ ahead even if the image alignment is greater than the page
+ size. */
+ if (!IS_ALIGNED ((scm_t_uintptr) data, alignment)
+ && !IS_ALIGNED (alignment, page_size))
ABORT ("incorrectly aligned base");
/* Allow writes to writable pages. */
@@ -433,7 +444,7 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
continue;
if (ph[i].p_flags == PF_R)
continue;
- if (ph[i].p_align != page_size)
+ if (ph[i].p_align < page_size)
continue;
if (mprotect (data + ph[i].p_vaddr,
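
For context, here is a hedged sketch of the permission pass the second hunk
touches, written against standard <elf.h> types rather than Guile's internal
ones; flags_to_prot and relax_permissions are hypothetical names, not
loader.c's actual functions. The behavioral change is confined to the
p_align test: any segment aligned to at least one page can be mprotect'ed,
so a 64 KB-aligned writable segment is no longer skipped on a 4 KB-page
system.

#include <elf.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical helper: map ELF segment flags to mmap protections. */
static int
flags_to_prot (Elf64_Word flags)
{
  int prot = 0;
  if (flags & PF_R) prot |= PROT_READ;
  if (flags & PF_W) prot |= PROT_WRITE;
  if (flags & PF_X) prot |= PROT_EXEC;
  return prot;
}

static int
relax_permissions (char *data, const Elf64_Phdr *ph, size_t n)
{
  size_t page_size = (size_t) sysconf (_SC_PAGESIZE);
  size_t i;

  for (i = 0; i < n; i++)
    {
      if (ph[i].p_type != PT_LOAD)
        continue;                  /* only loadable segments */
      if (ph[i].p_flags == PF_R)
        continue;                  /* read-only: nothing to relax */
      if (ph[i].p_align < page_size)
        continue;                  /* was: p_align != page_size */
      if (mprotect (data + ph[i].p_vaddr, ph[i].p_memsz,
                    flags_to_prot (ph[i].p_flags)))
        return -1;
    }
  return 0;
}
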
diff --git a/module/system/vm/linker.scm b/module/system/vm/linker.scm
index 952837737..6ad582a9d 100644
--- a/module/system/vm/linker.scm
+++ b/module/system/vm/linker.scm
@@ -317,7 +317,42 @@ segment, the order of the linker objects is preserved."
#:addralign (elf-section-addralign sec)
#:entsize (elf-section-entsize sec)))
-(define *page-size* 4096)
+
+;; We assume that 64K is a multiple of the page size. A
+;; least-common-multiple, if you will.
+;;
+;; It would be possible to choose smaller, target-specific page sizes.
+;; This is still a little tricky; on amd64 for example, systems commonly
+;; have 4 KB pages, but they are allowed by the ABI to have any
+;; power-of-two page size up to 64 KB. On Cygwin, pages are 4 KB but
+;; they can only be allocated 16 at a time. MIPS and ARM64 can use
+;; 64 KB pages too, and that's not uncommon.
+;;
+;; At the current time, in Guile we would like to reduce the number of
+;; binaries we ship to the existing 32-or-64-bit and
+;; big-or-little-endian variants, if possible. It would seem that with
+;; the least-common-multiple of 64 KB pages, we can do that.
+;;
+;; See https://github.com/golang/go/issues/10180 for a discussion of
+;; this issue in the Go context.
+;;
+;; Using 64 KB instead of the more usual 4 KB will increase the size
+;; of our .go files, but not the prebuilt/ part of the tarball, as
+;; that part of the file will be zeroes and compress well.
+;; Additionally, on a system with 4 KB pages, the extra padding will
+;; never be paged in, nor read from disk (though it does cause more
+;; seeking, so on spinning metal it's a bit of a lose).
+;;
+;; By way of comparison, on many 64-bit platforms, binutils currently
+;; defaults to aligning segments on 2MB boundaries. It does so by
+;; making the file and the memory images not the same: the pages are all
+;; together on disk, but then when loading, the loader will mmap a
+;; region "memsz" large which might be greater than the file size, then
+;; map segments into that region. We can avoid this complication for
+;; now. We can consider adding it in the future in a compatible way in
+;; 2.2 if it is important.
+;;
+(define *lcm-page-size* (ash 1 16))
(define (add-symbols symbols offset symtab)
"Add @var{symbols} to the symbol table @var{symtab}, relocating them
@@ -631,7 +666,7 @@ relocated headers, and the global symbol table."
;; loadable segments to share pages
;; with PF_R segments.
(not (and (not type) (= PF_R prev-flags))))
- *page-size*
+ *lcm-page-size*
8))
(lp seglists
(fold-values cons objs-out objects)
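
The final hunk is where the constant takes effect: when the linker starts a
segment that must not share pages with a preceding PF_R segment, it aligns
to *lcm-page-size*, otherwise word alignment suffices. A rough C
transliteration of the visible part of that conditional, with Scheme's #f
modeled as 0 (segment_alignment is an illustrative name; the real logic is
the Scheme above, which carries further conjuncts not shown in the hunk):

#include <elf.h>
#include <stddef.h>

/* Illustrative transliteration: a non-loadable entry (type = 0,
   standing in for #f) following a PF_R segment may share pages with
   it; everything else starts on a fresh 64 KB boundary. */
static size_t
segment_alignment (int type, int prev_flags)
{
  const size_t lcm_page_size = (size_t) 1 << 16;

  if (!(type == 0 && prev_flags == PF_R))
    return lcm_page_size;
  return 8;                      /* word alignment suffices */
}
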