Diffstat (limited to 'mozglue/linker')
-rw-r--r--  mozglue/linker/BaseElf.cpp                   215
-rw-r--r--  mozglue/linker/BaseElf.h                     141
-rw-r--r--  mozglue/linker/CustomElf.cpp                 774
-rw-r--r--  mozglue/linker/CustomElf.h                   159
-rw-r--r--  mozglue/linker/ElfLoader.cpp                1310
-rw-r--r--  mozglue/linker/ElfLoader.h                   674
-rw-r--r--  mozglue/linker/Elfxx.h                       241
-rw-r--r--  mozglue/linker/Logging.h                      87
-rw-r--r--  mozglue/linker/Mappable.cpp                  681
-rw-r--r--  mozglue/linker/Mappable.h                    267
-rw-r--r--  mozglue/linker/SeekableZStream.cpp           261
-rw-r--r--  mozglue/linker/SeekableZStream.h             154
-rw-r--r--  mozglue/linker/Utils.h                       618
-rw-r--r--  mozglue/linker/XZStream.cpp                  213
-rw-r--r--  mozglue/linker/XZStream.h                     48
-rw-r--r--  mozglue/linker/Zip.cpp                       229
-rw-r--r--  mozglue/linker/Zip.h                         500
-rw-r--r--  mozglue/linker/dladdr.h                       16
-rw-r--r--  mozglue/linker/moz.build                      54
-rw-r--r--  mozglue/linker/szip.cpp                      593
-rw-r--r--  mozglue/linker/tests/TestZip.cpp              69
-rw-r--r--  mozglue/linker/tests/moz.build                23
-rw-r--r--  mozglue/linker/tests/no_central_dir.zip      bin 0 -> 281 bytes
-rw-r--r--  mozglue/linker/tests/run_test_zip.py          21
-rw-r--r--  mozglue/linker/tests/test.zip                bin 0 -> 574 bytes
25 files changed, 7348 insertions, 0 deletions
diff --git a/mozglue/linker/BaseElf.cpp b/mozglue/linker/BaseElf.cpp
new file mode 100644
index 000000000..8a78285ef
--- /dev/null
+++ b/mozglue/linker/BaseElf.cpp
@@ -0,0 +1,215 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BaseElf.h"
+#include "Elfxx.h"
+#include "Logging.h"
+#include "mozilla/RefPtr.h"
+
+using namespace Elf;
+using namespace mozilla;
+
+unsigned long
+BaseElf::Hash(const char *symbol)
+{
+ const unsigned char *sym = reinterpret_cast<const unsigned char *>(symbol);
+ unsigned long h = 0, g;
+ while (*sym) {
+ h = (h << 4) + *sym++;
+ g = h & 0xf0000000;
+ h ^= g;
+ h ^= g >> 24;
+ }
+ return h;
+}
+
+void *
+BaseElf::GetSymbolPtr(const char *symbol) const
+{
+ return GetSymbolPtr(symbol, Hash(symbol));
+}
+
+void *
+BaseElf::GetSymbolPtr(const char *symbol, unsigned long hash) const
+{
+ const Sym *sym = GetSymbol(symbol, hash);
+ void *ptr = nullptr;
+ if (sym && sym->st_shndx != SHN_UNDEF)
+ ptr = GetPtr(sym->st_value);
+ DEBUG_LOG("BaseElf::GetSymbolPtr(%p [\"%s\"], \"%s\") = %p",
+ reinterpret_cast<const void *>(this), GetPath(), symbol, ptr);
+ return ptr;
+}
+
+const Sym *
+BaseElf::GetSymbol(const char *symbol, unsigned long hash) const
+{
+  /* Search for the symbol using the buckets and chains tables.
+   * The hash computed from the symbol name gives an index in the buckets
+   * table. The corresponding value in the buckets table is an index in both
+   * the symbols table and the chains table.
+   * If the corresponding symbol in the symbols table matches, we're done.
+   * Otherwise, the corresponding value in the chains table is a new index
+   * in both tables, whose corresponding symbol is tested, and so on and so
+   * forth. */
+ size_t bucket = hash % buckets.numElements();
+ for (size_t y = buckets[bucket]; y != STN_UNDEF; y = chains[y]) {
+ if (strcmp(symbol, strtab.GetStringAt(symtab[y].st_name)))
+ continue;
+ return &symtab[y];
+ }
+ return nullptr;
+}
+
+bool
+BaseElf::Contains(void *addr) const
+{
+ return base.Contains(addr);
+}
+
+#ifdef __ARM_EABI__
+const void *
+BaseElf::FindExidx(int *pcount) const
+{
+ if (arm_exidx) {
+ *pcount = arm_exidx.numElements();
+ return arm_exidx;
+ }
+ *pcount = 0;
+ return nullptr;
+}
+#endif
+
+already_AddRefed<LibHandle>
+LoadedElf::Create(const char *path, void *base_addr)
+{
+ DEBUG_LOG("LoadedElf::Create(\"%s\", %p) = ...", path, base_addr);
+
+ uint8_t mapped;
+  /* If the page is not mapped, mincore returns an error. If base_addr is
+   * nullptr, as would happen if the corresponding binary is prelinked with
+   * the prelink tool (but not with the android apriori tool), no page is
+   * mapped there, and mincore returns an error, too, which makes prelinked
+   * libraries on glibc unsupported. This is not an interesting use case for
+   * now, so don't try supporting that case.
+   */
+ if (mincore(const_cast<void*>(base_addr), PageSize(), &mapped))
+ return nullptr;
+
+ RefPtr<LoadedElf> elf = new LoadedElf(path);
+
+ const Ehdr *ehdr = Ehdr::validate(base_addr);
+ if (!ehdr)
+ return nullptr;
+
+  Addr min_vaddr = (Addr) -1; // We want to find the lowest and highest
+  Addr max_vaddr = 0;         // virtual addresses used by this Elf.
+ const Phdr *dyn = nullptr;
+#ifdef __ARM_EABI__
+ const Phdr *arm_exidx_phdr = nullptr;
+#endif
+
+ Array<Phdr> phdrs(reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff,
+ ehdr->e_phnum);
+ for (auto phdr = phdrs.begin(); phdr < phdrs.end(); ++phdr) {
+ switch (phdr->p_type) {
+ case PT_LOAD:
+ if (phdr->p_vaddr < min_vaddr)
+ min_vaddr = phdr->p_vaddr;
+ if (max_vaddr < phdr->p_vaddr + phdr->p_memsz)
+ max_vaddr = phdr->p_vaddr + phdr->p_memsz;
+ break;
+ case PT_DYNAMIC:
+ dyn = &*phdr;
+ break;
+#ifdef __ARM_EABI__
+ case PT_ARM_EXIDX:
+ /* We cannot initialize arm_exidx here
+ because we don't have a base yet */
+ arm_exidx_phdr = &*phdr;
+ break;
+#endif
+ }
+ }
+
+ /* If the lowest PT_LOAD virtual address in headers is not 0, then the ELF
+ * is either prelinked or a non-PIE executable. The former case is not
+ * possible, because base_addr would be nullptr and the mincore test above
+ * would already have made us return.
+ * For a non-PIE executable, PT_LOADs contain absolute addresses, so in
+ * practice, this means min_vaddr should be equal to base_addr. max_vaddr
+ * can thus be adjusted accordingly.
+ */
+ if (min_vaddr != 0) {
+ void *min_vaddr_ptr = reinterpret_cast<void *>(
+ static_cast<uintptr_t>(min_vaddr));
+ if (min_vaddr_ptr != base_addr) {
+ LOG("%s: %p != %p", elf->GetPath(), min_vaddr_ptr, base_addr);
+ return nullptr;
+ }
+ max_vaddr -= min_vaddr;
+ }
+ if (!dyn) {
+ LOG("%s: No PT_DYNAMIC segment found", elf->GetPath());
+ return nullptr;
+ }
+
+ elf->base.Assign(base_addr, max_vaddr);
+
+ if (!elf->InitDyn(dyn))
+ return nullptr;
+
+#ifdef __ARM_EABI__
+ if (arm_exidx_phdr)
+ elf->arm_exidx.InitSize(elf->GetPtr(arm_exidx_phdr->p_vaddr),
+ arm_exidx_phdr->p_memsz);
+#endif
+
+ DEBUG_LOG("LoadedElf::Create(\"%s\", %p) = %p", path, base_addr,
+ static_cast<void *>(elf));
+
+ ElfLoader::Singleton.Register(elf);
+ return elf.forget();
+}
+
+bool
+LoadedElf::InitDyn(const Phdr *pt_dyn)
+{
+ Array<Dyn> dyns;
+ dyns.InitSize(GetPtr<Dyn>(pt_dyn->p_vaddr), pt_dyn->p_filesz);
+
+ size_t symnum = 0;
+ for (auto dyn = dyns.begin(); dyn < dyns.end() && dyn->d_tag; ++dyn) {
+ switch (dyn->d_tag) {
+ case DT_HASH:
+ {
+ DEBUG_LOG("%s 0x%08" PRIxAddr, "DT_HASH", dyn->d_un.d_val);
+        const Elf::Word *hash_table_header =
+          GetPtr<Elf::Word>(dyn->d_un.d_ptr);
+ symnum = hash_table_header[1];
+ buckets.Init(&hash_table_header[2], hash_table_header[0]);
+ chains.Init(&*buckets.end());
+ }
+ break;
+ case DT_STRTAB:
+ DEBUG_LOG("%s 0x%08" PRIxAddr, "DT_STRTAB", dyn->d_un.d_val);
+ strtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_SYMTAB:
+ DEBUG_LOG("%s 0x%08" PRIxAddr, "DT_SYMTAB", dyn->d_un.d_val);
+ symtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ }
+ }
+ if (!buckets || !symnum) {
+ ERROR("%s: Missing or broken DT_HASH", GetPath());
+ } else if (!strtab) {
+ ERROR("%s: Missing DT_STRTAB", GetPath());
+ } else if (!symtab) {
+ ERROR("%s: Missing DT_SYMTAB", GetPath());
+ } else {
+ return true;
+ }
+ return false;
+}
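
For reference, the DT_HASH lookup that BaseElf::GetSymbol implements above can be sketched in isolation: the System V hash picks a bucket, the bucket yields a first symbol index, and the chains table links further candidates until STN_UNDEF (0) ends the walk. A minimal standalone sketch; the MiniSym type and table layout here are hypothetical stand-ins for what DT_SYMTAB, DT_STRTAB and DT_HASH point to in a loaded library.

    #include <cstddef>
    #include <cstring>

    struct MiniSym { unsigned long st_name; };  // offset into the string table

    // buckets/chains follow the DT_HASH layout: nbucket, nchain,
    // buckets[nbucket], chains[nchain]. chains[i] continues the walk for
    // symbol index i; index 0 (STN_UNDEF) terminates a chain.
    const MiniSym *
    LookupSymbol(const char *symbol, unsigned long hash,
                 const unsigned long *buckets, size_t nbuckets,
                 const unsigned long *chains,
                 const MiniSym *symtab, const char *strtab)
    {
      for (size_t y = buckets[hash % nbuckets]; y != 0; y = chains[y]) {
        // The hash only narrows the search; the string compare decides.
        if (strcmp(symbol, strtab + symtab[y].st_name) == 0)
          return &symtab[y];
      }
      return nullptr;
    }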
diff --git a/mozglue/linker/BaseElf.h b/mozglue/linker/BaseElf.h
new file mode 100644
index 000000000..19f9061bc
--- /dev/null
+++ b/mozglue/linker/BaseElf.h
@@ -0,0 +1,141 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef BaseElf_h
+#define BaseElf_h
+
+#include "ElfLoader.h"
+#include "Elfxx.h"
+
+
+/**
+ * Base class for ELF libraries. This class includes things that will be
+ * common between SystemElfs and CustomElfs.
+ */
+class BaseElf: public LibHandle
+{
+public:
+ /**
+ * Hash function for symbol lookup, as defined in ELF standard for System V.
+ */
+ static unsigned long Hash(const char *symbol);
+
+ /**
+ * Returns the address corresponding to the given symbol name (with a
+ * pre-computed hash).
+ */
+ void *GetSymbolPtr(const char *symbol, unsigned long hash) const;
+
+ /**
+ * Returns a pointer to the Elf Symbol in the Dynamic Symbol table
+ * corresponding to the given symbol name (with a pre-computed hash).
+ */
+ const Elf::Sym *GetSymbol(const char *symbol, unsigned long hash) const;
+
+ BaseElf(const char *path, Mappable *mappable = nullptr)
+ : LibHandle(path)
+ , mappable(mappable)
+ {
+ }
+
+protected:
+ /**
+   * Inherited from LibHandle. These are temporary and are not supposed to
+   * be used.
+ */
+ virtual void *GetSymbolPtr(const char *symbol) const;
+ virtual bool Contains(void *addr) const;
+ virtual void *GetBase() const { return GetPtr(0); }
+
+#ifdef __ARM_EABI__
+ virtual const void *FindExidx(int *pcount) const;
+#endif
+
+  virtual Mappable *GetMappable() const { return nullptr; }
+
+public:
+/* private: */
+ /**
+ * Returns a pointer relative to the base address where the library is
+ * loaded.
+ */
+ void *GetPtr(const Elf::Addr offset) const
+ {
+ if (reinterpret_cast<void *>(offset) > base)
+ return reinterpret_cast<void *>(offset);
+ return base + offset;
+ }
+
+ /**
+ * Like the above, but returns a typed (const) pointer
+ */
+ template <typename T>
+ const T *GetPtr(const Elf::Addr offset) const
+ {
+ if (reinterpret_cast<void *>(offset) > base)
+ return reinterpret_cast<const T *>(offset);
+ return reinterpret_cast<const T *>(base + offset);
+ }
+
+ /* Appropriated Mappable */
+ /* /!\ we rely on this being nullptr for BaseElf instances, but not
+ * CustomElf instances. */
+ RefPtr<Mappable> mappable;
+
+ /* Base address where the library is loaded */
+ MappedPtr base;
+
+ /* Buckets and chains for the System V symbol hash table */
+ Array<Elf::Word> buckets;
+ UnsizedArray<Elf::Word> chains;
+
+/* protected: */
+ /* String table */
+ Elf::Strtab strtab;
+
+ /* Symbol table */
+ UnsizedArray<Elf::Sym> symtab;
+
+#ifdef __ARM_EABI__
+ /* ARM.exidx information used by FindExidx */
+ Array<uint32_t[2]> arm_exidx;
+#endif
+};
+
+
+/**
+ * Class for ELF libraries that are already loaded in memory.
+ */
+class LoadedElf: public BaseElf
+{
+public:
+ /**
+ * Returns a LoadedElf corresponding to the already loaded ELF
+ * at the given base address.
+ */
+ static already_AddRefed<LibHandle> Create(const char *path,
+ void *base_addr);
+
+private:
+ LoadedElf(const char *path)
+ : BaseElf(path) { }
+
+ ~LoadedElf()
+ {
+ /* Avoid base's destructor unmapping something that doesn't actually
+ * belong to it. */
+ base.release();
+ ElfLoader::Singleton.Forget(this);
+ }
+
+ /**
+ * Initializes the library according to information found in the given
+ * PT_DYNAMIC header.
+ * Returns whether this succeeded or failed.
+ */
+ bool InitDyn(const Elf::Phdr *pt_dyn);
+};
+
+
+#endif /* BaseElf_h */
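
A note on BaseElf::GetPtr above: dynamic section values are offsets from the load base for PIE libraries, but absolute addresses for non-PIE executables, and the comparison against base disambiguates the two cases. A minimal sketch of that decision, with hypothetical names:

    #include <cstdint>

    const void *
    ResolveAddr(uintptr_t base, uintptr_t value)
    {
      /* A value above the load base is assumed to already be an absolute
       * address (non-PIE executable); otherwise it is an offset from base. */
      if (value > base)
        return reinterpret_cast<const void *>(value);
      return reinterpret_cast<const void *>(base + value);
    }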
diff --git a/mozglue/linker/CustomElf.cpp b/mozglue/linker/CustomElf.cpp
new file mode 100644
index 000000000..dbab0cf0d
--- /dev/null
+++ b/mozglue/linker/CustomElf.cpp
@@ -0,0 +1,774 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstring>
+#include <sys/mman.h>
+#include <vector>
+#include <dlfcn.h>
+#include <signal.h>
+#include <string.h>
+#include "CustomElf.h"
+#include "BaseElf.h"
+#include "Mappable.h"
+#include "Logging.h"
+
+using namespace Elf;
+using namespace mozilla;
+
+/* TODO: Fill ElfLoader::Singleton.lastError on errors. */
+
+/* Function used to report library mappings from the custom linker to Gecko
+ * crash reporter */
+#ifdef ANDROID
+extern "C" {
+ void report_mapping(char *name, void *base, uint32_t len, uint32_t offset);
+ void delete_mapping(const char *name);
+}
+#else
+#define report_mapping(...)
+#define delete_mapping(...)
+#endif
+
+const Ehdr *Ehdr::validate(const void *buf)
+{
+ if (!buf || buf == MAP_FAILED)
+ return nullptr;
+
+ const Ehdr *ehdr = reinterpret_cast<const Ehdr *>(buf);
+
+ /* Only support ELF executables or libraries for the host system */
+ if (memcmp(ELFMAG, &ehdr->e_ident, SELFMAG) ||
+ ehdr->e_ident[EI_CLASS] != ELFCLASS ||
+ ehdr->e_ident[EI_DATA] != ELFDATA ||
+ ehdr->e_ident[EI_VERSION] != 1 ||
+ (ehdr->e_ident[EI_OSABI] != ELFOSABI && ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) ||
+#ifdef EI_ABIVERSION
+ ehdr->e_ident[EI_ABIVERSION] != ELFABIVERSION ||
+#endif
+ (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
+ ehdr->e_machine != ELFMACHINE ||
+ ehdr->e_version != 1 ||
+ ehdr->e_phentsize != sizeof(Phdr))
+ return nullptr;
+
+ return ehdr;
+}
+
+namespace {
+
+void debug_phdr(const char *type, const Phdr *phdr)
+{
+ DEBUG_LOG("%s @0x%08" PRIxAddr " ("
+ "filesz: 0x%08" PRIxAddr ", "
+ "memsz: 0x%08" PRIxAddr ", "
+ "offset: 0x%08" PRIxAddr ", "
+ "flags: %c%c%c)",
+ type, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz,
+ phdr->p_offset, phdr->p_flags & PF_R ? 'r' : '-',
+ phdr->p_flags & PF_W ? 'w' : '-', phdr->p_flags & PF_X ? 'x' : '-');
+}
+
+static int p_flags_to_mprot(Word flags)
+{
+ return ((flags & PF_X) ? PROT_EXEC : 0) |
+ ((flags & PF_W) ? PROT_WRITE : 0) |
+ ((flags & PF_R) ? PROT_READ : 0);
+}
+
+} /* anonymous namespace */
+
+/**
+ * RAII wrapper for a mapping of the first page of a Mappable object.
+ * This calls Mappable::munmap instead of system munmap.
+ */
+class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
+public:
+ Mappable1stPagePtr(Mappable *mappable)
+ : GenericMappedPtr<Mappable1stPagePtr>(
+ mappable->mmap(nullptr, PageSize(), PROT_READ, MAP_PRIVATE, 0))
+ , mappable(mappable)
+ {
+    /* Ensure the content of this page is available */
+ mappable->ensure(*this);
+ }
+
+private:
+ friend class GenericMappedPtr<Mappable1stPagePtr>;
+ void munmap(void *buf, size_t length) {
+ mappable->munmap(buf, length);
+ }
+
+ RefPtr<Mappable> mappable;
+};
+
+
+already_AddRefed<LibHandle>
+CustomElf::Load(Mappable *mappable, const char *path, int flags)
+{
+ DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = ...", path, flags);
+ if (!mappable)
+ return nullptr;
+ /* Keeping a RefPtr of the CustomElf is going to free the appropriate
+ * resources when returning nullptr */
+ RefPtr<CustomElf> elf = new CustomElf(mappable, path);
+ /* Map the first page of the Elf object to access Elf and program headers */
+ Mappable1stPagePtr ehdr_raw(mappable);
+ if (ehdr_raw == MAP_FAILED)
+ return nullptr;
+
+ const Ehdr *ehdr = Ehdr::validate(ehdr_raw);
+ if (!ehdr)
+ return nullptr;
+
+ /* Scan Elf Program Headers and gather some information about them */
+ std::vector<const Phdr *> pt_loads;
+  Addr min_vaddr = (Addr) -1; // We want to find the lowest and highest
+  Addr max_vaddr = 0;         // virtual addresses used by this Elf.
+ const Phdr *dyn = nullptr;
+
+ const Phdr *first_phdr = reinterpret_cast<const Phdr *>(
+ reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
+ const Phdr *end_phdr = &first_phdr[ehdr->e_phnum];
+#ifdef __ARM_EABI__
+ const Phdr *arm_exidx_phdr = nullptr;
+#endif
+
+ for (const Phdr *phdr = first_phdr; phdr < end_phdr; phdr++) {
+ switch (phdr->p_type) {
+ case PT_LOAD:
+ debug_phdr("PT_LOAD", phdr);
+ pt_loads.push_back(phdr);
+ if (phdr->p_vaddr < min_vaddr)
+ min_vaddr = phdr->p_vaddr;
+ if (max_vaddr < phdr->p_vaddr + phdr->p_memsz)
+ max_vaddr = phdr->p_vaddr + phdr->p_memsz;
+ break;
+ case PT_DYNAMIC:
+ debug_phdr("PT_DYNAMIC", phdr);
+ if (!dyn) {
+ dyn = phdr;
+ } else {
+ ERROR("%s: Multiple PT_DYNAMIC segments detected", elf->GetPath());
+ return nullptr;
+ }
+ break;
+ case PT_TLS:
+ debug_phdr("PT_TLS", phdr);
+ if (phdr->p_memsz) {
+ ERROR("%s: TLS is not supported", elf->GetPath());
+ return nullptr;
+ }
+ break;
+ case PT_GNU_STACK:
+ debug_phdr("PT_GNU_STACK", phdr);
+// Skip on Android until bug 706116 is fixed
+#ifndef ANDROID
+ if (phdr->p_flags & PF_X) {
+ ERROR("%s: Executable stack is not supported", elf->GetPath());
+ return nullptr;
+ }
+#endif
+ break;
+#ifdef __ARM_EABI__
+ case PT_ARM_EXIDX:
+ /* We cannot initialize arm_exidx here
+ because we don't have a base yet */
+ arm_exidx_phdr = phdr;
+ break;
+#endif
+ default:
+ DEBUG_LOG("%s: Program header type #%d not handled",
+ elf->GetPath(), phdr->p_type);
+ }
+ }
+
+ if (min_vaddr != 0) {
+ ERROR("%s: Unsupported minimal virtual address: 0x%08" PRIxAddr,
+ elf->GetPath(), min_vaddr);
+ return nullptr;
+ }
+ if (!dyn) {
+ ERROR("%s: No PT_DYNAMIC segment found", elf->GetPath());
+ return nullptr;
+ }
+
+ /* Reserve enough memory to map the complete virtual address space for this
+ * library.
+ * As we are using the base address from here to mmap something else with
+ * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
+ * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
+ * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
+ * address with MAP_SHARED, which guarantees the kernel returns an address
+ * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
+ * the same address, because of some bad side effects of keeping it as
+ * MAP_SHARED. */
+ elf->base.Assign(MemoryRange::mmap(nullptr, max_vaddr, PROT_NONE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0));
+ if ((elf->base == MAP_FAILED) ||
+ (mmap(elf->base, max_vaddr, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
+ ERROR("%s: Failed to mmap", elf->GetPath());
+ return nullptr;
+ }
+
+ /* Load and initialize library */
+ for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
+ it < pt_loads.end(); ++it)
+ if (!elf->LoadSegment(*it))
+ return nullptr;
+
+ /* We're not going to mmap anymore */
+ mappable->finalize();
+
+ report_mapping(const_cast<char *>(elf->GetName()), elf->base,
+ (max_vaddr + PAGE_SIZE - 1) & PAGE_MASK, 0);
+
+ elf->l_addr = elf->base;
+ elf->l_name = elf->GetPath();
+ elf->l_ld = elf->GetPtr<Dyn>(dyn->p_vaddr);
+ ElfLoader::Singleton.Register(elf);
+
+ if (!elf->InitDyn(dyn))
+ return nullptr;
+
+ if (elf->has_text_relocs) {
+ for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
+ it < pt_loads.end(); ++it)
+ mprotect(PageAlignedPtr(elf->GetPtr((*it)->p_vaddr)),
+ PageAlignedEndPtr((*it)->p_memsz),
+ p_flags_to_mprot((*it)->p_flags) | PROT_WRITE);
+ }
+
+ if (!elf->Relocate() || !elf->RelocateJumps())
+ return nullptr;
+
+ if (elf->has_text_relocs) {
+ for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
+ it < pt_loads.end(); ++it)
+ mprotect(PageAlignedPtr(elf->GetPtr((*it)->p_vaddr)),
+ PageAlignedEndPtr((*it)->p_memsz),
+ p_flags_to_mprot((*it)->p_flags));
+ }
+
+ if (!elf->CallInit())
+ return nullptr;
+
+#ifdef __ARM_EABI__
+ if (arm_exidx_phdr)
+ elf->arm_exidx.InitSize(elf->GetPtr(arm_exidx_phdr->p_vaddr),
+ arm_exidx_phdr->p_memsz);
+#endif
+
+ if (MOZ_UNLIKELY(Logging::isVerbose())) {
+ elf->stats("oneLibLoaded");
+ }
+ DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = %p", path, flags,
+ static_cast<void *>(elf));
+ return elf.forget();
+}
+
+CustomElf::~CustomElf()
+{
+ DEBUG_LOG("CustomElf::~CustomElf(%p [\"%s\"])",
+ reinterpret_cast<void *>(this), GetPath());
+ CallFini();
+ /* Normally, __cxa_finalize is called by the .fini function. However,
+ * Android NDK before r6b doesn't do that. Our wrapped cxa_finalize only
+ * calls destructors once, so call it in all cases. */
+ ElfLoader::__wrap_cxa_finalize(this);
+ ElfLoader::Singleton.Forget(this);
+ delete_mapping(GetName());
+}
+
+void *
+CustomElf::GetSymbolPtrInDeps(const char *symbol) const
+{
+ /* Resolve dlopen and related functions to point to ours */
+ if (symbol[0] == 'd' && symbol[1] == 'l') {
+ if (strcmp(symbol + 2, "open") == 0)
+ return FunctionPtr(__wrap_dlopen);
+ if (strcmp(symbol + 2, "error") == 0)
+ return FunctionPtr(__wrap_dlerror);
+ if (strcmp(symbol + 2, "close") == 0)
+ return FunctionPtr(__wrap_dlclose);
+ if (strcmp(symbol + 2, "sym") == 0)
+ return FunctionPtr(__wrap_dlsym);
+ if (strcmp(symbol + 2, "addr") == 0)
+ return FunctionPtr(__wrap_dladdr);
+ if (strcmp(symbol + 2, "_iterate_phdr") == 0)
+ return FunctionPtr(__wrap_dl_iterate_phdr);
+ } else if (symbol[0] == '_' && symbol[1] == '_') {
+ /* Resolve a few C++ ABI specific functions to point to ours */
+#ifdef __ARM_EABI__
+ if (strcmp(symbol + 2, "aeabi_atexit") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_aeabi_atexit);
+#else
+ if (strcmp(symbol + 2, "cxa_atexit") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_cxa_atexit);
+#endif
+ if (strcmp(symbol + 2, "cxa_finalize") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_cxa_finalize);
+ if (strcmp(symbol + 2, "dso_handle") == 0)
+ return const_cast<CustomElf *>(this);
+ if (strcmp(symbol + 2, "moz_linker_stats") == 0)
+ return FunctionPtr(&ElfLoader::stats);
+#ifdef __ARM_EABI__
+ if (strcmp(symbol + 2, "gnu_Unwind_Find_exidx") == 0)
+ return FunctionPtr(__wrap___gnu_Unwind_Find_exidx);
+#endif
+ } else if (symbol[0] == 's' && symbol[1] == 'i') {
+ if (strcmp(symbol + 2, "gnal") == 0)
+ return FunctionPtr(signal);
+ if (strcmp(symbol + 2, "gaction") == 0)
+ return FunctionPtr(sigaction);
+ }
+
+ void *sym;
+
+ unsigned long hash = Hash(symbol);
+
+ /* self_elf should never be NULL, but better safe than sorry. */
+ if (ElfLoader::Singleton.self_elf) {
+ /* We consider the library containing this code a permanent LD_PRELOAD,
+ * so, check if the symbol exists here first. */
+ sym = static_cast<BaseElf *>(
+ ElfLoader::Singleton.self_elf.get())->GetSymbolPtr(symbol, hash);
+ if (sym)
+ return sym;
+ }
+
+ /* Then search the symbol in our dependencies. Since we already searched in
+ * libraries the system linker loaded, skip those (on glibc systems). We
+ * also assume the symbol is to be found in one of the dependent libraries
+ * directly, not in their own dependent libraries. Building libraries with
+ * --no-allow-shlib-undefined ensures such indirect symbol dependency don't
+ * happen. */
+ for (std::vector<RefPtr<LibHandle> >::const_iterator it = dependencies.begin();
+ it < dependencies.end(); ++it) {
+ /* Skip if it's the library containing this code, since we've already
+ * looked at it above. */
+ if (*it == ElfLoader::Singleton.self_elf)
+ continue;
+ if (BaseElf *be = (*it)->AsBaseElf()) {
+ sym = be->GetSymbolPtr(symbol, hash);
+ } else {
+ sym = (*it)->GetSymbolPtr(symbol);
+ }
+ if (sym)
+ return sym;
+ }
+ return nullptr;
+}
+
+void
+CustomElf::stats(const char *when) const
+{
+ mappable->stats(when, GetPath());
+}
+
+bool
+CustomElf::LoadSegment(const Phdr *pt_load) const
+{
+ if (pt_load->p_type != PT_LOAD) {
+ DEBUG_LOG("%s: Elf::LoadSegment only takes PT_LOAD program headers", GetPath());
+    return false;
+ }
+
+ int prot = p_flags_to_mprot(pt_load->p_flags);
+
+ /* Mmap at page boundary */
+ Addr align = PageSize();
+ Addr align_offset;
+ void *mapped, *where;
+ do {
+ align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
+ where = GetPtr(pt_load->p_vaddr - align_offset);
+ DEBUG_LOG("%s: Loading segment @%p %c%c%c", GetPath(), where,
+ prot & PROT_READ ? 'r' : '-',
+ prot & PROT_WRITE ? 'w' : '-',
+ prot & PROT_EXEC ? 'x' : '-');
+ mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
+ prot, MAP_PRIVATE | MAP_FIXED,
+ pt_load->p_offset - align_offset);
+ if ((mapped != MAP_FAILED) || (pt_load->p_vaddr == 0) ||
+ (pt_load->p_align == align))
+ break;
+ /* The virtual address space for the library is properly aligned at
+ * 16k on ARMv6 (see CustomElf::Load), and so is the first segment
+ * (p_vaddr == 0). But subsequent segments may not be 16k aligned
+ * and fail to mmap. In such case, try to mmap again at the p_align
+ * boundary instead of page boundary. */
+ DEBUG_LOG("%s: Failed to mmap, retrying", GetPath());
+ align = pt_load->p_align;
+ } while (1);
+
+ if (mapped != where) {
+ if (mapped == MAP_FAILED) {
+ ERROR("%s: Failed to mmap", GetPath());
+ } else {
+ ERROR("%s: Didn't map at the expected location (wanted: %p, got: %p)",
+ GetPath(), where, mapped);
+ }
+ return false;
+ }
+
+ /* Ensure the availability of all pages within the mapping if on-demand
+ * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
+ * registered). */
+ const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
+ if (!ElfLoader::Singleton.hasRegisteredHandler() ||
+ (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
+ for (Addr off = 0; off < pt_load->p_filesz + align_offset;
+ off += PageSize()) {
+ mappable->ensure(reinterpret_cast<char *>(mapped) + off);
+ }
+ }
+ /* When p_memsz is greater than p_filesz, we need to have nulled out memory
+ * after p_filesz and before p_memsz.
+ * Above the end of the last page, and up to p_memsz, we already have nulled
+ * out memory because we mapped anonymous memory on the whole library virtual
+   * address space. We just need to adjust the protection flags of this
+   * anonymous memory. */
+ if (pt_load->p_memsz > pt_load->p_filesz) {
+ Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
+ Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
+ Addr next_page = PageAlignedEndPtr(file_end);
+ if (next_page > file_end) {
+ /* The library is not registered at this point, so we can't rely on
+ * on-demand decompression to handle missing pages here. */
+ void *ptr = GetPtr(file_end);
+ mappable->ensure(ptr);
+ memset(ptr, 0, next_page - file_end);
+ }
+ if (mem_end > next_page) {
+ if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
+ ERROR("%s: Failed to mprotect", GetPath());
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+namespace {
+
+void debug_dyn(const char *type, const Dyn *dyn)
+{
+ DEBUG_LOG("%s 0x%08" PRIxAddr, type, dyn->d_un.d_val);
+}
+
+} /* anonymous namespace */
+
+bool
+CustomElf::InitDyn(const Phdr *pt_dyn)
+{
+ /* Scan PT_DYNAMIC segment and gather some information */
+ const Dyn *first_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr);
+ const Dyn *end_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr + pt_dyn->p_filesz);
+ std::vector<Word> dt_needed;
+ size_t symnum = 0;
+ for (const Dyn *dyn = first_dyn; dyn < end_dyn && dyn->d_tag; dyn++) {
+ switch (dyn->d_tag) {
+ case DT_NEEDED:
+ debug_dyn("DT_NEEDED", dyn);
+ dt_needed.push_back(dyn->d_un.d_val);
+ break;
+ case DT_HASH:
+ {
+ debug_dyn("DT_HASH", dyn);
+ const Word *hash_table_header = GetPtr<Word>(dyn->d_un.d_ptr);
+ symnum = hash_table_header[1];
+ buckets.Init(&hash_table_header[2], hash_table_header[0]);
+ chains.Init(&*buckets.end());
+ }
+ break;
+ case DT_STRTAB:
+ debug_dyn("DT_STRTAB", dyn);
+ strtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_SYMTAB:
+ debug_dyn("DT_SYMTAB", dyn);
+ symtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_SYMENT:
+ debug_dyn("DT_SYMENT", dyn);
+ if (dyn->d_un.d_val != sizeof(Sym)) {
+ ERROR("%s: Unsupported DT_SYMENT", GetPath());
+ return false;
+ }
+ break;
+ case DT_TEXTREL:
+ if (strcmp("libflashplayer.so", GetName()) == 0) {
+ has_text_relocs = true;
+ } else {
+ ERROR("%s: Text relocations are not supported", GetPath());
+ return false;
+ }
+ break;
+ case DT_STRSZ: /* Ignored */
+ debug_dyn("DT_STRSZ", dyn);
+ break;
+ case UNSUPPORTED_RELOC():
+ case UNSUPPORTED_RELOC(SZ):
+ case UNSUPPORTED_RELOC(ENT):
+ ERROR("%s: Unsupported relocations", GetPath());
+ return false;
+ case RELOC():
+ debug_dyn(STR_RELOC(), dyn);
+ relocations.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case RELOC(SZ):
+ debug_dyn(STR_RELOC(SZ), dyn);
+ relocations.InitSize(dyn->d_un.d_val);
+ break;
+ case RELOC(ENT):
+ debug_dyn(STR_RELOC(ENT), dyn);
+ if (dyn->d_un.d_val != sizeof(Reloc)) {
+ ERROR("%s: Unsupported DT_RELENT", GetPath());
+ return false;
+ }
+ break;
+ case DT_JMPREL:
+ debug_dyn("DT_JMPREL", dyn);
+ jumprels.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_PLTRELSZ:
+ debug_dyn("DT_PLTRELSZ", dyn);
+ jumprels.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_PLTGOT:
+ debug_dyn("DT_PLTGOT", dyn);
+ break;
+ case DT_INIT:
+ debug_dyn("DT_INIT", dyn);
+ init = dyn->d_un.d_ptr;
+ break;
+ case DT_INIT_ARRAY:
+ debug_dyn("DT_INIT_ARRAY", dyn);
+ init_array.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_INIT_ARRAYSZ:
+ debug_dyn("DT_INIT_ARRAYSZ", dyn);
+ init_array.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_FINI:
+ debug_dyn("DT_FINI", dyn);
+ fini = dyn->d_un.d_ptr;
+ break;
+ case DT_FINI_ARRAY:
+ debug_dyn("DT_FINI_ARRAY", dyn);
+ fini_array.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_FINI_ARRAYSZ:
+ debug_dyn("DT_FINI_ARRAYSZ", dyn);
+ fini_array.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_PLTREL:
+ if (dyn->d_un.d_val != RELOC()) {
+ ERROR("%s: Error: DT_PLTREL is not " STR_RELOC(), GetPath());
+ return false;
+ }
+ break;
+ case DT_FLAGS:
+ {
+ Addr flags = dyn->d_un.d_val;
+ /* Treat as a DT_TEXTREL tag */
+ if (flags & DF_TEXTREL) {
+ if (strcmp("libflashplayer.so", GetName()) == 0) {
+ has_text_relocs = true;
+ } else {
+ ERROR("%s: Text relocations are not supported", GetPath());
+ return false;
+ }
+ }
+ /* we can treat this like having a DT_SYMBOLIC tag */
+ flags &= ~DF_SYMBOLIC;
+ if (flags)
+          WARN("%s: unhandled flags #%" PRIxAddr, GetPath(), flags);
+ }
+ break;
+ case DT_SONAME: /* Should match GetName(), but doesn't matter */
+ case DT_SYMBOLIC: /* Indicates internal symbols should be looked up in
+ * the library itself first instead of the executable,
+ * which is actually what this linker does by default */
+ case RELOC(COUNT): /* Indicates how many relocations are relative, which
+ * is usually used to skip relocations on prelinked
+                     * libraries. They are not supported anyway. */
+ case UNSUPPORTED_RELOC(COUNT): /* This should error out, but it doesn't
+ * really matter. */
+ case DT_FLAGS_1: /* Additional linker-internal flags that we don't care about. See
+ * DF_1_* values in src/include/elf/common.h in binutils. */
+ case DT_VERSYM: /* DT_VER* entries are used for symbol versioning, which */
+ case DT_VERDEF: /* this linker doesn't support yet. */
+ case DT_VERDEFNUM:
+ case DT_VERNEED:
+ case DT_VERNEEDNUM:
+ /* Ignored */
+ break;
+ default:
+ WARN("%s: dynamic header type #%" PRIxAddr" not handled",
+ GetPath(), dyn->d_tag);
+ }
+ }
+
+ if (!buckets || !symnum) {
+ ERROR("%s: Missing or broken DT_HASH", GetPath());
+ return false;
+ }
+ if (!strtab) {
+ ERROR("%s: Missing DT_STRTAB", GetPath());
+ return false;
+ }
+ if (!symtab) {
+ ERROR("%s: Missing DT_SYMTAB", GetPath());
+ return false;
+ }
+
+ /* Load dependent libraries */
+ for (size_t i = 0; i < dt_needed.size(); i++) {
+ const char *name = strtab.GetStringAt(dt_needed[i]);
+ RefPtr<LibHandle> handle =
+ ElfLoader::Singleton.Load(name, RTLD_GLOBAL | RTLD_LAZY, this);
+ if (!handle)
+ return false;
+ dependencies.push_back(handle);
+ }
+
+ return true;
+}
+
+bool
+CustomElf::Relocate()
+{
+ DEBUG_LOG("Relocate %s @%p", GetPath(), static_cast<void *>(base));
+ uint32_t symtab_index = (uint32_t) -1;
+ void *symptr = nullptr;
+ for (Array<Reloc>::iterator rel = relocations.begin();
+ rel < relocations.end(); ++rel) {
+ /* Location of the relocation */
+ void *ptr = GetPtr(rel->r_offset);
+
+ /* R_*_RELATIVE relocations apply directly at the given location */
+ if (ELF_R_TYPE(rel->r_info) == R_RELATIVE) {
+ *(void **) ptr = GetPtr(rel->GetAddend(base));
+ continue;
+ }
+ /* Other relocation types need a symbol resolution */
+ /* Avoid symbol resolution when it's the same symbol as last iteration */
+ if (symtab_index != ELF_R_SYM(rel->r_info)) {
+ symtab_index = ELF_R_SYM(rel->r_info);
+ const Sym sym = symtab[symtab_index];
+ if (sym.st_shndx != SHN_UNDEF) {
+ symptr = GetPtr(sym.st_value);
+ } else {
+ /* TODO: handle symbol resolving to nullptr vs. being undefined. */
+ symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
+ }
+ }
+
+ if (symptr == nullptr)
+ WARN("%s: Relocation to NULL @0x%08" PRIxAddr,
+ GetPath(), rel->r_offset);
+
+ /* Apply relocation */
+ switch (ELF_R_TYPE(rel->r_info)) {
+ case R_GLOB_DAT:
+ /* R_*_GLOB_DAT relocations simply use the symbol value */
+ *(void **) ptr = symptr;
+ break;
+ case R_ABS:
+      /* R_*_ABS* relocations add the addend to the symbol value */
+ *(const char **) ptr = (const char *)symptr + rel->GetAddend(base);
+ break;
+ default:
+ ERROR("%s: Unsupported relocation type: 0x%" PRIxAddr,
+ GetPath(), ELF_R_TYPE(rel->r_info));
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+CustomElf::RelocateJumps()
+{
+ /* TODO: Dynamic symbol resolution */
+ for (Array<Reloc>::iterator rel = jumprels.begin();
+ rel < jumprels.end(); ++rel) {
+ /* Location of the relocation */
+ void *ptr = GetPtr(rel->r_offset);
+
+ /* Only R_*_JMP_SLOT relocations are expected */
+ if (ELF_R_TYPE(rel->r_info) != R_JMP_SLOT) {
+ ERROR("%s: Jump relocation type mismatch", GetPath());
+ return false;
+ }
+
+ /* TODO: Avoid code duplication with the relocations above */
+ const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
+ void *symptr;
+ if (sym.st_shndx != SHN_UNDEF)
+ symptr = GetPtr(sym.st_value);
+ else
+ symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
+
+ if (symptr == nullptr) {
+ if (ELF_ST_BIND(sym.st_info) == STB_WEAK) {
+ WARN("%s: Relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
+ GetPath(),
+ rel->r_offset, strtab.GetStringAt(sym.st_name));
+ } else {
+ ERROR("%s: Relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
+ GetPath(),
+ rel->r_offset, strtab.GetStringAt(sym.st_name));
+ return false;
+ }
+ }
+ /* Apply relocation */
+ *(void **) ptr = symptr;
+ }
+ return true;
+}
+
+bool
+CustomElf::CallInit()
+{
+ if (init)
+ CallFunction(init);
+
+ for (Array<void *>::iterator it = init_array.begin();
+ it < init_array.end(); ++it) {
+ /* Android x86 NDK wrongly puts 0xffffffff in INIT_ARRAY */
+ if (*it && *it != reinterpret_cast<void *>(-1))
+ CallFunction(*it);
+ }
+ initialized = true;
+ return true;
+}
+
+void
+CustomElf::CallFini()
+{
+ if (!initialized)
+ return;
+ for (Array<void *>::reverse_iterator it = fini_array.rbegin();
+ it < fini_array.rend(); ++it) {
+ /* Android x86 NDK wrongly puts 0xffffffff in FINI_ARRAY */
+ if (*it && *it != reinterpret_cast<void *>(-1))
+ CallFunction(*it);
+ }
+ if (fini)
+ CallFunction(fini);
+}
+
+Mappable *
+CustomElf::GetMappable() const
+{
+ if (!mappable)
+ return nullptr;
+ if (mappable->GetKind() == Mappable::MAPPABLE_EXTRACT_FILE)
+ return mappable;
+ return ElfLoader::GetMappableFromPath(GetPath());
+}
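
The alignment arithmetic in CustomElf::LoadSegment above can be isolated: the mapping address and the file offset both move down by the same align_offset, so they stay congruent modulo the alignment, which is what mmap with MAP_FIXED requires. A sketch under the assumption that align is a power of two; AlignDown and PlanSegment are hypothetical stand-ins for the AlignedPtr helper from Utils.h and the inline computation above.

    #include <cstddef>
    #include <cstdint>

    static uintptr_t AlignDown(uintptr_t addr, uintptr_t align)
    {
      return addr & ~(align - 1);  // align must be a power of two
    }

    struct MapPlan { uintptr_t addr; size_t length; uintptr_t offset; };

    // Compute the mmap parameters for a PT_LOAD segment to be mapped at
    // base + p_vaddr, mirroring LoadSegment above.
    static MapPlan
    PlanSegment(uintptr_t base, uintptr_t p_vaddr, uintptr_t p_offset,
                size_t p_filesz, uintptr_t align)
    {
      uintptr_t align_offset = p_vaddr - AlignDown(p_vaddr, align);
      MapPlan plan = { base + p_vaddr - align_offset,
                       p_filesz + align_offset,
                       p_offset - align_offset };
      return plan;
    }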
diff --git a/mozglue/linker/CustomElf.h b/mozglue/linker/CustomElf.h
new file mode 100644
index 000000000..5ecc7da08
--- /dev/null
+++ b/mozglue/linker/CustomElf.h
@@ -0,0 +1,159 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CustomElf_h
+#define CustomElf_h
+
+#include "ElfLoader.h"
+#include "BaseElf.h"
+#include "Logging.h"
+#include "Elfxx.h"
+
+/**
+ * Library Handle class for ELF libraries we don't let the system linker
+ * handle.
+ */
+class CustomElf: public BaseElf, private ElfLoader::link_map
+{
+ friend class ElfLoader;
+ friend class SEGVHandler;
+public:
+ /**
+   * Returns a new CustomElf using the given Mappable to map ELF content.
+   * Ownership of the Mappable is taken over: it is released in CustomElf's
+   * destructor if an instance is created, or by the Load method otherwise.
+   * The path corresponds to the Mappable's underlying file, and flags
+ * are the same kind of flags that would be given to dlopen(), though
+ * currently, none are supported and the behaviour is more or less that of
+ * RTLD_GLOBAL | RTLD_BIND_NOW.
+ */
+ static already_AddRefed<LibHandle> Load(Mappable *mappable,
+ const char *path, int flags);
+
+ /**
+ * Inherited from LibHandle/BaseElf
+ */
+ virtual ~CustomElf();
+
+protected:
+ virtual Mappable *GetMappable() const;
+
+public:
+ /**
+   * Shows some stats about the Mappable instance. The when argument is
+   * used by the caller to identify when the stats call is made.
+ */
+ virtual void stats(const char *when) const;
+
+ /**
+   * Returns the instance, cast as BaseElf (short of a better way to do
+   * this without RTTI).
+ */
+ virtual BaseElf *AsBaseElf() { return this; }
+
+private:
+ /**
+ * Scan dependent libraries to find the address corresponding to the
+ * given symbol name. This is used to find symbols that are undefined
+ * in the Elf object.
+ */
+ void *GetSymbolPtrInDeps(const char *symbol) const;
+
+ /**
+ * Private constructor
+ */
+ CustomElf(Mappable *mappable, const char *path)
+ : BaseElf(path, mappable)
+ , link_map()
+ , init(0)
+ , fini(0)
+ , initialized(false)
+ , has_text_relocs(false)
+ { }
+
+ /**
+ * Loads an Elf segment defined by the given PT_LOAD header.
+ * Returns whether this succeeded or failed.
+ */
+ bool LoadSegment(const Elf::Phdr *pt_load) const;
+
+ /**
+ * Initializes the library according to information found in the given
+ * PT_DYNAMIC header.
+ * Returns whether this succeeded or failed.
+ */
+ bool InitDyn(const Elf::Phdr *pt_dyn);
+
+ /**
+ * Apply .rel.dyn/.rela.dyn relocations.
+ * Returns whether this succeeded or failed.
+ */
+ bool Relocate();
+
+ /**
+ * Apply .rel.plt/.rela.plt relocations.
+ * Returns whether this succeeded or failed.
+ */
+ bool RelocateJumps();
+
+ /**
+ * Call initialization functions (.init/.init_array)
+   * Always returns true.
+ */
+ bool CallInit();
+
+ /**
+   * Call destructor functions (.fini_array/.fini).
+ */
+ void CallFini();
+
+ /**
+ * Call a function given a pointer to its location.
+ */
+ void CallFunction(void *ptr) const
+ {
+ /* C++ doesn't allow direct conversion between pointer-to-object
+ * and pointer-to-function. */
+ union {
+ void *ptr;
+ void (*func)(void);
+ } f;
+ f.ptr = ptr;
+ DEBUG_LOG("%s: Calling function @%p", GetPath(), ptr);
+ f.func();
+ }
+
+ /**
+   * Call a function given an address relative to the library base.
+ */
+ void CallFunction(Elf::Addr addr) const
+ {
+ return CallFunction(GetPtr(addr));
+ }
+
+ /* List of dependent libraries */
+ std::vector<RefPtr<LibHandle> > dependencies;
+
+ /* List of .rel.dyn/.rela.dyn relocations */
+ Array<Elf::Reloc> relocations;
+
+  /* List of .rel.plt/.rela.plt relocations */
+ Array<Elf::Reloc> jumprels;
+
+ /* Relative address of the initialization and destruction functions
+ * (.init/.fini) */
+ Elf::Addr init, fini;
+
+ /* List of initialization and destruction functions
+ * (.init_array/.fini_array) */
+ Array<void *> init_array, fini_array;
+
+ bool initialized;
+
+ bool has_text_relocs;
+};
+
+#endif /* CustomElf_h */
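
The union in CustomElf::CallFunction above exists because C++ has no direct conversion between pointer-to-object and pointer-to-function. A self-contained sketch of the same type-punning trick; fake_init is made up for illustration:

    #include <cstdio>

    static void fake_init(void) { puts("init called"); }

    static void CallThrough(void *ptr)
    {
      union {
        void *ptr;
        void (*func)(void);
      } f;
      f.ptr = ptr;  // type-pun instead of an ill-formed direct cast
      f.func();
    }

    int main()
    {
      // Converting a function pointer to void * is only conditionally
      // supported by the standard, but works on the POSIX platforms this
      // linker targets.
      CallThrough(reinterpret_cast<void *>(&fake_init));
      return 0;
    }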
diff --git a/mozglue/linker/ElfLoader.cpp b/mozglue/linker/ElfLoader.cpp
new file mode 100644
index 000000000..76225d1e7
--- /dev/null
+++ b/mozglue/linker/ElfLoader.cpp
@@ -0,0 +1,1310 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <string>
+#include <cstring>
+#include <cstdlib>
+#include <cstdio>
+#include <dlfcn.h>
+#include <unistd.h>
+#include <errno.h>
+#include <algorithm>
+#include <fcntl.h>
+#include "ElfLoader.h"
+#include "BaseElf.h"
+#include "CustomElf.h"
+#include "Mappable.h"
+#include "Logging.h"
+#include <inttypes.h>
+
+#if defined(ANDROID)
+#include <sys/syscall.h>
+
+#include <android/api-level.h>
+#if __ANDROID_API__ < 8
+/* Android API < 8 doesn't provide sigaltstack */
+
+extern "C" {
+
+inline int sigaltstack(const stack_t *ss, stack_t *oss) {
+ return syscall(__NR_sigaltstack, ss, oss);
+}
+
+} /* extern "C" */
+#endif /* __ANDROID_API__ */
+#endif /* ANDROID */
+
+#ifdef __ARM_EABI__
+extern "C" MOZ_EXPORT const void *
+__gnu_Unwind_Find_exidx(void *pc, int *pcount) __attribute__((weak));
+#endif
+
+/* Pointer to the PT_DYNAMIC section of the executable or library
+ * containing this code. */
+extern "C" Elf::Dyn _DYNAMIC[];
+
+using namespace mozilla;
+
+/**
+ * dlfcn.h replacements functions
+ */
+
+void *
+__wrap_dlopen(const char *path, int flags)
+{
+ RefPtr<LibHandle> handle = ElfLoader::Singleton.Load(path, flags);
+ if (handle)
+ handle->AddDirectRef();
+ return handle;
+}
+
+const char *
+__wrap_dlerror(void)
+{
+ const char *error = ElfLoader::Singleton.lastError;
+ ElfLoader::Singleton.lastError = nullptr;
+ return error;
+}
+
+void *
+__wrap_dlsym(void *handle, const char *symbol)
+{
+ if (!handle) {
+ ElfLoader::Singleton.lastError = "dlsym(NULL, sym) unsupported";
+ return nullptr;
+ }
+ if (handle != RTLD_DEFAULT && handle != RTLD_NEXT) {
+ LibHandle *h = reinterpret_cast<LibHandle *>(handle);
+ return h->GetSymbolPtr(symbol);
+ }
+ return dlsym(handle, symbol);
+}
+
+int
+__wrap_dlclose(void *handle)
+{
+ if (!handle) {
+ ElfLoader::Singleton.lastError = "No handle given to dlclose()";
+ return -1;
+ }
+ reinterpret_cast<LibHandle *>(handle)->ReleaseDirectRef();
+ return 0;
+}
+
+int
+__wrap_dladdr(void *addr, Dl_info *info)
+{
+ RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(addr);
+ if (!handle) {
+ return dladdr(addr, info);
+ }
+ info->dli_fname = handle->GetPath();
+ info->dli_fbase = handle->GetBase();
+ return 1;
+}
+
+int
+__wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data)
+{
+ if (!ElfLoader::Singleton.dbg)
+ return -1;
+
+ int pipefd[2];
+ bool valid_pipe = (pipe(pipefd) == 0);
+ AutoCloseFD read_fd(pipefd[0]);
+ AutoCloseFD write_fd(pipefd[1]);
+
+ for (ElfLoader::DebuggerHelper::iterator it = ElfLoader::Singleton.dbg.begin();
+ it < ElfLoader::Singleton.dbg.end(); ++it) {
+ dl_phdr_info info;
+ info.dlpi_addr = reinterpret_cast<Elf::Addr>(it->l_addr);
+ info.dlpi_name = it->l_name;
+ info.dlpi_phdr = nullptr;
+ info.dlpi_phnum = 0;
+
+ // Assuming l_addr points to Elf headers (in most cases, this is true),
+ // get the Phdr location from there.
+ // Unfortunately, when l_addr doesn't point to Elf headers, it may point
+ // to unmapped memory, or worse, unreadable memory. The only way to detect
+ // the latter without causing a SIGSEGV is to use the pointer in a system
+ // call that will try to read from there, and return an EFAULT error if
+ // it can't. One such system call is write(). It used to be possible to
+ // use a file descriptor on /dev/null for these kind of things, but recent
+ // Linux kernels never return an EFAULT error when using /dev/null.
+ // So instead, we use a self pipe. We do however need to read() from the
+ // read end of the pipe as well so as to not fill up the pipe buffer and
+ // block on subsequent writes.
+      // In the unlikely event reads from or writes to the pipe fail for some
+ // other reason than EFAULT, we don't try any further and just skip setting
+ // the Phdr location for all subsequent libraries, rather than trying to
+ // start over with a new pipe.
+    bool can_read = true;
+ if (valid_pipe) {
+ int ret;
+ char raw_ehdr[sizeof(Elf::Ehdr)];
+ static_assert(sizeof(raw_ehdr) < PIPE_BUF, "PIPE_BUF is too small");
+ do {
+ // writes are atomic when smaller than PIPE_BUF, per POSIX.1-2008.
+ ret = write(write_fd, it->l_addr, sizeof(raw_ehdr));
+ } while (ret == -1 && errno == EINTR);
+ if (ret != sizeof(raw_ehdr)) {
+ if (ret == -1 && errno == EFAULT) {
+ can_read = false;
+ } else {
+ valid_pipe = false;
+ }
+ } else {
+ size_t nbytes = 0;
+ do {
+ // Per POSIX.1-2008, interrupted reads can return a length smaller
+ // than the given one instead of failing with errno EINTR.
+ ret = read(read_fd, raw_ehdr + nbytes, sizeof(raw_ehdr) - nbytes);
+ if (ret > 0)
+ nbytes += ret;
+ } while ((nbytes != sizeof(raw_ehdr) && ret > 0) ||
+ (ret == -1 && errno == EINTR));
+ if (nbytes != sizeof(raw_ehdr)) {
+ valid_pipe = false;
+ }
+ }
+ }
+
+ if (valid_pipe && can_read) {
+ const Elf::Ehdr *ehdr = Elf::Ehdr::validate(it->l_addr);
+ if (ehdr) {
+ info.dlpi_phdr = reinterpret_cast<const Elf::Phdr *>(
+ reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
+ info.dlpi_phnum = ehdr->e_phnum;
+ }
+ }
+
+ int ret = callback(&info, sizeof(dl_phdr_info), data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef __ARM_EABI__
+const void *
+__wrap___gnu_Unwind_Find_exidx(void *pc, int *pcount)
+{
+ RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(pc);
+ if (handle)
+ return handle->FindExidx(pcount);
+ if (__gnu_Unwind_Find_exidx)
+ return __gnu_Unwind_Find_exidx(pc, pcount);
+ *pcount = 0;
+ return nullptr;
+}
+#endif
+
+/**
+ * faulty.lib public API
+ */
+
+MFBT_API size_t
+__dl_get_mappable_length(void *handle) {
+ if (!handle)
+ return 0;
+ return reinterpret_cast<LibHandle *>(handle)->GetMappableLength();
+}
+
+MFBT_API void *
+__dl_mmap(void *handle, void *addr, size_t length, off_t offset)
+{
+ if (!handle)
+ return nullptr;
+ return reinterpret_cast<LibHandle *>(handle)->MappableMMap(addr, length,
+ offset);
+}
+
+MFBT_API void
+__dl_munmap(void *handle, void *addr, size_t length)
+{
+ if (!handle)
+ return;
+ return reinterpret_cast<LibHandle *>(handle)->MappableMUnmap(addr, length);
+}
+
+MFBT_API bool
+IsSignalHandlingBroken()
+{
+ return ElfLoader::Singleton.isSignalHandlingBroken();
+}
+
+namespace {
+
+/**
+ * Returns the part after the last '/' for the given path
+ */
+const char *
+LeafName(const char *path)
+{
+ const char *lastSlash = strrchr(path, '/');
+ if (lastSlash)
+ return lastSlash + 1;
+ return path;
+}
+
+} /* Anonymous namespace */
+
+/**
+ * LibHandle
+ */
+LibHandle::~LibHandle()
+{
+ free(path);
+}
+
+const char *
+LibHandle::GetName() const
+{
+ return path ? LeafName(path) : nullptr;
+}
+
+size_t
+LibHandle::GetMappableLength() const
+{
+ if (!mappable)
+ mappable = GetMappable();
+ if (!mappable)
+ return 0;
+ return mappable->GetLength();
+}
+
+void *
+LibHandle::MappableMMap(void *addr, size_t length, off_t offset) const
+{
+ if (!mappable)
+ mappable = GetMappable();
+ if (!mappable)
+ return MAP_FAILED;
+ void* mapped = mappable->mmap(addr, length, PROT_READ, MAP_PRIVATE, offset);
+ if (mapped != MAP_FAILED) {
+ /* Ensure the availability of all pages within the mapping */
+ for (size_t off = 0; off < length; off += PageSize()) {
+ mappable->ensure(reinterpret_cast<char *>(mapped) + off);
+ }
+ }
+ return mapped;
+}
+
+void
+LibHandle::MappableMUnmap(void *addr, size_t length) const
+{
+ if (mappable)
+ mappable->munmap(addr, length);
+}
+
+/**
+ * SystemElf
+ */
+already_AddRefed<LibHandle>
+SystemElf::Load(const char *path, int flags)
+{
+ /* The Android linker returns a handle when the file name matches an
+ * already loaded library, even when the full path doesn't exist */
+ if (path && path[0] == '/' && (access(path, F_OK) == -1)){
+ DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, (void *)nullptr);
+ return nullptr;
+ }
+
+ void *handle = dlopen(path, flags);
+ DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, handle);
+ ElfLoader::Singleton.lastError = dlerror();
+ if (handle) {
+ SystemElf *elf = new SystemElf(path, handle);
+ ElfLoader::Singleton.Register(elf);
+ RefPtr<LibHandle> lib(elf);
+ return lib.forget();
+ }
+ return nullptr;
+}
+
+SystemElf::~SystemElf()
+{
+ if (!dlhandle)
+ return;
+ DEBUG_LOG("dlclose(%p [\"%s\"])", dlhandle, GetPath());
+ dlclose(dlhandle);
+ ElfLoader::Singleton.lastError = dlerror();
+ ElfLoader::Singleton.Forget(this);
+}
+
+void *
+SystemElf::GetSymbolPtr(const char *symbol) const
+{
+ void *sym = dlsym(dlhandle, symbol);
+ DEBUG_LOG("dlsym(%p [\"%s\"], \"%s\") = %p", dlhandle, GetPath(), symbol, sym);
+ ElfLoader::Singleton.lastError = dlerror();
+ return sym;
+}
+
+Mappable *
+SystemElf::GetMappable() const
+{
+ const char *path = GetPath();
+ if (!path)
+ return nullptr;
+#ifdef ANDROID
+ /* On Android, if we don't have the full path, try in /system/lib */
+ const char *name = LeafName(path);
+ std::string systemPath;
+ if (name == path) {
+ systemPath = "/system/lib/";
+ systemPath += path;
+ path = systemPath.c_str();
+ }
+#endif
+
+ return MappableFile::Create(path);
+}
+
+#ifdef __ARM_EABI__
+const void *
+SystemElf::FindExidx(int *pcount) const
+{
+ /* TODO: properly implement when ElfLoader::GetHandleByPtr
+ does return SystemElf handles */
+ *pcount = 0;
+ return nullptr;
+}
+#endif
+
+/**
+ * ElfLoader
+ */
+
+/* Unique ElfLoader instance */
+ElfLoader ElfLoader::Singleton;
+
+already_AddRefed<LibHandle>
+ElfLoader::Load(const char *path, int flags, LibHandle *parent)
+{
+ /* Ensure logging is initialized or refresh if environment changed. */
+ Logging::Init();
+
+ /* Ensure self_elf initialization. */
+ if (!self_elf)
+ Init();
+
+ RefPtr<LibHandle> handle;
+
+ /* Handle dlopen(nullptr) directly. */
+ if (!path) {
+ handle = SystemElf::Load(nullptr, flags);
+ return handle.forget();
+ }
+
+ /* TODO: Handle relative paths correctly */
+ const char *name = LeafName(path);
+
+ /* Search the list of handles we already have for a match. When the given
+ * path is not absolute, compare file names, otherwise compare full paths. */
+ if (name == path) {
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
+ if ((*it)->GetName() && (strcmp((*it)->GetName(), name) == 0)) {
+ handle = *it;
+ return handle.forget();
+ }
+ } else {
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
+ if ((*it)->GetPath() && (strcmp((*it)->GetPath(), path) == 0)) {
+ handle = *it;
+ return handle.forget();
+ }
+ }
+
+ char *abs_path = nullptr;
+ const char *requested_path = path;
+
+ /* When the path is not absolute and the library is being loaded for
+   * another library, first try to load it from the directory containing
+ * that parent library. */
+ if ((name == path) && parent) {
+ const char *parentPath = parent->GetPath();
+    abs_path = new char[strlen(parentPath) + strlen(path) + 1];
+ strcpy(abs_path, parentPath);
+ char *slash = strrchr(abs_path, '/');
+ strcpy(slash + 1, path);
+ path = abs_path;
+ }
+
+ Mappable *mappable = GetMappableFromPath(path);
+
+ /* Try loading with the custom linker if we have a Mappable */
+ if (mappable)
+ handle = CustomElf::Load(mappable, path, flags);
+
+ /* Try loading with the system linker if everything above failed */
+ if (!handle)
+ handle = SystemElf::Load(path, flags);
+
+ /* If we didn't have an absolute path and haven't been able to load
+ * a library yet, try in the system search path */
+ if (!handle && abs_path)
+ handle = SystemElf::Load(name, flags);
+
+ delete [] abs_path;
+ DEBUG_LOG("ElfLoader::Load(\"%s\", 0x%x, %p [\"%s\"]) = %p", requested_path, flags,
+ reinterpret_cast<void *>(parent), parent ? parent->GetPath() : "",
+ static_cast<void *>(handle));
+
+ return handle.forget();
+}
+
+already_AddRefed<LibHandle>
+ElfLoader::GetHandleByPtr(void *addr)
+{
+ /* Scan the list of handles we already have for a match */
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it) {
+ if ((*it)->Contains(addr)) {
+ RefPtr<LibHandle> lib = *it;
+ return lib.forget();
+ }
+ }
+ return nullptr;
+}
+
+Mappable *
+ElfLoader::GetMappableFromPath(const char *path)
+{
+ const char *name = LeafName(path);
+ Mappable *mappable = nullptr;
+ RefPtr<Zip> zip;
+ const char *subpath;
+ if ((subpath = strchr(path, '!'))) {
+ char *zip_path = strndup(path, subpath - path);
+ while (*(++subpath) == '/') { }
+ zip = ZipCollection::GetZip(zip_path);
+ free(zip_path);
+ Zip::Stream s;
+ if (zip && zip->GetStream(subpath, &s)) {
+ /* When the MOZ_LINKER_EXTRACT environment variable is set to "1",
+ * compressed libraries are going to be (temporarily) extracted as
+ * files, in the directory pointed by the MOZ_LINKER_CACHE
+ * environment variable. */
+ const char *extract = getenv("MOZ_LINKER_EXTRACT");
+ if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
+ mappable = MappableExtractFile::Create(name, zip, &s);
+ if (!mappable) {
+ if (s.GetType() == Zip::Stream::DEFLATE) {
+ mappable = MappableDeflate::Create(name, zip, &s);
+ } else if (s.GetType() == Zip::Stream::STORE) {
+ mappable = MappableSeekableZStream::Create(name, zip, &s);
+ }
+ }
+ }
+ }
+ /* If we couldn't load above, try with a MappableFile */
+ if (!mappable && !zip)
+ mappable = MappableFile::Create(path);
+
+ return mappable;
+}
+
+void
+ElfLoader::Register(LibHandle *handle)
+{
+ handles.push_back(handle);
+}
+
+void
+ElfLoader::Register(CustomElf *handle)
+{
+ Register(static_cast<LibHandle *>(handle));
+ if (dbg) {
+ dbg.Add(handle);
+ }
+}
+
+void
+ElfLoader::Forget(LibHandle *handle)
+{
+ /* Ensure logging is initialized or refresh if environment changed. */
+ Logging::Init();
+
+ LibHandleList::iterator it = std::find(handles.begin(), handles.end(), handle);
+ if (it != handles.end()) {
+ DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"])", reinterpret_cast<void *>(handle),
+ handle->GetPath());
+ handles.erase(it);
+ } else {
+ DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"]): Handle not found",
+ reinterpret_cast<void *>(handle), handle->GetPath());
+ }
+}
+
+void
+ElfLoader::Forget(CustomElf *handle)
+{
+ Forget(static_cast<LibHandle *>(handle));
+ if (dbg) {
+ dbg.Remove(handle);
+ }
+}
+
+void
+ElfLoader::Init()
+{
+ Dl_info info;
+  /* On Android < 4.1, dl* functions can't be reentered. So when the library
+ * containing this code is dlopen()ed, it can't call dladdr from a
+ * static initializer. */
+ if (dladdr(_DYNAMIC, &info) != 0) {
+ self_elf = LoadedElf::Create(info.dli_fname, info.dli_fbase);
+ }
+#if defined(ANDROID)
+ if (dladdr(FunctionPtr(syscall), &info) != 0) {
+ libc = LoadedElf::Create(info.dli_fname, info.dli_fbase);
+ }
+#endif
+}
+
+ElfLoader::~ElfLoader()
+{
+ LibHandleList list;
+
+ if (!Singleton.IsShutdownExpected()) {
+ MOZ_CRASH("Unexpected shutdown");
+ }
+
+ /* Release self_elf and libc */
+ self_elf = nullptr;
+#if defined(ANDROID)
+ libc = nullptr;
+#endif
+
+ /* Build up a list of all library handles with direct (external) references.
+ * We actually skip system library handles because we want to keep at least
+   * some of these open. Most notably, the Mozilla codebase keeps a few libgnome
+ * libraries deliberately open because of the mess that libORBit destruction
+ * is. dlclose()ing these libraries actually leads to problems. */
+ for (LibHandleList::reverse_iterator it = handles.rbegin();
+ it < handles.rend(); ++it) {
+ if ((*it)->DirectRefCount()) {
+ if (SystemElf *se = (*it)->AsSystemElf()) {
+ se->Forget();
+ } else {
+ list.push_back(*it);
+ }
+ }
+ }
+ /* Force release all external references to the handles collected above */
+ for (LibHandleList::iterator it = list.begin(); it < list.end(); ++it) {
+ while ((*it)->ReleaseDirectRef()) { }
+ }
+ /* Remove the remaining system handles. */
+ if (handles.size()) {
+ list = handles;
+ for (LibHandleList::reverse_iterator it = list.rbegin();
+ it < list.rend(); ++it) {
+ if ((*it)->AsSystemElf()) {
+ DEBUG_LOG("ElfLoader::~ElfLoader(): Remaining handle for \"%s\" "
+ "[%d direct refs, %d refs total]", (*it)->GetPath(),
+ (*it)->DirectRefCount(), (*it)->refCount());
+ } else {
+ DEBUG_LOG("ElfLoader::~ElfLoader(): Unexpected remaining handle for \"%s\" "
+ "[%d direct refs, %d refs total]", (*it)->GetPath(),
+ (*it)->DirectRefCount(), (*it)->refCount());
+ /* Not removing, since it could have references to other libraries,
+ * destroying them as a side effect, and possibly leaving dangling
+ * pointers in the handle list we're scanning */
+ }
+ }
+ }
+}
+
+void
+ElfLoader::stats(const char *when)
+{
+ if (MOZ_LIKELY(!Logging::isVerbose()))
+ return;
+
+ for (LibHandleList::iterator it = Singleton.handles.begin();
+ it < Singleton.handles.end(); ++it)
+ (*it)->stats(when);
+}
+
+#ifdef __ARM_EABI__
+int
+ElfLoader::__wrap_aeabi_atexit(void *that, ElfLoader::Destructor destructor,
+ void *dso_handle)
+{
+ Singleton.destructors.push_back(
+ DestructorCaller(destructor, that, dso_handle));
+ return 0;
+}
+#else
+int
+ElfLoader::__wrap_cxa_atexit(ElfLoader::Destructor destructor, void *that,
+ void *dso_handle)
+{
+ Singleton.destructors.push_back(
+ DestructorCaller(destructor, that, dso_handle));
+ return 0;
+}
+#endif
+
+void
+ElfLoader::__wrap_cxa_finalize(void *dso_handle)
+{
+ /* Call all destructors for the given DSO handle in reverse order they were
+ * registered. */
+ std::vector<DestructorCaller>::reverse_iterator it;
+ for (it = Singleton.destructors.rbegin();
+ it < Singleton.destructors.rend(); ++it) {
+ if (it->IsForHandle(dso_handle)) {
+ it->Call();
+ }
+ }
+}
+
+void
+ElfLoader::DestructorCaller::Call()
+{
+ if (destructor) {
+ DEBUG_LOG("ElfLoader::DestructorCaller::Call(%p, %p, %p)",
+ FunctionPtr(destructor), object, dso_handle);
+ destructor(object);
+ destructor = nullptr;
+ }
+}
+
+ElfLoader::DebuggerHelper::DebuggerHelper(): dbg(nullptr), firstAdded(nullptr)
+{
+ /* Find ELF auxiliary vectors.
+ *
+ * The kernel stores the following data on the stack when starting a
+ * program:
+ * argc
+ * argv[0] (pointer into argv strings defined below)
+ * argv[1] (likewise)
+ * ...
+ * argv[argc - 1] (likewise)
+ * nullptr
+ * envp[0] (pointer into environment strings defined below)
+ * envp[1] (likewise)
+ * ...
+ * envp[n] (likewise)
+ * nullptr
+ * ... (more NULLs on some platforms such as Android 4.3)
+ * auxv[0] (first ELF auxiliary vector)
+ * auxv[1] (second ELF auxiliary vector)
+ * ...
+ * auxv[p] (last ELF auxiliary vector)
+ * (AT_NULL, nullptr)
+ * padding
+ * argv strings, separated with '\0'
+ * environment strings, separated with '\0'
+ * nullptr
+ *
+ * What we are after are the auxv values defined by the following struct.
+ */
+ struct AuxVector {
+ Elf::Addr type;
+ Elf::Addr value;
+ };
+
+ /* Pointer to the environment variables list */
+ extern char **environ;
+
+  /* The environment may have changed since the program started, in which
+   * case the environ variables list isn't the list the kernel put on the
+   * stack anymore. But in this new list, variables that didn't change still
+   * point to the strings the kernel put on the stack. It is quite unlikely
+   * that two modified environment variables point to two consecutive strings
+   * in memory, so we assume that if two consecutive environment variables
+   * point to two consecutive strings, we found strings the kernel put on the
+   * stack. */
+ char **env;
+ for (env = environ; *env; env++)
+ if (*env + strlen(*env) + 1 == env[1])
+ break;
+ if (!*env)
+ return;
+
+ /* Next, we scan the stack backwards to find a pointer to one of those
+ * strings we found above, which will give us the location of the original
+   * envp list. As we are looking for pointers, we need to look at 32-bit or
+   * 64-bit aligned values, depending on the architecture. */
+ char **scan = reinterpret_cast<char **>(
+ reinterpret_cast<uintptr_t>(*env) & ~(sizeof(void *) - 1));
+ while (*env != *scan)
+ scan--;
+
+ /* Finally, scan forward to find the last environment variable pointer and
+ * thus the first auxiliary vector. */
+ while (*scan++);
+
+ /* Some platforms have more NULLs here, so skip them if we encounter them */
+ while (!*scan)
+ scan++;
+
+ AuxVector *auxv = reinterpret_cast<AuxVector *>(scan);
+
+ /* The two values of interest in the auxiliary vectors are AT_PHDR and
+   * AT_PHNUM, which give us the location and size of the ELF program
+ * headers. */
+ Array<Elf::Phdr> phdrs;
+ char *base = nullptr;
+ while (auxv->type) {
+ if (auxv->type == AT_PHDR) {
+ phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
+ /* Assume the base address is the first byte of the same page */
+ base = reinterpret_cast<char *>(PageAlignedPtr(auxv->value));
+ }
+ if (auxv->type == AT_PHNUM)
+ phdrs.Init(auxv->value);
+ auxv++;
+ }
+
+ if (!phdrs) {
+ DEBUG_LOG("Couldn't find program headers");
+ return;
+ }
+
+ /* In some cases, the address for the program headers we get from the
+ * auxiliary vectors is not mapped, because of the PT_LOAD segments
+ * definitions in the program executable. Trying to map anonymous memory
+ * with a hint giving the base address will return a different address
+ * if something is mapped there, and the base address otherwise. */
+ MappedPtr mem(MemoryRange::mmap(base, PageSize(), PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+ if (mem == base) {
+ /* If program headers aren't mapped, try to map them */
+ int fd = open("/proc/self/exe", O_RDONLY);
+ if (fd == -1) {
+ DEBUG_LOG("Failed to open /proc/self/exe");
+ return;
+ }
+ mem.Assign(MemoryRange::mmap(base, PageSize(), PROT_READ, MAP_PRIVATE,
+ fd, 0));
+ /* If we don't manage to map at the right address, just give up. */
+ if (mem != base) {
+ DEBUG_LOG("Couldn't read program headers");
+ return;
+ }
+ }
+ /* Sanity check: the first bytes at the base address should be an ELF
+ * header. */
+ if (!Elf::Ehdr::validate(base)) {
+ DEBUG_LOG("Couldn't find program base");
+ return;
+ }
+
+ /* Search for the program PT_DYNAMIC segment */
+ Array<Elf::Dyn> dyns;
+ for (Array<Elf::Phdr>::iterator phdr = phdrs.begin(); phdr < phdrs.end();
+ ++phdr) {
+ /* While the program headers are expected within the first mapped page of
+     * the program executable, the executable PT_LOADs may actually cause them
+     * to be loaded at an address that is not the wanted base address of the
+ * library. We thus need to adjust the base address, compensating for the
+ * virtual address of the PT_LOAD segment corresponding to offset 0. */
+ if (phdr->p_type == PT_LOAD && phdr->p_offset == 0)
+ base -= phdr->p_vaddr;
+ if (phdr->p_type == PT_DYNAMIC)
+ dyns.Init(base + phdr->p_vaddr, phdr->p_filesz);
+ }
+ if (!dyns) {
+ DEBUG_LOG("Failed to find PT_DYNAMIC section in program");
+ return;
+ }
+
+ /* Search for the DT_DEBUG information */
+ for (Array<Elf::Dyn>::iterator dyn = dyns.begin(); dyn < dyns.end(); ++dyn) {
+ if (dyn->d_tag == DT_DEBUG) {
+ dbg = reinterpret_cast<r_debug *>(dyn->d_un.d_ptr);
+ break;
+ }
+ }
+ DEBUG_LOG("DT_DEBUG points at %p", static_cast<void *>(dbg));
+}
+
+/**
+ * Helper class to ensure the given pointer is writable within the scope of
+ * an instance. Permissions to the memory page where the pointer lies are
+ * restored to their original value when the instance is destroyed.
+ */
+class EnsureWritable
+{
+public:
+ template <typename T>
+ EnsureWritable(T *ptr, size_t length_ = sizeof(T))
+ {
+ MOZ_ASSERT(length_ < PageSize());
+ prot = -1;
+ page = MAP_FAILED;
+
+ char *firstPage = PageAlignedPtr(reinterpret_cast<char *>(ptr));
+ char *lastPageEnd = PageAlignedEndPtr(reinterpret_cast<char *>(ptr) + length_);
+ length = lastPageEnd - firstPage;
+ uintptr_t start = reinterpret_cast<uintptr_t>(firstPage);
+ uintptr_t end;
+
+ prot = getProt(start, &end);
+ if (prot == -1 || (start + length) > end)
+ MOZ_CRASH();
+
+ if (prot & PROT_WRITE)
+ return;
+
+ page = firstPage;
+ mprotect(page, length, prot | PROT_WRITE);
+ }
+
+ ~EnsureWritable()
+ {
+ if (page != MAP_FAILED) {
+ mprotect(page, length, prot);
+    }
+ }
+
+private:
+ int getProt(uintptr_t addr, uintptr_t *end)
+ {
+ /* The interesting part of the /proc/self/maps format looks like:
+ * startAddr-endAddr rwxp */
+ int result = 0;
+ AutoCloseFILE f(fopen("/proc/self/maps", "r"));
+ while (f) {
+ unsigned long long startAddr, endAddr;
+ char perms[5];
+ if (fscanf(f, "%llx-%llx %4s %*1024[^\n] ", &startAddr, &endAddr, perms) != 3)
+ return -1;
+ if (addr < startAddr || addr >= endAddr)
+ continue;
+ if (perms[0] == 'r')
+ result |= PROT_READ;
+ else if (perms[0] != '-')
+ return -1;
+ if (perms[1] == 'w')
+ result |= PROT_WRITE;
+ else if (perms[1] != '-')
+ return -1;
+ if (perms[2] == 'x')
+ result |= PROT_EXEC;
+ else if (perms[2] != '-')
+ return -1;
+ *end = endAddr;
+ return result;
+ }
+ return -1;
+ }
+
+ int prot;
+ void *page;
+ size_t length;
+};
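+
+/* Illustrative usage sketch (not part of the original code), matching what
+ * DebuggerHelper::Add does further below:
+ *
+ *   EnsureWritable w(&dbg->r_map->l_prev); // make the page writable
+ *   dbg->r_map->l_prev = map;              // safe to write now
+ *   // original page protections are restored when w goes out of scope
+ */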
+
+/**
+ * The system linker maintains a doubly linked list of the libraries it
+ * loads, for use by the debugger. Unfortunately, it also uses the list
+ * pointers in a lot of operations, and adding our data to the list is
+ * likely to trigger crashes when the linker tries to use data we don't
+ * provide, or that falls outside the amount of data we allocated.
+ * Fortunately, the linker only traverses the list forward and accesses
+ * the head of the list from a private pointer instead of using the value
+ * in the r_debug structure. This means we can safely add members at the
+ * beginning of the list. Unfortunately, gdb checks the coherency of
+ * l_prev values, so we have to adjust the l_prev value for the first
+ * element the system linker knows about. Fortunately, it doesn't use
+ * l_prev, and the first element is never going to be released before our
+ * elements, since it is the
+ * r_debug::r_map.
+ */
+void
+ElfLoader::DebuggerHelper::Add(ElfLoader::link_map *map)
+{
+ if (!dbg->r_brk)
+ return;
+ dbg->r_state = r_debug::RT_ADD;
+ dbg->r_brk();
+ map->l_prev = nullptr;
+ map->l_next = dbg->r_map;
+ if (!firstAdded) {
+ firstAdded = map;
+ /* When adding a library for the first time, r_map points to data
+ * handled by the system linker, and that data may be read-only */
+ EnsureWritable w(&dbg->r_map->l_prev);
+ dbg->r_map->l_prev = map;
+ } else
+ dbg->r_map->l_prev = map;
+ dbg->r_map = map;
+ dbg->r_state = r_debug::RT_CONSISTENT;
+ dbg->r_brk();
+}
+
+void
+ElfLoader::DebuggerHelper::Remove(ElfLoader::link_map *map)
+{
+ if (!dbg->r_brk)
+ return;
+ dbg->r_state = r_debug::RT_DELETE;
+ dbg->r_brk();
+ if (dbg->r_map == map)
+ dbg->r_map = map->l_next;
+ else if (map->l_prev) {
+ map->l_prev->l_next = map->l_next;
+ }
+ if (map == firstAdded) {
+ firstAdded = map->l_prev;
+ /* When removing the first added library, its l_next is going to be
+ * data handled by the system linker, and that data may be read-only */
+ EnsureWritable w(&map->l_next->l_prev);
+ map->l_next->l_prev = map->l_prev;
+ } else if (map->l_next) {
+ map->l_next->l_prev = map->l_prev;
+ }
+ dbg->r_state = r_debug::RT_CONSISTENT;
+ dbg->r_brk();
+}
+
+#if defined(ANDROID)
+/* As some system libraries may be calling signal() or sigaction() to
+ * set a SIGSEGV handler, effectively breaking MappableSeekableZStream,
+ * or worse, restore our SIGSEGV handler with wrong flags (which using
+ * signal() will do), we want to hook into the system's sigaction() to
+ * replace it with our own wrapper instead, so that our handler is never
+ * replaced. We used to only do that with libraries this linker loads,
+ * but it turns out at least one system library does call signal() and
+ * breaks us (libsc-a3xx.so on the Samsung Galaxy S4).
+ * As libc's signal (bsd_signal/sysv_signal, really) calls sigaction
+ * under the hood, instead of calling the signal system call directly,
+ * we only need to hook sigaction. This is true for both bionic and
+ * glibc.
+ */
+
+/* libc's sigaction */
+extern "C" int
+sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact);
+
+/* Simple reimplementation of sigaction. This is roughly equivalent
+ * to the assembly that comes in bionic, but not quite equivalent to
+ * glibc's implementation, so we only use this on Android. */
+int
+sys_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact)
+{
+ return syscall(__NR_sigaction, signum, act, oldact);
+}
+
+/* Replace the first instructions of the given function with a jump
+ * to the given new function. */
+template <typename T>
+static bool
+Divert(T func, T new_func)
+{
+ void *ptr = FunctionPtr(func);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+
+#if defined(__i386__)
+  // A 32-bit jump is a 5-byte instruction.
+ EnsureWritable w(ptr, 5);
+ *reinterpret_cast<unsigned char *>(addr) = 0xe9; // jmp
+ *reinterpret_cast<intptr_t *>(addr + 1) =
+ reinterpret_cast<uintptr_t>(new_func) - addr - 5; // target displacement
+ return true;
+#elif defined(__arm__)
+ const unsigned char trampoline[] = {
+ // .thumb
+ 0x46, 0x04, // nop
+ 0x78, 0x47, // bx pc
+ 0x46, 0x04, // nop
+ // .arm
+ 0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc, #-4]
+ // .word <new_func>
+ };
+ const unsigned char *start;
+ if (addr & 0x01) {
+ /* Function is thumb, the actual address of the code is without the
+ * least significant bit. */
+ addr--;
+ /* The arm part of the trampoline needs to be 32-bit aligned */
+ if (addr & 0x02)
+ start = trampoline;
+ else
+ start = trampoline + 2;
+ } else {
+ /* Function is arm, we only need the arm part of the trampoline */
+ start = trampoline + 6;
+ }
+
+ size_t len = sizeof(trampoline) - (start - trampoline);
+ EnsureWritable w(reinterpret_cast<void *>(addr), len + sizeof(void *));
+ memcpy(reinterpret_cast<void *>(addr), start, len);
+ *reinterpret_cast<void **>(addr + len) = FunctionPtr(new_func);
+ cacheflush(addr, addr + len + sizeof(void *), 0);
+ return true;
+#else
+ return false;
+#endif
+}
+#else
+#define sys_sigaction sigaction
+template <typename T>
+static bool
+Divert(T func, T new_func)
+{
+ return false;
+}
+#endif
+
+namespace {
+
+/* Clock that only accounts for time spent in the current process. */
+static uint64_t ProcessTimeStamp_Now()
+{
+ struct timespec ts;
+ int rv = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
+
+ if (rv != 0) {
+ return 0;
+ }
+
+ uint64_t baseNs = (uint64_t)ts.tv_sec * 1000000000;
+ return baseNs + (uint64_t)ts.tv_nsec;
+}
+
+} /* anonymous namespace */
+
+/* Data structure used to pass data to the temporary signal handler, as
+ * well as to trigger a test crash. */
+struct TmpData {
+ volatile int crash_int;
+ volatile uint64_t crash_timestamp;
+};
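+
+/* Illustrative note (not part of the original code): the constructor below
+ * maps a page holding a TmpData instance, mprotect()s it PROT_NONE, then
+ * writes crash_int, which is what deliberately triggers the test SIGSEGV
+ * caught by SEGVHandler::test_handler. */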
+
+SEGVHandler::SEGVHandler()
+: initialized(false), registeredHandler(false), signalHandlingBroken(true)
+, signalHandlingSlow(true)
+{
+ /* Ensure logging is initialized before the DEBUG_LOG in the test_handler.
+ * As this constructor runs before the ElfLoader constructor (by effect
+ * of ElfLoader inheriting from this class), this also initializes on behalf
+ * of ElfLoader and DebuggerHelper. */
+ Logging::Init();
+
+ /* Initialize oldStack.ss_flags to an invalid value when used to set
+ * an alternative stack, meaning we haven't got information about the
+ * original alternative stack and thus don't mean to restore it in
+ * the destructor. */
+ oldStack.ss_flags = SS_ONSTACK;
+
+ /* Get the current segfault signal handler. */
+ struct sigaction old_action;
+ sys_sigaction(SIGSEGV, nullptr, &old_action);
+
+ /* Some devices don't provide useful information to their SIGSEGV handlers,
+ * making it impossible for on-demand decompression to work. To check if
+   * we're on such a device, set up a temporary handler and deliberately
+ * trigger a segfault. The handler will set signalHandlingBroken if the
+ * provided information is bogus.
+ * Some other devices have a kernel option enabled that makes SIGSEGV handler
+ * have an overhead so high that it affects how on-demand decompression
+ * performs. The handler will also set signalHandlingSlow if the triggered
+ * SIGSEGV took too much time. */
+ struct sigaction action;
+ action.sa_sigaction = &SEGVHandler::test_handler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_restorer = nullptr;
+ stackPtr.Assign(MemoryRange::mmap(nullptr, PageSize(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+ if (stackPtr.get() == MAP_FAILED)
+ return;
+ if (sys_sigaction(SIGSEGV, &action, nullptr))
+ return;
+
+ TmpData *data = reinterpret_cast<TmpData*>(stackPtr.get());
+ data->crash_timestamp = ProcessTimeStamp_Now();
+ mprotect(stackPtr, stackPtr.GetLength(), PROT_NONE);
+ data->crash_int = 123;
+ /* Restore the original segfault signal handler. */
+ sys_sigaction(SIGSEGV, &old_action, nullptr);
+ stackPtr.Assign(MAP_FAILED, 0);
+}
+
+void
+SEGVHandler::FinishInitialization()
+{
+ /* Ideally, we'd need some locking here, but in practice, we're not
+ * going to race with another thread. */
+ initialized = true;
+
+ if (signalHandlingBroken || signalHandlingSlow)
+ return;
+
+ typedef int (*sigaction_func)(int, const struct sigaction *,
+ struct sigaction *);
+
+ sigaction_func libc_sigaction;
+
+#if defined(ANDROID)
+  /* Android > 4.4 comes with a sigaction wrapper in an LD_PRELOADed library
+ * (libsigchain) for ART. That wrapper kind of does the same trick as we
+ * do, so we need extra care in handling it.
+ * - Divert the libc's sigaction, assuming the LD_PRELOADed library uses
+ * it under the hood (which is more or less true according to the source
+ * of that library, since it's doing a lookup in RTLD_NEXT)
+ * - With the LD_PRELOADed library in place, all calls to sigaction from
+   *   system libraries will go to the LD_PRELOADed library.
+   * - The LD_PRELOADed library's calls to sigaction go to our __wrap_sigaction.
+ * - The calls to sigaction from libraries faulty.lib loads are sent to
+ * the LD_PRELOADed library.
+ * In practice, for signal handling, this means:
+ * - The signal handler registered to the kernel is ours.
+ * - Our handler redispatches to the LD_PRELOADed library's if there's a
+ * segfault we don't handle.
+ * - The LD_PRELOADed library redispatches according to whatever system
+ * library or faulty.lib-loaded library set with sigaction.
+ *
+ * When there is no sigaction wrapper in place:
+ * - Divert the libc's sigaction.
+ * - Calls to sigaction from system library and faulty.lib-loaded libraries
+ * all go to the libc's sigaction, which end up in our __wrap_sigaction.
+ * - The signal handler registered to the kernel is ours.
+ * - Our handler redispatches according to whatever system library or
+ * faulty.lib-loaded library set with sigaction.
+ */
+ void *libc = dlopen("libc.so", RTLD_GLOBAL | RTLD_LAZY);
+ if (libc) {
+ /*
+ * Lollipop bionic only has a small trampoline in sigaction, with the real
+ * work happening in __sigaction. Divert there instead of sigaction if it exists.
+ * Bug 1154803
+ */
+ libc_sigaction = reinterpret_cast<sigaction_func>(dlsym(libc, "__sigaction"));
+
+ if (!libc_sigaction) {
+ libc_sigaction =
+ reinterpret_cast<sigaction_func>(dlsym(libc, "sigaction"));
+ }
+ } else
+#endif
+ {
+ libc_sigaction = sigaction;
+ }
+
+ if (!Divert(libc_sigaction, __wrap_sigaction))
+ return;
+
+  /* Set up an alternative stack if the already existing one is not big
+ * enough, or if there is none. */
+ if (sigaltstack(nullptr, &oldStack) == 0) {
+ if (oldStack.ss_flags == SS_ONSTACK)
+ oldStack.ss_flags = 0;
+ if (!oldStack.ss_sp || oldStack.ss_size < stackSize) {
+ stackPtr.Assign(MemoryRange::mmap(nullptr, stackSize,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+ if (stackPtr.get() == MAP_FAILED)
+ return;
+ stack_t stack;
+ stack.ss_sp = stackPtr;
+ stack.ss_size = stackSize;
+ stack.ss_flags = 0;
+ if (sigaltstack(&stack, nullptr) != 0)
+ return;
+ }
+ }
+ /* Register our own handler, and store the already registered one in
+ * SEGVHandler's struct sigaction member */
+ action.sa_sigaction = &SEGVHandler::handler;
+ action.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ registeredHandler = !sys_sigaction(SIGSEGV, &action, &this->action);
+}
+
+SEGVHandler::~SEGVHandler()
+{
+ /* Restore alternative stack for signals */
+ if (oldStack.ss_flags != SS_ONSTACK)
+ sigaltstack(&oldStack, nullptr);
+ /* Restore original signal handler */
+ if (registeredHandler)
+ sys_sigaction(SIGSEGV, &this->action, nullptr);
+}
+
+/* Test handler for a deliberately triggered SIGSEGV that determines whether
+ * useful information is provided to signal handlers, particularly whether
+ * si_addr is filled in properly, and whether the segfault handler is called
+ * quickly enough. */
+void SEGVHandler::test_handler(int signum, siginfo_t *info, void *context)
+{
+ SEGVHandler &that = ElfLoader::Singleton;
+ if (signum == SIGSEGV && info &&
+ info->si_addr == that.stackPtr.get())
+ that.signalHandlingBroken = false;
+ mprotect(that.stackPtr, that.stackPtr.GetLength(), PROT_READ | PROT_WRITE);
+ TmpData *data = reinterpret_cast<TmpData*>(that.stackPtr.get());
+ uint64_t latency = ProcessTimeStamp_Now() - data->crash_timestamp;
+ DEBUG_LOG("SEGVHandler latency: %" PRIu64, latency);
+  /* See bug 886736 for timings on different devices; 150 µs is reasonably above
+   * the latency on "working" devices and seems short enough not to incur
+   * a huge overhead on on-demand decompression. */
+ if (latency <= 150000)
+ that.signalHandlingSlow = false;
+}
+
+/* TODO: "properly" handle signal masks and flags */
+void SEGVHandler::handler(int signum, siginfo_t *info, void *context)
+{
+ //ASSERT(signum == SIGSEGV);
+ DEBUG_LOG("Caught segmentation fault @%p", info->si_addr);
+
+ /* Check whether we segfaulted in the address space of a CustomElf. We're
+ * only expecting that to happen as an access error. */
+ if (info->si_code == SEGV_ACCERR) {
+ RefPtr<LibHandle> handle =
+ ElfLoader::Singleton.GetHandleByPtr(info->si_addr);
+ BaseElf *elf;
+ if (handle && (elf = handle->AsBaseElf())) {
+ DEBUG_LOG("Within the address space of %s", handle->GetPath());
+ if (elf->mappable && elf->mappable->ensure(info->si_addr)) {
+ return;
+ }
+ }
+ }
+
+ /* Redispatch to the registered handler */
+ SEGVHandler &that = ElfLoader::Singleton;
+ if (that.action.sa_flags & SA_SIGINFO) {
+ DEBUG_LOG("Redispatching to registered handler @%p",
+ FunctionPtr(that.action.sa_sigaction));
+ that.action.sa_sigaction(signum, info, context);
+ } else if (that.action.sa_handler == SIG_DFL) {
+ DEBUG_LOG("Redispatching to default handler");
+ /* Reset the handler to the default one, and trigger it. */
+ sys_sigaction(signum, &that.action, nullptr);
+ raise(signum);
+ } else if (that.action.sa_handler != SIG_IGN) {
+ DEBUG_LOG("Redispatching to registered handler @%p",
+ FunctionPtr(that.action.sa_handler));
+ that.action.sa_handler(signum);
+ } else {
+ DEBUG_LOG("Ignoring");
+ }
+}
+
+int
+SEGVHandler::__wrap_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact)
+{
+ SEGVHandler &that = ElfLoader::Singleton;
+
+ /* Use system sigaction() function for all but SIGSEGV signals. */
+ if (!that.registeredHandler || (signum != SIGSEGV))
+ return sys_sigaction(signum, act, oldact);
+
+ if (oldact)
+ *oldact = that.action;
+ if (act)
+ that.action = *act;
+ return 0;
+}
+
+Logging Logging::Singleton;
diff --git a/mozglue/linker/ElfLoader.h b/mozglue/linker/ElfLoader.h
new file mode 100644
index 000000000..0d26a011e
--- /dev/null
+++ b/mozglue/linker/ElfLoader.h
@@ -0,0 +1,674 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ElfLoader_h
+#define ElfLoader_h
+
+#include <vector>
+#include <dlfcn.h>
+#include <signal.h>
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "Zip.h"
+#include "Elfxx.h"
+#include "Mappable.h"
+
+/**
+ * dlfcn.h replacement functions
+ */
+extern "C" {
+ void *__wrap_dlopen(const char *path, int flags);
+ const char *__wrap_dlerror(void);
+ void *__wrap_dlsym(void *handle, const char *symbol);
+ int __wrap_dlclose(void *handle);
+
+#ifndef HAVE_DLADDR
+ typedef struct {
+ const char *dli_fname;
+ void *dli_fbase;
+ const char *dli_sname;
+ void *dli_saddr;
+ } Dl_info;
+#endif
+ int __wrap_dladdr(void *addr, Dl_info *info);
+
+ struct dl_phdr_info {
+ Elf::Addr dlpi_addr;
+ const char *dlpi_name;
+ const Elf::Phdr *dlpi_phdr;
+ Elf::Half dlpi_phnum;
+ };
+
+ typedef int (*dl_phdr_cb)(struct dl_phdr_info *, size_t, void *);
+ int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data);
+
+#ifdef __ARM_EABI__
+ const void *__wrap___gnu_Unwind_Find_exidx(void *pc, int *pcount);
+#endif
+
+/**
+ * faulty.lib public API
+ */
+MFBT_API size_t
+__dl_get_mappable_length(void *handle);
+
+MFBT_API void *
+__dl_mmap(void *handle, void *addr, size_t length, off_t offset);
+
+MFBT_API void
+__dl_munmap(void *handle, void *addr, size_t length);
+
+MFBT_API bool
+IsSignalHandlingBroken();
+
+}
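+
+/* Illustrative sketch (not part of the original code) of how a consumer
+ * might use the faulty.lib API above; "libfoo.so" is a hypothetical name:
+ *
+ *   void *handle = __wrap_dlopen("libfoo.so", RTLD_LAZY);
+ *   size_t len = __dl_get_mappable_length(handle);
+ *   void *data = __dl_mmap(handle, nullptr, len, 0);
+ *   // ... read the underlying file/stream contents through data ...
+ *   __dl_munmap(handle, data, len);
+ *   __wrap_dlclose(handle);
+ */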
+
+/* Forward declarations for use in LibHandle */
+class BaseElf;
+class CustomElf;
+class SystemElf;
+
+/**
+ * Specialize RefCounted template for LibHandle. We may get references to
+ * LibHandles during the execution of their destructor, so we need
+ * RefCounted<LibHandle>::Release to support some reentrancy. See further
+ * below.
+ */
+class LibHandle;
+
+namespace mozilla {
+namespace detail {
+
+template <> inline void RefCounted<LibHandle, AtomicRefCount>::Release() const;
+
+template <> inline RefCounted<LibHandle, AtomicRefCount>::~RefCounted()
+{
+ MOZ_ASSERT(mRefCnt == 0x7fffdead);
+}
+
+} /* namespace detail */
+} /* namespace mozilla */
+
+/**
+ * Abstract class for loaded libraries. Libraries may be loaded through the
+ * system linker or this linker, both cases will be derived from this class.
+ */
+class LibHandle: public mozilla::external::AtomicRefCounted<LibHandle>
+{
+public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(LibHandle)
+ /**
+   * Constructor. Takes the path of the loaded library and stores a copy
+   * of it; GetName() returns the leaf name derived from that path.
+ */
+ LibHandle(const char *path)
+ : directRefCnt(0), path(path ? strdup(path) : nullptr), mappable(nullptr) { }
+
+ /**
+ * Destructor.
+ */
+ virtual ~LibHandle();
+
+ /**
+ * Returns the pointer to the address to which the given symbol resolves
+ * inside the library. It is not supposed to resolve the symbol in other
+ * libraries, although in practice, it will for system libraries.
+ */
+ virtual void *GetSymbolPtr(const char *symbol) const = 0;
+
+ /**
+ * Returns whether the given address is part of the virtual address space
+ * covered by the loaded library.
+ */
+ virtual bool Contains(void *addr) const = 0;
+
+ /**
+ * Returns the base address of the loaded library.
+ */
+ virtual void *GetBase() const = 0;
+
+ /**
+ * Returns the file name of the library without the containing directory.
+ */
+ const char *GetName() const;
+
+ /**
+ * Returns the full path of the library, when available. Otherwise, returns
+ * the file name.
+ */
+ const char *GetPath() const
+ {
+ return path;
+ }
+
+ /**
+ * Library handles can be referenced from other library handles or
+ * externally (when dlopen()ing using this linker). We need to be
+   * able to distinguish between the two kinds of referencing for better
+ * bookkeeping.
+ */
+ void AddDirectRef()
+ {
+ ++directRefCnt;
+ mozilla::external::AtomicRefCounted<LibHandle>::AddRef();
+ }
+
+ /**
+ * Releases a direct reference, and returns whether there are any direct
+ * references left.
+ */
+ bool ReleaseDirectRef()
+ {
+ bool ret = false;
+ if (directRefCnt) {
+ MOZ_ASSERT(directRefCnt <=
+ mozilla::external::AtomicRefCounted<LibHandle>::refCount());
+ if (--directRefCnt)
+ ret = true;
+ mozilla::external::AtomicRefCounted<LibHandle>::Release();
+ }
+ return ret;
+ }
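+
+  /* Illustrative note (not part of the original code): direct references
+   * pair with external dlopen()/dlclose() calls, i.e. AddDirectRef() is
+   * expected at dlopen time and ReleaseDirectRef() at dlclose time. */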
+
+ /**
+ * Returns the number of direct references
+ */
+ MozRefCountType DirectRefCount()
+ {
+ return directRefCnt;
+ }
+
+ /**
+ * Returns the complete size of the file or stream behind the library
+ * handle.
+ */
+ size_t GetMappableLength() const;
+
+ /**
+ * Returns a memory mapping of the file or stream behind the library
+ * handle.
+ */
+ void *MappableMMap(void *addr, size_t length, off_t offset) const;
+
+ /**
+ * Unmaps a memory mapping of the file or stream behind the library
+ * handle.
+ */
+ void MappableMUnmap(void *addr, size_t length) const;
+
+#ifdef __ARM_EABI__
+ /**
+ * Find the address and entry count of the ARM.exidx section
+ * associated with the library
+ */
+ virtual const void *FindExidx(int *pcount) const = 0;
+#endif
+
+ /**
+ * Shows some stats about the Mappable instance. The when argument is to be
+   * used by the caller to give an identifier of when the stats call is
+ * made.
+ */
+ virtual void stats(const char *when) const { };
+
+protected:
+ /**
+ * Returns a mappable object for use by MappableMMap and related functions.
+ */
+ virtual Mappable *GetMappable() const = 0;
+
+ /**
+   * Returns the instance, cast as the wanted type. Returns nullptr if
+ * that's not the actual type. (short of a better way to do this without
+ * RTTI)
+ */
+ friend class ElfLoader;
+ friend class CustomElf;
+ friend class SEGVHandler;
+ virtual BaseElf *AsBaseElf() { return nullptr; }
+ virtual SystemElf *AsSystemElf() { return nullptr; }
+
+private:
+ MozRefCountType directRefCnt;
+ char *path;
+
+ /* Mappable object keeping the result of GetMappable() */
+ mutable RefPtr<Mappable> mappable;
+};
+
+/**
+ * Specialized RefCounted<LibHandle>::Release. Under normal operation, when
+ * mRefCnt reaches 0, the LibHandle is deleted. Its mRefCnt is however
+ * increased to 1 on normal builds, and 0x7fffdead on debug builds so that the
+ * LibHandle can still be referenced while the destructor is executing. The
+ * mRefCnt is allowed to grow > 0x7fffdead, but not to decrease under that
+ * value, which would mean too many Releases from within the destructor.
+ */
+namespace mozilla {
+namespace detail {
+
+template <> inline void RefCounted<LibHandle, AtomicRefCount>::Release() const {
+#ifdef DEBUG
+ if (mRefCnt > 0x7fff0000)
+ MOZ_ASSERT(mRefCnt > 0x7fffdead);
+#endif
+ MOZ_ASSERT(mRefCnt > 0);
+ if (mRefCnt > 0) {
+ if (0 == --mRefCnt) {
+#ifdef DEBUG
+ mRefCnt = 0x7fffdead;
+#else
+ mRefCnt = 1;
+#endif
+ delete static_cast<const LibHandle*>(this);
+ }
+ }
+}
+
+} /* namespace detail */
+} /* namespace mozilla */
+
+/**
+ * Class handling libraries loaded by the system linker
+ */
+class SystemElf: public LibHandle
+{
+public:
+ /**
+ * Returns a new SystemElf for the given path. The given flags are passed
+ * to dlopen().
+ */
+ static already_AddRefed<LibHandle> Load(const char *path, int flags);
+
+ /**
+ * Inherited from LibHandle
+ */
+ virtual ~SystemElf();
+ virtual void *GetSymbolPtr(const char *symbol) const;
+ virtual bool Contains(void *addr) const { return false; /* UNIMPLEMENTED */ }
+ virtual void *GetBase() const { return nullptr; /* UNIMPLEMENTED */ }
+
+#ifdef __ARM_EABI__
+ virtual const void *FindExidx(int *pcount) const;
+#endif
+
+protected:
+ virtual Mappable *GetMappable() const;
+
+ /**
+   * Returns the instance, cast as SystemElf. (short of a better way to do
+ * this without RTTI)
+ */
+ friend class ElfLoader;
+ virtual SystemElf *AsSystemElf() { return this; }
+
+ /**
+ * Remove the reference to the system linker handle. This avoids dlclose()
+ * being called when the instance is destroyed.
+ */
+ void Forget()
+ {
+ dlhandle = nullptr;
+ }
+
+private:
+ /**
+ * Private constructor
+ */
+ SystemElf(const char *path, void *handle)
+ : LibHandle(path), dlhandle(handle) { }
+
+ /* Handle as returned by system dlopen() */
+ void *dlhandle;
+};
+
+/**
+ * The ElfLoader registers its own SIGSEGV handler to handle segmentation
+ * faults within the address space of the loaded libraries. It however
+ * allows a handler to be set for faults in other places, and redispatches
+ * to the handler set through signal() or sigaction().
+ */
+class SEGVHandler
+{
+public:
+ bool hasRegisteredHandler() {
+    if (!initialized)
+ FinishInitialization();
+ return registeredHandler;
+ }
+
+ bool isSignalHandlingBroken() {
+ return signalHandlingBroken;
+ }
+
+ static int __wrap_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact);
+
+protected:
+ SEGVHandler();
+ ~SEGVHandler();
+
+private:
+
+ /**
+ * The constructor doesn't do all initialization, and the tail is done
+ * at a later time.
+ */
+ void FinishInitialization();
+
+ /**
+ * SIGSEGV handler registered with __wrap_signal or __wrap_sigaction.
+ */
+ struct sigaction action;
+
+ /**
+ * ElfLoader SIGSEGV handler.
+ */
+ static void handler(int signum, siginfo_t *info, void *context);
+
+ /**
+ * Temporary test handler.
+ */
+ static void test_handler(int signum, siginfo_t *info, void *context);
+
+ /**
+ * Size of the alternative stack. The printf family requires more than 8KB
+ * of stack, and our signal handler may print a few things.
+ */
+ static const size_t stackSize = 12 * 1024;
+
+ /**
+ * Alternative stack information used before initialization.
+ */
+ stack_t oldStack;
+
+ /**
+ * Pointer to an alternative stack for signals. Only set if oldStack is
+ * not set or not big enough.
+ */
+ MappedPtr stackPtr;
+
+ bool initialized;
+ bool registeredHandler;
+ bool signalHandlingBroken;
+ bool signalHandlingSlow;
+};
+
+/**
+ * Elf Loader class in charge of loading libraries and keeping track of them.
+ */
+class ElfLoader: public SEGVHandler
+{
+public:
+ /**
+ * The Elf Loader instance
+ */
+ static ElfLoader Singleton;
+
+ /**
+   * Loads the given library with the given flags. Equivalent to dlopen().
+ * The extra "parent" argument optionally gives the handle of the library
+ * requesting the given library to be loaded. The loader may look in the
+ * directory containing that parent library for the library to load.
+ */
+ already_AddRefed<LibHandle> Load(const char *path, int flags,
+ LibHandle *parent = nullptr);
+
+ /**
+ * Returns the handle of the library containing the given address in
+ * its virtual address space, i.e. the library handle for which
+   * LibHandle::Contains returns true. Its purpose is to make it possible
+   * to implement dladdr().
+ */
+ already_AddRefed<LibHandle> GetHandleByPtr(void *addr);
+
+ /**
+   * Returns a Mappable object for the given path. Paths of the form
+   *     /foo/bar/baz/archive!/directory/lib.so
+   * load directory/lib.so from /foo/bar/baz/archive, provided
+   * that file is a Zip archive.
+ */
+ static Mappable *GetMappableFromPath(const char *path);
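+
+  /* For instance (hypothetical path), GetMappableFromPath(
+   * "/data/app/base.apk!/lib/libfoo.so") maps lib/libfoo.so from within
+   * the Zip archive /data/app/base.apk. */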
+
+ void ExpectShutdown(bool val) { expect_shutdown = val; }
+ bool IsShutdownExpected() { return expect_shutdown; }
+
+private:
+ bool expect_shutdown;
+
+protected:
+ /**
+ * Registers the given handle. This method is meant to be called by
+ * LibHandle subclass creators.
+ */
+ void Register(LibHandle *handle);
+ void Register(CustomElf *handle);
+
+ /**
+ * Forget about the given handle. This method is meant to be called by
+ * LibHandle subclass destructors.
+ */
+ void Forget(LibHandle *handle);
+ void Forget(CustomElf *handle);
+
+ /* Last error. Used for dlerror() */
+ friend class SystemElf;
+ friend const char *__wrap_dlerror(void);
+ friend void *__wrap_dlsym(void *handle, const char *symbol);
+ friend int __wrap_dlclose(void *handle);
+ const char *lastError;
+
+private:
+ ElfLoader() : expect_shutdown(true) {}
+ ~ElfLoader();
+
+ /* Initialization code that can't run during static initialization. */
+ void Init();
+
+ /* System loader handle for the library/program containing our code. This
+ * is used to resolve wrapped functions. */
+ RefPtr<LibHandle> self_elf;
+
+#if defined(ANDROID)
+  /* System loader handle for the libc. This is used to resolve weak symbols
+   * that some libcs contain but that the Android linker won't dlsym(). Normally,
+ * we wouldn't treat non-Android differently, but glibc uses versioned
+ * symbols which this linker doesn't support. */
+ RefPtr<LibHandle> libc;
+#endif
+
+ /* Bookkeeping */
+ typedef std::vector<LibHandle *> LibHandleList;
+ LibHandleList handles;
+
+protected:
+ friend class CustomElf;
+ friend class LoadedElf;
+ /**
+ * Show some stats about Mappables in CustomElfs. The when argument is to
+   * be used by the caller to give an identifier of when the stats call
+ * is made.
+ */
+ static void stats(const char *when);
+
+  /* Definition of static destructors to be used for C++ ABI compatibility */
+ typedef void (*Destructor)(void *object);
+
+ /**
+ * C++ ABI makes static initializers register destructors through a specific
+ * atexit interface. On glibc/linux systems, the dso_handle is a pointer
+ * within a given library. On bionic/android systems, it is an undefined
+ * symbol. Making sense of the value is not really important, and all that
+ * is really important is that it is different for each loaded library, so
+ * that they can be discriminated when shutting down. For convenience, on
+ * systems where the dso handle is a symbol, that symbol is resolved to
+   * point at the corresponding CustomElf.
+ *
+ * Destructors are registered with __*_atexit with an associated object to
+ * be passed as argument when it is called.
+ *
+ * When __cxa_finalize is called, destructors registered for the given
+ * DSO handle are called in the reverse order they were registered.
+ */
+#ifdef __ARM_EABI__
+ static int __wrap_aeabi_atexit(void *that, Destructor destructor,
+ void *dso_handle);
+#else
+ static int __wrap_cxa_atexit(Destructor destructor, void *that,
+ void *dso_handle);
+#endif
+
+ static void __wrap_cxa_finalize(void *dso_handle);
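+
+  /* Illustrative flow (not part of the original code): constructing a static
+   * object in a library loaded by this linker makes the compiler-emitted code
+   * register its destructor as __cxa_atexit(dtor, obj, dso_handle)
+   * (__aeabi_atexit on ARM, with the first two arguments swapped), which is
+   * wrapped to the functions above; __wrap_cxa_finalize(dso_handle) later
+   * runs the registered destructors in reverse registration order. */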
+
+ /**
+ * Registered destructor. Keeps track of the destructor function pointer,
+ * associated object to call it with, and DSO handle.
+ */
+ class DestructorCaller {
+ public:
+ DestructorCaller(Destructor destructor, void *object, void *dso_handle)
+ : destructor(destructor), object(object), dso_handle(dso_handle) { }
+
+ /**
+ * Call the destructor function with the associated object.
+ * Call only once, see CustomElf::~CustomElf.
+ */
+ void Call();
+
+ /**
+ * Returns whether the destructor is associated to the given DSO handle
+ */
+ bool IsForHandle(void *handle) const
+ {
+ return handle == dso_handle;
+ }
+
+ private:
+ Destructor destructor;
+ void *object;
+ void *dso_handle;
+ };
+
+private:
+ /* Keep track of all registered destructors */
+ std::vector<DestructorCaller> destructors;
+
+ /* Forward declaration, see further below */
+ class DebuggerHelper;
+public:
+  /* Loaded object descriptor for the debugger interface below */
+ struct link_map {
+ /* Base address of the loaded object. */
+ const void *l_addr;
+ /* File name */
+ const char *l_name;
+ /* Address of the PT_DYNAMIC segment. */
+ const void *l_ld;
+
+ private:
+ friend class ElfLoader::DebuggerHelper;
+ /* Double linked list of loaded objects. */
+ link_map *l_next, *l_prev;
+ };
+
+private:
+  /* Data structure used by the linker to give debuggers details about the
+   * shared objects it loaded. This is normally defined in link.h, but
+   * Android headers lack this file. */
+ struct r_debug {
+ /* Version number of the protocol. */
+ int r_version;
+
+ /* Head of the linked list of loaded objects. */
+ link_map *r_map;
+
+ /* Function to be called when updates to the linked list of loaded objects
+ * are going to occur. The function is to be called before and after
+ * changes. */
+ void (*r_brk)(void);
+
+ /* Indicates to the debugger what state the linked list of loaded objects
+ * is in when the function above is called. */
+ enum {
+ RT_CONSISTENT, /* Changes are complete */
+ RT_ADD, /* Beginning to add a new object */
+ RT_DELETE /* Beginning to remove an object */
+ } r_state;
+ };
+
+ /* Memory representation of ELF Auxiliary Vectors */
+ struct AuxVector {
+ Elf::Addr type;
+ Elf::Addr value;
+ };
+
+ /* Helper class used to integrate libraries loaded by this linker in
+ * r_debug */
+ class DebuggerHelper
+ {
+ public:
+ DebuggerHelper();
+
+    void Init(AuxVector *auxv);
+
+ operator bool()
+ {
+ return dbg;
+ }
+
+ /* Make the debugger aware of a new loaded object */
+ void Add(link_map *map);
+
+ /* Make the debugger aware of the unloading of an object */
+ void Remove(link_map *map);
+
+ /* Iterates over all link_maps */
+ class iterator
+ {
+ public:
+ const link_map *operator ->() const
+ {
+ return item;
+ }
+
+ const link_map &operator ++()
+ {
+ item = item->l_next;
+ return *item;
+ }
+
+ bool operator<(const iterator &other) const
+ {
+ if (other.item == nullptr)
+ return item ? true : false;
+ MOZ_CRASH("DebuggerHelper::iterator::operator< called with something else than DebuggerHelper::end()");
+ }
+ protected:
+ friend class DebuggerHelper;
+ iterator(const link_map *item): item(item) { }
+
+ private:
+ const link_map *item;
+ };
+
+ iterator begin() const
+ {
+ return iterator(dbg ? dbg->r_map : nullptr);
+ }
+
+ iterator end() const
+ {
+ return iterator(nullptr);
+ }
+
+ private:
+ r_debug *dbg;
+ link_map *firstAdded;
+ };
+ friend int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data);
+ DebuggerHelper dbg;
+};
+
+#endif /* ElfLoader_h */
diff --git a/mozglue/linker/Elfxx.h b/mozglue/linker/Elfxx.h
new file mode 100644
index 000000000..b21a89336
--- /dev/null
+++ b/mozglue/linker/Elfxx.h
@@ -0,0 +1,241 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Elfxx_h
+#define Elfxx_h
+
+/**
+ * Android system headers have two different elf.h files. The one under
+ * linux/ is the most complete on older Android API versions.
+ */
+#if defined(ANDROID) && __ANDROID_API__ < 21
+#include <linux/elf.h>
+#else
+#include <elf.h>
+#endif
+#include <endian.h>
+
+#if defined(__ARM_EABI__) && !defined(PT_ARM_EXIDX)
+#define PT_ARM_EXIDX 0x70000001
+#endif
+
+/**
+ * Generic ELF macros for the target system
+ */
+#ifdef __LP64__
+#define Elf_(type) Elf64_ ## type
+#define ELFCLASS ELFCLASS64
+#define ELF_R_TYPE ELF64_R_TYPE
+#define ELF_R_SYM ELF64_R_SYM
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF64_ST_BIND
+#endif
+#else
+#define Elf_(type) Elf32_ ## type
+#define ELFCLASS ELFCLASS32
+#define ELF_R_TYPE ELF32_R_TYPE
+#define ELF_R_SYM ELF32_R_SYM
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF32_ST_BIND
+#endif
+#endif
+
+#ifndef __BYTE_ORDER
+#error Cannot find endianness
+#endif
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define ELFDATA ELFDATA2LSB
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define ELFDATA ELFDATA2MSB
+#endif
+
+#ifdef __linux__
+#define ELFOSABI ELFOSABI_LINUX
+#ifdef EI_ABIVERSION
+#define ELFABIVERSION 0
+#endif
+#else
+#error Unknown ELF OSABI
+#endif
+
+#if defined(__i386__)
+#define ELFMACHINE EM_386
+
+// Doing it this way probably doesn't scale to other architectures
+#define R_ABS R_386_32
+#define R_GLOB_DAT R_386_GLOB_DAT
+#define R_JMP_SLOT R_386_JMP_SLOT
+#define R_RELATIVE R_386_RELATIVE
+#define RELOC(n) DT_REL ## n
+#define UNSUPPORTED_RELOC(n) DT_RELA ## n
+#define STR_RELOC(n) "DT_REL" # n
+#define Reloc Rel
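+
+// For example (illustrative), RELOC(SZ) expands to DT_RELSZ and STR_RELOC(SZ)
+// to "DT_RELSZ" here; on RELA architectures like x86-64 below, the same
+// macros yield DT_RELASZ and "DT_RELASZ".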
+
+#elif defined(__x86_64__)
+#define ELFMACHINE EM_X86_64
+
+#define R_ABS R_X86_64_64
+#define R_GLOB_DAT R_X86_64_GLOB_DAT
+#define R_JMP_SLOT R_X86_64_JUMP_SLOT
+#define R_RELATIVE R_X86_64_RELATIVE
+#define RELOC(n) DT_RELA ## n
+#define UNSUPPORTED_RELOC(n) DT_REL ## n
+#define STR_RELOC(n) "DT_RELA" # n
+#define Reloc Rela
+
+#elif defined(__arm__)
+#define ELFMACHINE EM_ARM
+
+#ifndef R_ARM_ABS32
+#define R_ARM_ABS32 2
+#endif
+#ifndef R_ARM_GLOB_DAT
+#define R_ARM_GLOB_DAT 21
+#endif
+#ifndef R_ARM_JUMP_SLOT
+#define R_ARM_JUMP_SLOT 22
+#endif
+#ifndef R_ARM_RELATIVE
+#define R_ARM_RELATIVE 23
+#endif
+
+#define R_ABS R_ARM_ABS32
+#define R_GLOB_DAT R_ARM_GLOB_DAT
+#define R_JMP_SLOT R_ARM_JUMP_SLOT
+#define R_RELATIVE R_ARM_RELATIVE
+#define RELOC(n) DT_REL ## n
+#define UNSUPPORTED_RELOC(n) DT_RELA ## n
+#define STR_RELOC(n) "DT_REL" # n
+#define Reloc Rel
+
+#else
+#error Unknown ELF machine type
+#endif
+
+/**
+ * Android system headers don't have all definitions
+ */
+#ifndef STN_UNDEF
+#define STN_UNDEF 0
+#endif
+#ifndef DT_INIT_ARRAY
+#define DT_INIT_ARRAY 25
+#endif
+#ifndef DT_FINI_ARRAY
+#define DT_FINI_ARRAY 26
+#endif
+#ifndef DT_INIT_ARRAYSZ
+#define DT_INIT_ARRAYSZ 27
+#endif
+#ifndef DT_FINI_ARRAYSZ
+#define DT_FINI_ARRAYSZ 28
+#endif
+#ifndef DT_RELACOUNT
+#define DT_RELACOUNT 0x6ffffff9
+#endif
+#ifndef DT_RELCOUNT
+#define DT_RELCOUNT 0x6ffffffa
+#endif
+#ifndef DT_VERSYM
+#define DT_VERSYM 0x6ffffff0
+#endif
+#ifndef DT_VERDEF
+#define DT_VERDEF 0x6ffffffc
+#endif
+#ifndef DT_VERDEFNUM
+#define DT_VERDEFNUM 0x6ffffffd
+#endif
+#ifndef DT_VERNEED
+#define DT_VERNEED 0x6ffffffe
+#endif
+#ifndef DT_VERNEEDNUM
+#define DT_VERNEEDNUM 0x6fffffff
+#endif
+#ifndef DT_FLAGS_1
+#define DT_FLAGS_1 0x6ffffffb
+#endif
+#ifndef DT_FLAGS
+#define DT_FLAGS 30
+#endif
+#ifndef DF_SYMBOLIC
+#define DF_SYMBOLIC 0x00000002
+#endif
+#ifndef DF_TEXTREL
+#define DF_TEXTREL 0x00000004
+#endif
+
+namespace Elf {
+
+/**
+ * Define a few basic Elf Types
+ */
+typedef Elf_(Phdr) Phdr;
+typedef Elf_(Dyn) Dyn;
+typedef Elf_(Sym) Sym;
+typedef Elf_(Addr) Addr;
+typedef Elf_(Word) Word;
+typedef Elf_(Half) Half;
+
+/**
+ * Helper class around the standard Elf header struct
+ */
+struct Ehdr: public Elf_(Ehdr)
+{
+ /**
+ * Equivalent to reinterpret_cast<const Ehdr *>(buf), but additionally
+ * checking that this is indeed an Elf header and that the Elf type
+ * corresponds to that of the system
+ */
+ static const Ehdr *validate(const void *buf);
+};
+
+/**
+ * Elf String table
+ */
+class Strtab: public UnsizedArray<const char>
+{
+public:
+ /**
+ * Returns the string at the given index in the table
+ */
+ const char *GetStringAt(off_t index) const
+ {
+ return &UnsizedArray<const char>::operator[](index);
+ }
+};
+
+/**
+ * Helper class around Elf relocation.
+ */
+struct Rel: public Elf_(Rel)
+{
+ /**
+ * Returns the addend for the relocation, which is the value stored
+ * at r_offset.
+ */
+ Addr GetAddend(void *base) const
+ {
+ return *(reinterpret_cast<const Addr *>(
+ reinterpret_cast<const char *>(base) + r_offset));
+ }
+};
+
+/**
+ * Helper class around Elf relocation with addend.
+ */
+struct Rela: public Elf_(Rela)
+{
+ /**
+ * Returns the addend for the relocation.
+ */
+ Addr GetAddend(void *base) const
+ {
+ return r_addend;
+ }
+};
+
+} /* namespace Elf */
+
+#endif /* Elfxx_h */
diff --git a/mozglue/linker/Logging.h b/mozglue/linker/Logging.h
new file mode 100644
index 000000000..046d918f4
--- /dev/null
+++ b/mozglue/linker/Logging.h
@@ -0,0 +1,87 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Logging_h
+#define Logging_h
+
+#include "mozilla/Likely.h"
+
+#ifdef ANDROID
+#include <android/log.h>
+#define LOG(...) __android_log_print(ANDROID_LOG_INFO, "GeckoLinker", __VA_ARGS__)
+#define WARN(...) __android_log_print(ANDROID_LOG_WARN, "GeckoLinker", __VA_ARGS__)
+#define ERROR(...) __android_log_print(ANDROID_LOG_ERROR, "GeckoLinker", __VA_ARGS__)
+#else
+#include <cstdio>
+
+/* Expand to 1 or m depending on whether a single argument or more than
+ * one is given. */
+#define MOZ_ONE_OR_MORE_ARGS_IMPL2(_1, _2, _3, _4, _5, _6, _7, _8, _9, N, ...) \
+ N
+#define MOZ_ONE_OR_MORE_ARGS_IMPL(args) MOZ_ONE_OR_MORE_ARGS_IMPL2 args
+#define MOZ_ONE_OR_MORE_ARGS(...) \
+ MOZ_ONE_OR_MORE_ARGS_IMPL((__VA_ARGS__, m, m, m, m, m, m, m, m, 1, 0))
+
+#define MOZ_MACRO_GLUE(a, b) a b
+#define MOZ_CONCAT2(a, b) a ## b
+#define MOZ_CONCAT1(a, b) MOZ_CONCAT2(a, b)
+#define MOZ_CONCAT(a, b) MOZ_CONCAT1(a, b)
+
+/* Some magic to choose between LOG1 and LOGm depending on the number of
+ * arguments */
+#define MOZ_CHOOSE_LOG(...) \
+ MOZ_MACRO_GLUE(MOZ_CONCAT(LOG, MOZ_ONE_OR_MORE_ARGS(__VA_ARGS__)), \
+ (__VA_ARGS__))
+
+#define LOG1(format) fprintf(stderr, format "\n")
+#define LOGm(format, ...) fprintf(stderr, format "\n", __VA_ARGS__)
+#define LOG(...) MOZ_CHOOSE_LOG(__VA_ARGS__)
+#define WARN(...) MOZ_CHOOSE_LOG("Warning: " __VA_ARGS__)
+#define ERROR(...) MOZ_CHOOSE_LOG("Error: " __VA_ARGS__)
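+
+/* Illustrative expansions (not part of the original code):
+ *   LOG("done")       -> LOG1("done")       -> fprintf(stderr, "done" "\n")
+ *   LOG("%d left", n) -> LOGm("%d left", n) -> fprintf(stderr, "%d left" "\n", n)
+ */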
+
+#endif
+
+class Logging
+{
+public:
+ static bool isVerbose()
+ {
+ return Singleton.verbose;
+ }
+
+private:
+ bool verbose;
+
+public:
+ static void Init()
+ {
+ const char *env = getenv("MOZ_DEBUG_LINKER");
+ if (env && *env == '1')
+ Singleton.verbose = true;
+ }
+
+private:
+ static Logging Singleton;
+};
+
+#define DEBUG_LOG(...) \
+ do { \
+ if (MOZ_UNLIKELY(Logging::isVerbose())) { \
+ LOG(__VA_ARGS__); \
+ } \
+ } while(0)
+
+#if defined(__LP64__)
+# define PRIxAddr "lx"
+# define PRIxSize "lx"
+# define PRIdSize "ld"
+# define PRIuSize "lu"
+#else
+# define PRIxAddr "x"
+# define PRIxSize "x"
+# define PRIdSize "d"
+# define PRIuSize "u"
+#endif
+
+#endif /* Logging_h */
diff --git a/mozglue/linker/Mappable.cpp b/mozglue/linker/Mappable.cpp
new file mode 100644
index 000000000..47b883d2d
--- /dev/null
+++ b/mozglue/linker/Mappable.cpp
@@ -0,0 +1,681 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <cstring>
+#include <cstdlib>
+#include <cstdio>
+#include <string>
+
+#include "Mappable.h"
+
+#include "mozilla/UniquePtr.h"
+
+#ifdef ANDROID
+#include <linux/ashmem.h>
+#endif
+#include <sys/stat.h>
+#include <errno.h>
+#include "ElfLoader.h"
+#include "SeekableZStream.h"
+#include "XZStream.h"
+#include "Logging.h"
+
+using mozilla::MakeUnique;
+using mozilla::UniquePtr;
+
+class CacheValidator
+{
+public:
+ CacheValidator(const char* aCachedLibPath, Zip* aZip, Zip::Stream* aStream)
+ : mCachedLibPath(aCachedLibPath)
+ {
+ static const char kChecksumSuffix[] = ".crc";
+
+ mCachedChecksumPath =
+ MakeUnique<char[]>(strlen(aCachedLibPath) + sizeof(kChecksumSuffix));
+ sprintf(mCachedChecksumPath.get(), "%s%s", aCachedLibPath, kChecksumSuffix);
+ DEBUG_LOG("mCachedChecksumPath: %s", mCachedChecksumPath.get());
+
+ mChecksum = aStream->GetCRC32();
+ DEBUG_LOG("mChecksum: %x", mChecksum);
+ }
+
+ // Returns whether the cache is valid and up-to-date.
+ bool IsValid() const
+ {
+ // Validate based on checksum.
+ RefPtr<Mappable> checksumMap = MappableFile::Create(mCachedChecksumPath.get());
+ if (!checksumMap) {
+ // Force caching if checksum is missing in cache.
+ return false;
+ }
+
+ DEBUG_LOG("Comparing %x with %s", mChecksum, mCachedChecksumPath.get());
+ MappedPtr checksumBuf = checksumMap->mmap(nullptr, checksumMap->GetLength(),
+ PROT_READ, MAP_PRIVATE, 0);
+ if (checksumBuf == MAP_FAILED) {
+ WARN("Couldn't map %s to validate checksum", mCachedChecksumPath.get());
+ return false;
+ }
+ if (memcmp(checksumBuf, &mChecksum, sizeof(mChecksum))) {
+ return false;
+ }
+ return !access(mCachedLibPath.c_str(), R_OK);
+ }
+
+ // Caches the APK-provided checksum used in future cache validations.
+ void CacheChecksum() const
+ {
+ AutoCloseFD fd(open(mCachedChecksumPath.get(),
+ O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
+ S_IRUSR | S_IWUSR));
+ if (fd == -1) {
+ WARN("Couldn't open %s to update checksum", mCachedChecksumPath.get());
+ return;
+ }
+
+ DEBUG_LOG("Updating checksum %s", mCachedChecksumPath.get());
+
+ const size_t size = sizeof(mChecksum);
+ size_t written = 0;
+ while (written < size) {
+ ssize_t ret = write(fd,
+ reinterpret_cast<const uint8_t*>(&mChecksum) + written,
+ size - written);
+ if (ret >= 0) {
+ written += ret;
+ } else if (errno != EINTR) {
+ WARN("Writing checksum %s failed with errno %d",
+ mCachedChecksumPath.get(), errno);
+ break;
+ }
+ }
+ }
+
+private:
+ const std::string mCachedLibPath;
+ UniquePtr<char[]> mCachedChecksumPath;
+ uint32_t mChecksum;
+};
+
+Mappable *
+MappableFile::Create(const char *path)
+{
+ int fd = open(path, O_RDONLY);
+ if (fd != -1)
+ return new MappableFile(fd);
+ return nullptr;
+}
+
+MemoryRange
+MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
+ off_t offset)
+{
+ MOZ_ASSERT(fd != -1);
+ MOZ_ASSERT(!(flags & MAP_SHARED));
+ flags |= MAP_PRIVATE;
+
+ return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
+ fd, offset);
+}
+
+void
+MappableFile::finalize()
+{
+  /* Close the file; equivalent to close(fd.forget()) */
+ fd = -1;
+}
+
+size_t
+MappableFile::GetLength() const
+{
+ struct stat st;
+ return fstat(fd, &st) ? 0 : st.st_size;
+}
+
+Mappable *
+MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
+{
+ MOZ_ASSERT(zip && stream);
+
+ const char *cachePath = getenv("MOZ_LINKER_CACHE");
+ if (!cachePath || !*cachePath) {
+ WARN("MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
+ "not extracting");
+ return nullptr;
+ }
+
+ // Ensure that the cache dir is private.
+ chmod(cachePath, 0770);
+
+ UniquePtr<char[]> path =
+ MakeUnique<char[]>(strlen(cachePath) + strlen(name) + 2);
+ sprintf(path.get(), "%s/%s", cachePath, name);
+
+ CacheValidator validator(path.get(), zip, stream);
+ if (validator.IsValid()) {
+ DEBUG_LOG("Reusing %s", static_cast<char *>(path.get()));
+ return MappableFile::Create(path.get());
+ }
+ DEBUG_LOG("Extracting to %s", static_cast<char *>(path.get()));
+ AutoCloseFD fd;
+ fd = open(path.get(), O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
+ S_IRUSR | S_IWUSR);
+ if (fd == -1) {
+ ERROR("Couldn't open %s to decompress library", path.get());
+ return nullptr;
+ }
+ AutoUnlinkFile file(path.release());
+ if (stream->GetType() == Zip::Stream::DEFLATE) {
+ if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
+ ERROR("Couldn't ftruncate %s to decompress library", file.get());
+ return nullptr;
+ }
+ /* Map the temporary file for use as inflate buffer */
+ MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
+ PROT_WRITE, MAP_SHARED, fd, 0));
+ if (buffer == MAP_FAILED) {
+ ERROR("Couldn't map %s to decompress library", file.get());
+ return nullptr;
+ }
+
+ zxx_stream zStream = stream->GetZStream(buffer);
+
+ /* Decompress */
+ if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
+ ERROR("inflateInit failed: %s", zStream.msg);
+ return nullptr;
+ }
+ if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
+ ERROR("inflate failed: %s", zStream.msg);
+ return nullptr;
+ }
+ if (inflateEnd(&zStream) != Z_OK) {
+ ERROR("inflateEnd failed: %s", zStream.msg);
+ return nullptr;
+ }
+ if (zStream.total_out != stream->GetUncompressedSize()) {
+ ERROR("File not fully uncompressed! %ld / %d", zStream.total_out,
+ static_cast<unsigned int>(stream->GetUncompressedSize()));
+ return nullptr;
+ }
+ } else if (XZStream::IsXZ(stream->GetBuffer(), stream->GetSize())) {
+ XZStream xzStream(stream->GetBuffer(), stream->GetSize());
+
+ if (!xzStream.Init()) {
+ ERROR("Couldn't initialize XZ decoder");
+ return nullptr;
+ }
+ DEBUG_LOG("XZStream created, compressed=%u, uncompressed=%u",
+ xzStream.Size(), xzStream.UncompressedSize());
+
+ if (ftruncate(fd, xzStream.UncompressedSize()) == -1) {
+ ERROR("Couldn't ftruncate %s to decompress library", file.get());
+ return nullptr;
+ }
+ MappedPtr buffer(MemoryRange::mmap(nullptr, xzStream.UncompressedSize(),
+ PROT_WRITE, MAP_SHARED, fd, 0));
+ if (buffer == MAP_FAILED) {
+ ERROR("Couldn't map %s to decompress library", file.get());
+ return nullptr;
+ }
+ const size_t written = xzStream.Decode(buffer, buffer.GetLength());
+ DEBUG_LOG("XZStream decoded %u", written);
+ if (written != buffer.GetLength()) {
+ ERROR("Error decoding XZ file %s", file.get());
+ return nullptr;
+ }
+ } else if (stream->GetType() == Zip::Stream::STORE) {
+ SeekableZStream zStream;
+ if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
+ ERROR("Couldn't initialize SeekableZStream for %s", name);
+ return nullptr;
+ }
+ if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
+ ERROR("Couldn't ftruncate %s to decompress library", file.get());
+ return nullptr;
+ }
+ MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
+ PROT_WRITE, MAP_SHARED, fd, 0));
+ if (buffer == MAP_FAILED) {
+ ERROR("Couldn't map %s to decompress library", file.get());
+ return nullptr;
+ }
+
+ if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
+ ERROR("%s: failed to decompress", name);
+ return nullptr;
+ }
+ } else {
+ return nullptr;
+ }
+
+ validator.CacheChecksum();
+ return new MappableExtractFile(fd.forget(), file.release());
+}
+
+/**
+ * _MappableBuffer is a buffer whose content can be mapped at different
+ * locations in the virtual address space.
+ * On Linux, uses a (deleted) temporary file on a tmpfs for sharable content.
+ * On Android, uses ashmem.
+ */
+class _MappableBuffer: public MappedPtr
+{
+public:
+ /**
+ * Returns a _MappableBuffer instance with the given name and the given
+ * length.
+ */
+ static _MappableBuffer *Create(const char *name, size_t length)
+ {
+ AutoCloseFD fd;
+#ifdef ANDROID
+ /* On Android, initialize an ashmem region with the given length */
+ fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
+ if (fd == -1)
+ return nullptr;
+ char str[ASHMEM_NAME_LEN];
+ strlcpy(str, name, sizeof(str));
+ ioctl(fd, ASHMEM_SET_NAME, str);
+ if (ioctl(fd, ASHMEM_SET_SIZE, length))
+ return nullptr;
+
+ /* The Gecko crash reporter is confused by adjacent memory mappings of
+ * the same file and chances are we're going to map from the same file
+ * descriptor right away. To avoid problems with the crash reporter,
+ * create an empty anonymous page before or after the ashmem mapping,
+ * depending on how mappings grow in the address space.
+ */
+#if defined(__arm__)
+ void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (buf != MAP_FAILED) {
+ ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
+ PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
+ length, str, buf);
+ return new _MappableBuffer(fd.forget(), buf, length);
+ }
+#elif defined(__i386__)
+ size_t anon_mapping_length = length + PAGE_SIZE;
+ void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (buf != MAP_FAILED) {
+ char *first_page = reinterpret_cast<char *>(buf);
+ char *map_page = first_page + PAGE_SIZE;
+
+ void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, fd, 0);
+ if (actual_buf == MAP_FAILED) {
+ ::munmap(buf, anon_mapping_length);
+ DEBUG_LOG("Fixed allocation of decompression buffer at %p failed", map_page);
+ return nullptr;
+ }
+
+ DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
+ length, str, actual_buf);
+ return new _MappableBuffer(fd.forget(), actual_buf, length);
+ }
+#else
+#error need to add a case for your CPU
+#endif
+#else
+ /* On Linux, use /dev/shm as base directory for temporary files, assuming
+ * it's on tmpfs */
+ /* TODO: check that /dev/shm is tmpfs */
+ char path[256];
+    snprintf(path, sizeof(path), "/dev/shm/%s.XXXXXX", name);
+ fd = mkstemp(path);
+ if (fd == -1)
+ return nullptr;
+ unlink(path);
+ ftruncate(fd, length);
+
+ void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (buf != MAP_FAILED) {
+ DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
+ length, path, buf);
+ return new _MappableBuffer(fd.forget(), buf, length);
+ }
+#endif
+ return nullptr;
+ }
+
+ void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
+ {
+ MOZ_ASSERT(fd != -1);
+#ifdef ANDROID
+ /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
+ * there is content in the ashmem */
+ if (flags & MAP_PRIVATE) {
+ flags &= ~MAP_PRIVATE;
+ flags |= MAP_SHARED;
+ }
+#endif
+ return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
+ }
+
+#ifdef ANDROID
+ ~_MappableBuffer() {
+ /* Free the additional page we allocated. See _MappableBuffer::Create */
+#if defined(__arm__)
+ ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
+#elif defined(__i386__)
+ ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
+#else
+#error need to add a case for your CPU
+#endif
+ }
+#endif
+
+private:
+ _MappableBuffer(int fd, void *buf, size_t length)
+ : MappedPtr(buf, length), fd(fd) { }
+
+ /* File descriptor for the temporary file or ashmem */
+ AutoCloseFD fd;
+};
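+
+/* Illustrative sketch (not part of the original source): how a caller might
+ * use _MappableBuffer to fill a buffer once and map the same content at
+ * another address. The buffer name "example" is hypothetical.
+ *
+ *   mozilla::UniquePtr<_MappableBuffer> buf(
+ *     _MappableBuffer::Create("example", 2 * PageSize()));
+ *   if (buf) {
+ *     memset(*buf, 0xaa, buf->GetLength());
+ *     // Map the same content elsewhere, read-only this time.
+ *     void *alias = buf->mmap(nullptr, buf->GetLength(), PROT_READ,
+ *                             MAP_PRIVATE, 0);
+ *     if (alias != MAP_FAILED)
+ *       ::munmap(alias, buf->GetLength());
+ *   }
+ */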
+
+
+Mappable *
+MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
+{
+ MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
+ _MappableBuffer *buf = _MappableBuffer::Create(name, stream->GetUncompressedSize());
+ if (buf)
+ return new MappableDeflate(buf, zip, stream);
+ return nullptr;
+}
+
+MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
+ Zip::Stream *stream)
+: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }
+
+MappableDeflate::~MappableDeflate() { }
+
+MemoryRange
+MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
+{
+ MOZ_ASSERT(buffer);
+ MOZ_ASSERT(!(flags & MAP_SHARED));
+ flags |= MAP_PRIVATE;
+
+ /* The deflate stream is uncompressed up to the required offset + length, if
+ * it hasn't previously been uncompressed */
+ ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
+ if (missing > 0) {
+ uInt avail_out = zStream.avail_out;
+ zStream.avail_out = missing;
+ if ((*buffer == zStream.next_out) &&
+ (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
+ ERROR("inflateInit failed: %s", zStream.msg);
+ return MemoryRange(MAP_FAILED, 0);
+ }
+ int ret = inflate(&zStream, Z_SYNC_FLUSH);
+ if (ret < 0) {
+ ERROR("inflate failed: %s", zStream.msg);
+ return MemoryRange(MAP_FAILED, 0);
+ }
+ if (ret == Z_NEED_DICT) {
+ ERROR("zstream requires a dictionary. %s", zStream.msg);
+ return MemoryRange(MAP_FAILED, 0);
+ }
+ zStream.avail_out = avail_out - missing + zStream.avail_out;
+ if (ret == Z_STREAM_END) {
+ if (inflateEnd(&zStream) != Z_OK) {
+ ERROR("inflateEnd failed: %s", zStream.msg);
+ return MemoryRange(MAP_FAILED, 0);
+ }
+ if (zStream.total_out != buffer->GetLength()) {
+ ERROR("File not fully uncompressed! %ld / %d", zStream.total_out,
+ static_cast<unsigned int>(buffer->GetLength()));
+ return MemoryRange(MAP_FAILED, 0);
+ }
+ }
+ }
+#if defined(ANDROID) && defined(__arm__)
+ if (prot & PROT_EXEC) {
+ /* We just extracted data that may be executed in the future.
+ * We thus need to ensure Instruction and Data cache coherency. */
+ DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
+ cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
+ reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
+ }
+#endif
+
+ return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
+}
+
+void
+MappableDeflate::finalize()
+{
+ /* Free zlib internal buffers */
+ inflateEnd(&zStream);
+ /* Free decompression buffer */
+ buffer = nullptr;
+ /* Remove reference to Zip archive */
+ zip = nullptr;
+}
+
+size_t
+MappableDeflate::GetLength() const
+{
+ return buffer->GetLength();
+}
+
+Mappable *
+MappableSeekableZStream::Create(const char *name, Zip *zip,
+ Zip::Stream *stream)
+{
+ MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
+ UniquePtr<MappableSeekableZStream> mappable(new MappableSeekableZStream(zip));
+
+ pthread_mutexattr_t recursiveAttr;
+ pthread_mutexattr_init(&recursiveAttr);
+ pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);
+
+  int err = pthread_mutex_init(&mappable->mutex, &recursiveAttr);
+  pthread_mutexattr_destroy(&recursiveAttr);
+  if (err)
+    return nullptr;
+
+ if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
+ return nullptr;
+
+ mappable->buffer.reset(_MappableBuffer::Create(name,
+ mappable->zStream.GetUncompressedSize()));
+ if (!mappable->buffer)
+ return nullptr;
+
+ mappable->chunkAvail = MakeUnique<unsigned char[]>(mappable->zStream.GetChunksNum());
+
+ return mappable.release();
+}
+
+MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
+: zip(zip), chunkAvailNum(0) { }
+
+MappableSeekableZStream::~MappableSeekableZStream()
+{
+ pthread_mutex_destroy(&mutex);
+}
+
+MemoryRange
+MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
+ int flags, off_t offset)
+{
+  /* Map with PROT_NONE so that accessing the mapping segfaults and brings
+   * us to ensure() */
+ void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
+ if (res == MAP_FAILED)
+ return MemoryRange(MAP_FAILED, 0);
+
+ /* Store the mapping, ordered by offset and length */
+ std::vector<LazyMap>::reverse_iterator it;
+ for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
+ if ((it->offset < offset) ||
+ ((it->offset == offset) && (it->length < length)))
+ break;
+ }
+ LazyMap map = { res, length, prot, offset };
+ lazyMaps.insert(it.base(), map);
+ return MemoryRange(res, length);
+}
+
+void
+MappableSeekableZStream::munmap(void *addr, size_t length)
+{
+ std::vector<LazyMap>::iterator it;
+ for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
+ if ((it->addr = addr) && (it->length == length)) {
+ lazyMaps.erase(it);
+ ::munmap(addr, length);
+ return;
+ }
+ MOZ_CRASH("munmap called with unknown mapping");
+}
+
+void
+MappableSeekableZStream::finalize() { }
+
+bool
+MappableSeekableZStream::ensure(const void *addr)
+{
+ DEBUG_LOG("ensure @%p", addr);
+ const void *addrPage = PageAlignedPtr(addr);
+ /* Find the mapping corresponding to the given page */
+ std::vector<LazyMap>::iterator map;
+ for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
+ if (map->Contains(addrPage))
+ break;
+ }
+ if (map == lazyMaps.end())
+ return false;
+
+ /* Find corresponding chunk */
+ off_t mapOffset = map->offsetOf(addrPage);
+ off_t chunk = mapOffset / zStream.GetChunkSize();
+
+ /* In the typical case, we just need to decompress the chunk entirely. But
+ * when the current mapping ends in the middle of the chunk, we want to
+ * stop at the end of the corresponding page.
+ * However, if another mapping needs the last part of the chunk, we still
+ * need to continue. As mappings are ordered by offset and length, we don't
+ * need to scan the entire list of mappings.
+ * It is safe to run through lazyMaps here because the linker is never
+ * going to call mmap (which adds lazyMaps) while this function is
+ * called. */
+ size_t length = zStream.GetChunkSize(chunk);
+ off_t chunkStart = chunk * zStream.GetChunkSize();
+ off_t chunkEnd = chunkStart + length;
+ std::vector<LazyMap>::iterator it;
+ for (it = map; it < lazyMaps.end(); ++it) {
+ if (chunkEnd <= it->endOffset())
+ break;
+ }
+ if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
+ /* The mapping "it" points at now is past the interesting one */
+ --it;
+ length = it->endOffset() - chunkStart;
+ }
+
+ length = PageAlignedSize(length);
+
+ /* The following lock can be re-acquired by the thread holding it.
+ * If this happens, it means the following code is interrupted somehow by
+ * some signal, and ends up retriggering a chunk decompression for the
+ * same MappableSeekableZStream.
+ * If the chunk to decompress is different the second time, then everything
+ * is safe as the only common data touched below is chunkAvailNum, and it is
+ * atomically updated (leaving out any chance of an interruption while it is
+ * updated affecting the result). If the chunk to decompress is the same, the
+ * worst thing that can happen is chunkAvailNum being incremented one too
+   * many times, which doesn't affect functionality. The chances of it
+   * happening are pretty slim, and the effect is harmless, so we can just
+   * ignore the issue. Other than that, we'd just be wasting time decompressing
+ * the same chunk twice. */
+ AutoLock lock(&mutex);
+
+  /* The very first page is mapped and accessed separately from the rest, and
+ * as such, only the first page of the first chunk is decompressed this way.
+ * When we fault in the remaining pages of that chunk, we want to decompress
+ * the complete chunk again. Short of doing that, we would end up with
+ * no data between PageSize() and chunkSize, which would effectively corrupt
+ * symbol resolution in the underlying library. */
+ if (chunkAvail[chunk] < PageNumber(length)) {
+ if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
+ return false;
+
+#if defined(ANDROID) && defined(__arm__)
+ if (map->prot & PROT_EXEC) {
+ /* We just extracted data that may be executed in the future.
+ * We thus need to ensure Instruction and Data cache coherency. */
+ DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
+ cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
+ reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
+ }
+#endif
+ /* Only count if we haven't already decompressed parts of the chunk */
+ if (chunkAvail[chunk] == 0)
+ chunkAvailNum++;
+
+ chunkAvail[chunk] = PageNumber(length);
+ }
+
+ /* Flip the chunk mapping protection to the recorded flags. We could
+ * also flip the protection for other mappings of the same chunk,
+ * but it's easier to skip that and let further segfaults call
+ * ensure again. */
+ const void *chunkAddr = reinterpret_cast<const void *>
+ (reinterpret_cast<uintptr_t>(addrPage)
+ - mapOffset % zStream.GetChunkSize());
+ const void *chunkEndAddr = reinterpret_cast<const void *>
+ (reinterpret_cast<uintptr_t>(chunkAddr) + length);
+
+ const void *start = std::max(map->addr, chunkAddr);
+ const void *end = std::min(map->end(), chunkEndAddr);
+ length = reinterpret_cast<uintptr_t>(end)
+ - reinterpret_cast<uintptr_t>(start);
+
+ if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
+ DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
+ return true;
+ }
+
+ ERROR("mprotect @%p, 0x%" PRIxSize ", 0x%x failed with errno %d",
+ start, length, map->prot, errno);
+ return false;
+}
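+
+/* Illustrative sketch (not part of the original source): the intended
+ * fault-driven flow around ensure(). The real SIGSEGV handler lives in the
+ * ElfLoader code; `mappable` below is assumed to be the
+ * MappableSeekableZStream backing the faulting mapping.
+ *
+ *   void segv_handler(int signum, siginfo_t *info, void *context)
+ *   {
+ *     if (mappable->ensure(info->si_addr))
+ *       return; // chunk decompressed and mprotect()ed; retry the access
+ *     // not one of our lazy mappings: chain to the default handler
+ *   }
+ */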
+
+void
+MappableSeekableZStream::stats(const char *when, const char *name) const
+{
+ size_t nEntries = zStream.GetChunksNum();
+ DEBUG_LOG("%s: %s; %" PRIdSize "/%" PRIdSize " chunks decompressed",
+ name, when, static_cast<size_t>(chunkAvailNum), nEntries);
+
+ size_t len = 64;
+ UniquePtr<char[]> map = MakeUnique<char[]>(len + 3);
+ map[0] = '[';
+
+ for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
+ map[j] = chunkAvail[i] ? '*' : '_';
+ if ((j == len) || (i == nEntries - 1)) {
+ map[j + 1] = ']';
+ map[j + 2] = '\0';
+ DEBUG_LOG("%s", static_cast<char *>(map.get()));
+ j = 0;
+ }
+ }
+}
+
+size_t
+MappableSeekableZStream::GetLength() const
+{
+ return buffer->GetLength();
+}
diff --git a/mozglue/linker/Mappable.h b/mozglue/linker/Mappable.h
new file mode 100644
index 000000000..0224ae3b5
--- /dev/null
+++ b/mozglue/linker/Mappable.h
@@ -0,0 +1,267 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Mappable_h
+#define Mappable_h
+
+#include "Zip.h"
+#include "SeekableZStream.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "zlib.h"
+
+/**
+ * Abstract class to handle mmap()ing from various kinds of entities, such as
+ * plain files or Zip entries. The virtual members are meant to act as the
+ * equivalent system functions, except mapped memory is always MAP_PRIVATE,
+ * even though a given implementation may use something different internally.
+ */
+class Mappable: public mozilla::RefCounted<Mappable>
+{
+public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(Mappable)
+ virtual ~Mappable() { }
+
+ virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags,
+ off_t offset) = 0;
+
+ enum Kind {
+ MAPPABLE_FILE,
+ MAPPABLE_EXTRACT_FILE,
+ MAPPABLE_DEFLATE,
+ MAPPABLE_SEEKABLE_ZSTREAM
+ };
+
+ virtual Kind GetKind() const = 0;
+
+private:
+ virtual void munmap(void *addr, size_t length) {
+ ::munmap(addr, length);
+ }
+ /* Limit use of Mappable::munmap to classes that keep track of the address
+   * and size of the mapping. This allows the ::munmap return value to be
+   * ignored. */
+ friend class Mappable1stPagePtr;
+ friend class LibHandle;
+
+public:
+ /**
+ * Ensures the availability of the memory pages for the page(s) containing
+ * the given address. Returns whether the pages were successfully made
+ * available.
+ */
+ virtual bool ensure(const void *addr) { return true; }
+
+ /**
+ * Indicate to a Mappable instance that no further mmap is going to happen.
+ */
+ virtual void finalize() = 0;
+
+ /**
+ * Shows some stats about the Mappable instance.
+ * Meant for MappableSeekableZStream only.
+   * As Mappables don't keep track of what they are instantiated for, the name
+   * argument is used to make the stats logging useful to the reader. The when
+   * argument is used by the caller to identify the point at which the stats
+   * call is made.
+ */
+ virtual void stats(const char *when, const char *name) const { }
+
+ /**
+ * Returns the maximum length that can be mapped from this Mappable for
+ * offset = 0.
+ */
+ virtual size_t GetLength() const = 0;
+};
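+
+/* Illustrative sketch (not part of the original source): Mappable is meant
+ * to be used uniformly regardless of the backing entity. The path below is
+ * hypothetical.
+ *
+ *   RefPtr<Mappable> mappable = MappableFile::Create("/path/to/lib.so");
+ *   if (mappable) {
+ *     MemoryRange range = mappable->mmap(nullptr, mappable->GetLength(),
+ *                                        PROT_READ, MAP_PRIVATE, 0);
+ *     if (range.get() != MAP_FAILED) {
+ *       // ... read the mapping ...
+ *     }
+ *     mappable->finalize();
+ *   }
+ */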
+
+/**
+ * Mappable implementation for plain files
+ */
+class MappableFile: public Mappable
+{
+public:
+ ~MappableFile() { }
+
+ /**
+ * Create a MappableFile instance for the given file path.
+ */
+ static Mappable *Create(const char *path);
+
+ /* Inherited from Mappable */
+ virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+ virtual void finalize();
+ virtual size_t GetLength() const;
+
+ virtual Kind GetKind() const { return MAPPABLE_FILE; };
+protected:
+ MappableFile(int fd): fd(fd) { }
+
+private:
+ /* File descriptor */
+ AutoCloseFD fd;
+};
+
+/**
+ * Mappable implementation for a deflated stream in a Zip archive.
+ * Inflates the complete stream into a cache file.
+ */
+class MappableExtractFile: public MappableFile
+{
+public:
+ ~MappableExtractFile() = default;
+
+ /**
+ * Create a MappableExtractFile instance for the given Zip stream. The name
+ * argument is used to create the cache file in the cache directory.
+ */
+ static Mappable *Create(const char *name, Zip *zip, Zip::Stream *stream);
+
+ /* Override finalize from MappableFile */
+ virtual void finalize() {}
+
+ virtual Kind GetKind() const { return MAPPABLE_EXTRACT_FILE; };
+private:
+ /**
+ * AutoUnlinkFile keeps track of a file name and removes (unlinks) the file
+ * when the instance is destroyed.
+ */
+ struct UnlinkFile
+ {
+ void operator()(char *value) {
+ unlink(value);
+ delete [] value;
+ }
+ };
+ typedef mozilla::UniquePtr<char[], UnlinkFile> AutoUnlinkFile;
+
+ MappableExtractFile(int fd, const char* path)
+ : MappableFile(fd), path(path) { }
+
+ /* Extracted file path */
+ mozilla::UniquePtr<const char[]> path;
+};
+
+class _MappableBuffer;
+
+/**
+ * Mappable implementation for a deflated stream in a Zip archive.
+ * Inflates the mapped bits in a temporary buffer.
+ */
+class MappableDeflate: public Mappable
+{
+public:
+ ~MappableDeflate();
+
+ /**
+ * Create a MappableDeflate instance for the given Zip stream. The name
+ * argument is used for an appropriately named temporary file, and the Zip
+   * instance is given for the MappableDeflate to keep a reference to it.
+ */
+ static Mappable *Create(const char *name, Zip *zip, Zip::Stream *stream);
+
+ /* Inherited from Mappable */
+ virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+ virtual void finalize();
+ virtual size_t GetLength() const;
+
+ virtual Kind GetKind() const { return MAPPABLE_DEFLATE; };
+private:
+ MappableDeflate(_MappableBuffer *buf, Zip *zip, Zip::Stream *stream);
+
+ /* Zip reference */
+ RefPtr<Zip> zip;
+
+ /* Decompression buffer */
+ mozilla::UniquePtr<_MappableBuffer> buffer;
+
+ /* Zlib data */
+ zxx_stream zStream;
+};
+
+/**
+ * Mappable implementation for seekable zStreams.
+ * Inflates the mapped bits in a temporary buffer, on demand.
+ */
+class MappableSeekableZStream: public Mappable
+{
+public:
+ ~MappableSeekableZStream();
+
+ /**
+ * Create a MappableSeekableZStream instance for the given Zip stream. The
+ * name argument is used for an appropriately named temporary file, and the
+ * Zip instance is given for the MappableSeekableZStream to keep a reference
+   * to it.
+ */
+ static Mappable *Create(const char *name, Zip *zip,
+ Zip::Stream *stream);
+
+ /* Inherited from Mappable */
+ virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+ virtual void munmap(void *addr, size_t length);
+ virtual void finalize();
+ virtual bool ensure(const void *addr);
+ virtual void stats(const char *when, const char *name) const;
+ virtual size_t GetLength() const;
+
+ virtual Kind GetKind() const { return MAPPABLE_SEEKABLE_ZSTREAM; };
+private:
+ MappableSeekableZStream(Zip *zip);
+
+ /* Zip reference */
+ RefPtr<Zip> zip;
+
+ /* Decompression buffer */
+ mozilla::UniquePtr<_MappableBuffer> buffer;
+
+ /* Seekable ZStream */
+ SeekableZStream zStream;
+
+ /* Keep track of mappings performed with MappableSeekableZStream::mmap so
+ * that they can be realized by MappableSeekableZStream::ensure.
+ * Values stored in the struct are those passed to mmap */
+ struct LazyMap
+ {
+ const void *addr;
+ size_t length;
+ int prot;
+ off_t offset;
+
+ /* Returns addr + length, as a pointer */
+ const void *end() const {
+ return reinterpret_cast<const void *>
+ (reinterpret_cast<const unsigned char *>(addr) + length);
+ }
+
+ /* Returns offset + length */
+ off_t endOffset() const {
+ return offset + length;
+ }
+
+ /* Returns the offset corresponding to the given address */
+ off_t offsetOf(const void *ptr) const {
+ return reinterpret_cast<uintptr_t>(ptr)
+ - reinterpret_cast<uintptr_t>(addr) + offset;
+ }
+
+ /* Returns whether the given address is in the LazyMap range */
+ bool Contains(const void *ptr) const {
+ return (ptr >= addr) && (ptr < end());
+ }
+ };
+
+ /* List of all mappings */
+ std::vector<LazyMap> lazyMaps;
+
+ /* Array keeping track of which chunks have already been decompressed.
+ * Each value is the number of pages decompressed for the given chunk. */
+ mozilla::UniquePtr<unsigned char[]> chunkAvail;
+
+ /* Number of chunks that have already been decompressed. */
+ mozilla::Atomic<size_t> chunkAvailNum;
+
+ /* Mutex protecting decompression */
+ pthread_mutex_t mutex;
+};
+
+#endif /* Mappable_h */
diff --git a/mozglue/linker/SeekableZStream.cpp b/mozglue/linker/SeekableZStream.cpp
new file mode 100644
index 000000000..6dd0ef6d5
--- /dev/null
+++ b/mozglue/linker/SeekableZStream.cpp
@@ -0,0 +1,261 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+#include "SeekableZStream.h"
+#include "Logging.h"
+
+bool
+SeekableZStream::Init(const void *buf, size_t length)
+{
+ const SeekableZStreamHeader *header = SeekableZStreamHeader::validate(buf);
+ if (!header) {
+ ERROR("Not a seekable zstream");
+ return false;
+ }
+
+ buffer = reinterpret_cast<const unsigned char *>(buf);
+ totalSize = header->totalSize;
+ chunkSize = header->chunkSize;
+ lastChunkSize = header->lastChunkSize;
+ windowBits = header->windowBits;
+ dictionary.Init(buffer + sizeof(SeekableZStreamHeader), header->dictSize);
+ offsetTable.Init(buffer + sizeof(SeekableZStreamHeader) + header->dictSize,
+ header->nChunks);
+ filter = GetFilter(header->filter);
+
+ /* Sanity check */
+ if ((chunkSize == 0) ||
+ (!IsPageAlignedSize(chunkSize)) ||
+ (chunkSize > 8 * PageSize()) ||
+ (offsetTable.numElements() < 1) ||
+ (lastChunkSize == 0) ||
+ (lastChunkSize > chunkSize) ||
+ (length < totalSize)) {
+ ERROR("Malformed or broken seekable zstream");
+ return false;
+ }
+
+ return true;
+}
+
+bool
+SeekableZStream::Decompress(void *where, size_t chunk, size_t length)
+{
+ while (length) {
+ size_t len = std::min(length, static_cast<size_t>(chunkSize));
+ if (!DecompressChunk(where, chunk, len))
+ return false;
+ where = reinterpret_cast<unsigned char *>(where) + len;
+ length -= len;
+ chunk++;
+ }
+ return true;
+}
+
+bool
+SeekableZStream::DecompressChunk(void *where, size_t chunk, size_t length)
+{
+ if (chunk >= offsetTable.numElements()) {
+ ERROR("DecompressChunk: chunk #%" PRIdSize " out of range [0-%" PRIdSize ")",
+ chunk, offsetTable.numElements());
+ return false;
+ }
+
+ bool isLastChunk = (chunk == offsetTable.numElements() - 1);
+
+ size_t chunkLen = isLastChunk ? lastChunkSize : chunkSize;
+
+ if (length == 0 || length > chunkLen)
+ length = chunkLen;
+
+ DEBUG_LOG("DecompressChunk #%" PRIdSize " @%p (%" PRIdSize "/% " PRIdSize ")",
+ chunk, where, length, chunkLen);
+ zxx_stream zStream(&allocator);
+ zStream.avail_in = (isLastChunk ? totalSize : uint32_t(offsetTable[chunk + 1]))
+ - uint32_t(offsetTable[chunk]);
+ zStream.next_in = const_cast<Bytef *>(buffer + uint32_t(offsetTable[chunk]));
+ zStream.avail_out = length;
+ zStream.next_out = reinterpret_cast<Bytef *>(where);
+
+ /* Decompress chunk */
+ if (inflateInit2(&zStream, windowBits) != Z_OK) {
+ ERROR("inflateInit failed: %s", zStream.msg);
+ return false;
+ }
+ if (dictionary && inflateSetDictionary(&zStream, dictionary,
+ dictionary.numElements()) != Z_OK) {
+ ERROR("inflateSetDictionary failed: %s", zStream.msg);
+ return false;
+ }
+ if (inflate(&zStream, (length == chunkLen) ? Z_FINISH : Z_SYNC_FLUSH)
+ != (length == chunkLen) ? Z_STREAM_END : Z_OK) {
+ ERROR("inflate failed: %s", zStream.msg);
+ return false;
+ }
+ if (inflateEnd(&zStream) != Z_OK) {
+ ERROR("inflateEnd failed: %s", zStream.msg);
+ return false;
+ }
+ if (filter)
+ filter(chunk * chunkSize, UNFILTER, (unsigned char *)where, chunkLen);
+
+ return true;
+}
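+
+/* Illustrative sketch (not part of the original source): decompressing a
+ * complete seekable zstream into a caller-provided buffer; `data` and `size`
+ * are assumed to hold the compressed stream.
+ *
+ *   SeekableZStream zs;
+ *   if (zs.Init(data, size)) {
+ *     std::vector<unsigned char> out(zs.GetUncompressedSize());
+ *     if (zs.Decompress(out.data(), 0, out.size())) {
+ *       // out now holds the uncompressed data
+ *     }
+ *   }
+ */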
+
+/* Branch/Call/Jump conversion filter for Thumb, derived from xz-utils
+ * by Igor Pavlov and Lasse Collin, published in the public domain */
+static void
+BCJ_Thumb_filter(off_t offset, SeekableZStream::FilterDirection dir,
+ unsigned char *buf, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 2) {
+ if ((buf[i + 1] & 0xf8) == 0xf0 && (buf[i + 3] & 0xf8) == 0xf8) {
+ uint32_t src = (buf[i] << 11)
+ | ((buf[i + 1] & 0x07) << 19)
+ | buf[i + 2]
+ | ((buf[i + 3] & 0x07) << 8);
+ src <<= 1;
+ uint32_t dest;
+ if (dir == SeekableZStream::FILTER)
+ dest = offset + (uint32_t)(i) + 4 + src;
+ else
+ dest = src - (offset + (uint32_t)(i) + 4);
+
+ dest >>= 1;
+ buf[i] = dest >> 11;
+ buf[i + 1] = 0xf0 | ((dest >> 19) & 0x07);
+ buf[i + 2] = dest;
+ buf[i + 3] = 0xf8 | ((dest >> 8) & 0x07);
+ i += 2;
+ }
+ }
+}
+
+/* Branch/Call/Jump conversion filter for ARM, derived from xz-utils
+ * by Igor Pavlov and Lasse Collin, published in the public domain */
+static void
+BCJ_ARM_filter(off_t offset, SeekableZStream::FilterDirection dir,
+ unsigned char *buf, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 4) {
+ if (buf[i + 3] == 0xeb) {
+ uint32_t src = buf[i]
+ | (buf[i + 1] << 8)
+ | (buf[i + 2] << 16);
+ src <<= 2;
+ uint32_t dest;
+ if (dir == SeekableZStream::FILTER)
+ dest = offset + (uint32_t)(i) + 8 + src;
+ else
+ dest = src - (offset + (uint32_t)(i) + 8);
+
+ dest >>= 2;
+ buf[i] = dest;
+ buf[i + 1] = dest >> 8;
+ buf[i + 2] = dest >> 16;
+ }
+ }
+}
+
+/* Branch/Call/Jump conversion filter for x86, derived from xz-utils
+ * by Igor Pavlov and Lasse Collin, published in the public domain */
+
+#define Test86MSByte(b) ((b) == 0 || (b) == 0xff)
+
+static void
+BCJ_X86_filter(off_t offset, SeekableZStream::FilterDirection dir,
+ unsigned char *buf, size_t size)
+{
+ static const bool MASK_TO_ALLOWED_STATUS[8] =
+ { true, true, true, false, true, false, false, false };
+
+ static const uint32_t MASK_TO_BIT_NUMBER[8] =
+ { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+ uint32_t prev_mask = 0;
+ uint32_t prev_pos = 0;
+
+ for (size_t i = 0; i + 5 <= size;) {
+ uint8_t b = buf[i];
+ if (b != 0xe8 && b != 0xe9) {
+ ++i;
+ continue;
+ }
+
+ const uint32_t off = offset + (uint32_t)(i) - prev_pos;
+ prev_pos = offset + (uint32_t)(i);
+
+ if (off > 5) {
+ prev_mask = 0;
+ } else {
+ for (uint32_t i = 0; i < off; ++i) {
+ prev_mask &= 0x77;
+ prev_mask <<= 1;
+ }
+ }
+
+ b = buf[i + 4];
+
+ if (Test86MSByte(b) && MASK_TO_ALLOWED_STATUS[(prev_mask >> 1) & 0x7]
+ && (prev_mask >> 1) < 0x10) {
+
+ uint32_t src = ((uint32_t)(b) << 24)
+ | ((uint32_t)(buf[i + 3]) << 16)
+ | ((uint32_t)(buf[i + 2]) << 8)
+ | (buf[i + 1]);
+
+ uint32_t dest;
+ while (true) {
+ if (dir == SeekableZStream::FILTER)
+ dest = src + (offset + (uint32_t)(i) + 5);
+ else
+ dest = src - (offset + (uint32_t)(i) + 5);
+
+ if (prev_mask == 0)
+ break;
+
+ const uint32_t i = MASK_TO_BIT_NUMBER[prev_mask >> 1];
+
+ b = (uint8_t)(dest >> (24 - i * 8));
+
+ if (!Test86MSByte(b))
+ break;
+
+ src = dest ^ ((1 << (32 - i * 8)) - 1);
+ }
+
+ buf[i + 4] = (uint8_t)(~(((dest >> 24) & 1) - 1));
+ buf[i + 3] = (uint8_t)(dest >> 16);
+ buf[i + 2] = (uint8_t)(dest >> 8);
+ buf[i + 1] = (uint8_t)(dest);
+ i += 5;
+ prev_mask = 0;
+
+ } else {
+ ++i;
+ prev_mask |= 1;
+ if (Test86MSByte(b))
+ prev_mask |= 0x10;
+ }
+ }
+}
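+
+/* Illustrative sketch (not part of the original source): FILTER and UNFILTER
+ * are inverse passes, which is what szip relies on. A round trip over a code
+ * buffer restores the original bytes:
+ *
+ *   SeekableZStream::ZStreamFilter f =
+ *     SeekableZStream::GetFilter(SeekableZStream::BCJ_X86);
+ *   f(0, SeekableZStream::FILTER, buf, size);   // applied before compression
+ *   f(0, SeekableZStream::UNFILTER, buf, size); // applied after decompression
+ *   // buf is now byte-identical to its original content
+ */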
+
+SeekableZStream::ZStreamFilter
+SeekableZStream::GetFilter(SeekableZStream::FilterId id)
+{
+ switch (id) {
+ case BCJ_THUMB:
+ return BCJ_Thumb_filter;
+ case BCJ_ARM:
+ return BCJ_ARM_filter;
+ case BCJ_X86:
+ return BCJ_X86_filter;
+ default:
+ return nullptr;
+ }
+ return nullptr;
+}
diff --git a/mozglue/linker/SeekableZStream.h b/mozglue/linker/SeekableZStream.h
new file mode 100644
index 000000000..3505c681e
--- /dev/null
+++ b/mozglue/linker/SeekableZStream.h
@@ -0,0 +1,154 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SeekableZStream_h
+#define SeekableZStream_h
+
+#include "Zip.h"
+
+/**
+ * Seekable compressed streams are created by splitting the original
+ * decompressed data into small chunks and compressing these chunks
+ * individually.
+ *
+ * The seekable compressed file format consists of a header defined below,
+ * followed by a table of 32-bit words containing the offsets of each
+ * individual compressed chunk, followed by the compressed chunks.
+ */
+
+#pragma pack(1)
+struct SeekableZStreamHeader: public Zip::SignedEntity<SeekableZStreamHeader>
+{
+ SeekableZStreamHeader()
+ : Zip::SignedEntity<SeekableZStreamHeader>(magic)
+ , totalSize(0), chunkSize(0), dictSize(0), nChunks(0), lastChunkSize(0)
+ , windowBits(0), filter(0) { }
+
+ /* Reuse Zip::SignedEntity to handle the magic number used in the Seekable
+ * ZStream file format. The magic number is "SeZz". */
+ static const uint32_t magic = 0x7a5a6553;
+
+ /* Total size of the stream, including the 4 magic bytes. */
+ le_uint32 totalSize;
+
+ /* Chunk size */
+ le_uint16 chunkSize;
+
+ /* Size of the dictionary */
+ le_uint16 dictSize;
+
+ /* Number of chunks */
+ le_uint32 nChunks;
+
+ /* Size of last chunk (> 0, <= Chunk size) */
+ le_uint16 lastChunkSize;
+
+ /* windowBits value used when deflating */
+ signed char windowBits;
+
+ /* Filter Id */
+ unsigned char filter;
+};
+#pragma pack()
+
+static_assert(sizeof(SeekableZStreamHeader) == 5 * 4,
+ "SeekableZStreamHeader should be 5 32-bits words");
+
+/**
+ * Helper class used to decompress Seekable ZStreams.
+ */
+class SeekableZStream {
+public:
+ /* Initialize from the given buffer. Returns whether initialization
+ * succeeded (true) or failed (false). */
+ bool Init(const void *buf, size_t length);
+
+ /* Decompresses starting from the given chunk. The decompressed data is
+ * stored at the given location. The given length, in bytes, indicates
+ * how much data to decompress. If length is 0, then exactly one chunk
+ * is decompressed.
+ * Returns whether decompression succeeded (true) or failed (false). */
+ bool Decompress(void *where, size_t chunk, size_t length = 0);
+
+ /* Decompresses the given chunk at the given address. If a length is given,
+ * only decompresses that amount of data instead of the entire chunk.
+ * Returns whether decompression succeeded (true) or failed (false). */
+ bool DecompressChunk(void *where, size_t chunk, size_t length = 0);
+
+ /* Returns the uncompressed size of the complete zstream */
+ size_t GetUncompressedSize() const
+ {
+ return (offsetTable.numElements() - 1) * chunkSize + lastChunkSize;
+ }
+
+ /* Returns the chunk size of the given chunk */
+ size_t GetChunkSize(size_t chunk = 0) const {
+ return (chunk == offsetTable.numElements() - 1) ? lastChunkSize : chunkSize;
+ }
+
+ /* Returns the number of chunks */
+ size_t GetChunksNum() const {
+ return offsetTable.numElements();
+ }
+
+ /**
+ * Filters used to improve compression rate.
+ */
+ enum FilterDirection {
+ FILTER,
+ UNFILTER
+ };
+ typedef void (*ZStreamFilter)(off_t, FilterDirection,
+ unsigned char *, size_t);
+
+ enum FilterId {
+ NONE,
+ BCJ_THUMB,
+ BCJ_ARM,
+ BCJ_X86,
+ FILTER_MAX
+ };
+ static ZStreamFilter GetFilter(FilterId id);
+
+ static ZStreamFilter GetFilter(uint16_t id) {
+ return GetFilter(static_cast<FilterId>(id));
+ }
+
+private:
+  /* Raw seekable zstream buffer */
+ const unsigned char *buffer;
+
+ /* Total size of the stream, including the 4 magic bytes. */
+ uint32_t totalSize;
+
+ /* Chunk size */
+ uint32_t chunkSize;
+
+ /* Size of last chunk (> 0, <= Chunk size) */
+ uint32_t lastChunkSize;
+
+ /* windowBits value used when deflating */
+ int windowBits;
+
+ /* Offsets table */
+ Array<le_uint32> offsetTable;
+
+ /* Filter */
+ ZStreamFilter filter;
+
+ /* Deflate dictionary */
+ Array<unsigned char> dictionary;
+
+ /* Special allocator for inflate to use the same buffers for every chunk */
+ zxx_stream::StaticAllocator allocator;
+};
+
+inline void
+operator++(SeekableZStream::FilterId &other)
+{
+ const int orig = static_cast<int>(other);
+ other = static_cast<SeekableZStream::FilterId>(orig + 1);
+}
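+
+/* Illustrative sketch (not part of the original source): the operator++
+ * overload above allows enumerating filter ids, roughly as szip does when
+ * picking the best filter:
+ *
+ *   for (SeekableZStream::FilterId id = SeekableZStream::NONE;
+ *        id < SeekableZStream::FILTER_MAX; ++id) {
+ *     SeekableZStream::ZStreamFilter f = SeekableZStream::GetFilter(id);
+ *     // ... try compressing with f (which is nullptr for NONE) ...
+ *   }
+ */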
+
+#endif /* SeekableZStream_h */
diff --git a/mozglue/linker/Utils.h b/mozglue/linker/Utils.h
new file mode 100644
index 000000000..c5314ef60
--- /dev/null
+++ b/mozglue/linker/Utils.h
@@ -0,0 +1,618 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Utils_h
+#define Utils_h
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include "mozilla/Assertions.h"
+#include "mozilla/Scoped.h"
+
+/**
+ * On architectures that are little endian and that support unaligned reads,
+ * we can use the direct types, but on others, we want a special class
+ * to handle conversion and alignment issues.
+ */
+#if !defined(DEBUG) && (defined(__i386__) || defined(__x86_64__))
+typedef uint16_t le_uint16;
+typedef uint32_t le_uint32;
+#else
+
+/**
+ * Template that finds an unsigned int type from a (computed) bit size
+ */
+template <int s> struct UInt { };
+template <> struct UInt<16> { typedef uint16_t Type; };
+template <> struct UInt<32> { typedef uint32_t Type; };
+
+/**
+ * Template to access 2 n-bit sized words as a 2*n-bit sized word, doing
+ * conversion from little endian and avoiding alignment issues.
+ */
+template <typename T>
+class le_to_cpu
+{
+public:
+ typedef typename UInt<16 * sizeof(T)>::Type Type;
+
+ operator Type() const
+ {
+ return (b << (sizeof(T) * 8)) | a;
+ }
+
+ const le_to_cpu& operator =(const Type &v)
+ {
+ a = v & ((1 << (sizeof(T) * 8)) - 1);
+ b = v >> (sizeof(T) * 8);
+ return *this;
+ }
+
+ le_to_cpu() { }
+ le_to_cpu(const Type &v)
+ {
+ operator =(v);
+ }
+
+ const le_to_cpu& operator +=(const Type &v)
+ {
+ return operator =(operator Type() + v);
+ }
+
+ const le_to_cpu& operator ++(int)
+ {
+ return operator =(operator Type() + 1);
+ }
+
+private:
+ T a, b;
+};
+
+/**
+ * Type definitions
+ */
+typedef le_to_cpu<unsigned char> le_uint16;
+typedef le_to_cpu<le_uint16> le_uint32;
+#endif
+
+
+/**
+ * AutoCloseFD is a RAII wrapper for POSIX file descriptors
+ */
+struct AutoCloseFDTraits
+{
+ typedef int type;
+ static int empty() { return -1; }
+ static void release(int fd) { if (fd != -1) close(fd); }
+};
+typedef mozilla::Scoped<AutoCloseFDTraits> AutoCloseFD;
+
+/**
+ * AutoCloseFILE is a RAII wrapper for POSIX streams
+ */
+struct AutoCloseFILETraits
+{
+ typedef FILE *type;
+ static FILE *empty() { return nullptr; }
+ static void release(FILE *f) { if (f) fclose(f); }
+};
+typedef mozilla::Scoped<AutoCloseFILETraits> AutoCloseFILE;
+
+/**
+ * Page alignment helpers
+ */
+static inline size_t PageSize()
+{
+ return 4096;
+}
+
+static inline uintptr_t AlignedPtr(uintptr_t ptr, size_t alignment)
+{
+ return ptr & ~(alignment - 1);
+}
+
+template <typename T>
+static inline T *AlignedPtr(T *ptr, size_t alignment)
+{
+ return reinterpret_cast<T *>(
+ AlignedPtr(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+template <typename T>
+static inline T PageAlignedPtr(T ptr)
+{
+ return AlignedPtr(ptr, PageSize());
+}
+
+static inline uintptr_t AlignedEndPtr(uintptr_t ptr, size_t alignment)
+{
+ return AlignedPtr(ptr + alignment - 1, alignment);
+}
+
+template <typename T>
+static inline T *AlignedEndPtr(T *ptr, size_t alignment)
+{
+ return reinterpret_cast<T *>(
+ AlignedEndPtr(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+template <typename T>
+static inline T PageAlignedEndPtr(T ptr)
+{
+ return AlignedEndPtr(ptr, PageSize());
+}
+
+static inline size_t AlignedSize(size_t size, size_t alignment)
+{
+ return (size + alignment - 1) & ~(alignment - 1);
+}
+
+static inline size_t PageAlignedSize(size_t size)
+{
+ return AlignedSize(size, PageSize());
+}
+
+static inline bool IsAlignedPtr(uintptr_t ptr, size_t alignment)
+{
+ return ptr % alignment == 0;
+}
+
+template <typename T>
+static inline bool IsAlignedPtr(T *ptr, size_t alignment)
+{
+ return IsAlignedPtr(reinterpret_cast<uintptr_t>(ptr), alignment);
+}
+
+template <typename T>
+static inline bool IsPageAlignedPtr(T ptr)
+{
+ return IsAlignedPtr(ptr, PageSize());
+}
+
+static inline bool IsAlignedSize(size_t size, size_t alignment)
+{
+ return size % alignment == 0;
+}
+
+static inline bool IsPageAlignedSize(size_t size)
+{
+ return IsAlignedSize(size, PageSize());
+}
+
+static inline size_t PageNumber(size_t size)
+{
+ return (size + PageSize() - 1) / PageSize();
+}
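+
+/* Illustrative sketch (not part of the original source): with the 4096-byte
+ * page size assumed by PageSize() above, the helpers behave as follows:
+ *
+ *   PageAlignedPtr(uintptr_t(0x1234))    == 0x1000  // rounds down
+ *   PageAlignedEndPtr(uintptr_t(0x1234)) == 0x2000  // rounds up
+ *   PageAlignedSize(size_t(1))           == 0x1000
+ *   IsPageAlignedSize(size_t(0x2000))    == true
+ *   PageNumber(size_t(0x1001))           == 2       // pages needed
+ */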
+
+/**
+ * MemoryRange stores a pointer, size pair.
+ */
+class MemoryRange
+{
+public:
+ MemoryRange(void *buf, size_t length): buf(buf), length(length) { }
+
+ void Assign(void *b, size_t len) {
+ buf = b;
+ length = len;
+ }
+
+ void Assign(const MemoryRange& other) {
+ buf = other.buf;
+ length = other.length;
+ }
+
+ void *get() const
+ {
+ return buf;
+ }
+
+ operator void *() const
+ {
+ return buf;
+ }
+
+ operator unsigned char *() const
+ {
+ return reinterpret_cast<unsigned char *>(buf);
+ }
+
+ bool operator ==(void *ptr) const {
+ return buf == ptr;
+ }
+
+ bool operator ==(unsigned char *ptr) const {
+ return buf == ptr;
+ }
+
+ void *operator +(off_t offset) const
+ {
+ return reinterpret_cast<char *>(buf) + offset;
+ }
+
+ /**
+ * Returns whether the given address is within the mapped range
+ */
+ bool Contains(void *ptr) const
+ {
+ return (ptr >= buf) && (ptr < reinterpret_cast<char *>(buf) + length);
+ }
+
+ /**
+ * Returns the length of the mapped range
+ */
+ size_t GetLength() const
+ {
+ return length;
+ }
+
+ static MemoryRange mmap(void *addr, size_t length, int prot, int flags,
+ int fd, off_t offset) {
+ return MemoryRange(::mmap(addr, length, prot, flags, fd, offset), length);
+ }
+
+private:
+ void *buf;
+ size_t length;
+};
+
+/**
+ * MappedPtr is a RAII wrapper for mmap()ed memory. It can be used as
+ * a simple void * or unsigned char *.
+ *
+ * It is defined as a derivative of a template that allows using a
+ * different unmapping strategy.
+ */
+template <typename T>
+class GenericMappedPtr: public MemoryRange
+{
+public:
+ GenericMappedPtr(void *buf, size_t length): MemoryRange(buf, length) { }
+ GenericMappedPtr(const MemoryRange& other): MemoryRange(other) { }
+ GenericMappedPtr(): MemoryRange(MAP_FAILED, 0) { }
+
+ void Assign(void *b, size_t len) {
+ if (get() != MAP_FAILED)
+ static_cast<T *>(this)->munmap(get(), GetLength());
+ MemoryRange::Assign(b, len);
+ }
+
+ void Assign(const MemoryRange& other) {
+ Assign(other.get(), other.GetLength());
+ }
+
+ ~GenericMappedPtr()
+ {
+ if (get() != MAP_FAILED)
+ static_cast<T *>(this)->munmap(get(), GetLength());
+ }
+
+ void release()
+ {
+ MemoryRange::Assign(MAP_FAILED, 0);
+ }
+};
+
+struct MappedPtr: public GenericMappedPtr<MappedPtr>
+{
+ MappedPtr(void *buf, size_t length)
+ : GenericMappedPtr<MappedPtr>(buf, length) { }
+ MappedPtr(const MemoryRange& other)
+ : GenericMappedPtr<MappedPtr>(other) { }
+ MappedPtr(): GenericMappedPtr<MappedPtr>() { }
+
+private:
+ friend class GenericMappedPtr<MappedPtr>;
+ void munmap(void *buf, size_t length)
+ {
+ ::munmap(buf, length);
+ }
+};
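+
+/* Illustrative sketch (not part of the original source): MappedPtr unmaps at
+ * scope exit, so a mapping needs no explicit cleanup; `fd` and `length` are
+ * assumed valid.
+ *
+ *   MappedPtr map(MemoryRange::mmap(nullptr, length, PROT_READ, MAP_PRIVATE,
+ *                                   fd, 0));
+ *   if (map.get() != MAP_FAILED) {
+ *     unsigned char *data = map; // implicit conversion
+ *     // ... read data[0..length) ...
+ *   } // munmap() happens here
+ */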
+
+/**
+ * UnsizedArray is a way to access raw arrays of data in memory.
+ *
+ * struct S { ... };
+ * UnsizedArray<S> a(buf);
+ * UnsizedArray<S> b; b.Init(buf);
+ *
+ * This is roughly equivalent to
+ * const S *a = reinterpret_cast<const S *>(buf);
+ * const S *b = nullptr; b = reinterpret_cast<const S *>(buf);
+ *
+ * An UnsizedArray has no known length, and it's up to the caller to make
+ * sure the accessed memory is mapped and makes sense.
+ */
+template <typename T>
+class UnsizedArray
+{
+public:
+ typedef size_t idx_t;
+
+ /**
+ * Constructors and Initializers
+ */
+ UnsizedArray(): contents(nullptr) { }
+ UnsizedArray(const void *buf): contents(reinterpret_cast<const T *>(buf)) { }
+
+ void Init(const void *buf)
+ {
+ MOZ_ASSERT(contents == nullptr);
+ contents = reinterpret_cast<const T *>(buf);
+ }
+
+ /**
+ * Returns the nth element of the array
+ */
+ const T &operator[](const idx_t index) const
+ {
+ MOZ_ASSERT(contents);
+ return contents[index];
+ }
+
+ operator const T *() const
+ {
+ return contents;
+ }
+ /**
+ * Returns whether the array points somewhere
+ */
+ operator bool() const
+ {
+ return contents != nullptr;
+ }
+private:
+ const T *contents;
+};
+
+/**
+ * Array, like UnsizedArray, is a way to access raw arrays of data in memory.
+ * Unlike UnsizedArray, it has a known length, and is enumerable with an
+ * iterator.
+ *
+ * struct S { ... };
+ * Array<S> a(buf, len);
+ * UnsizedArray<S> b; b.Init(buf, len);
+ *
+ * In the above examples, len is the number of elements in the array. It is
+ * also possible to initialize an Array with the buffer size:
+ *
+ * Array<S> c; c.InitSize(buf, size);
+ *
+ * It is also possible to initialize an Array in two steps, only providing
+ * one piece of data at a time:
+ *
+ * Array<S> d;
+ * d.Init(buf);
+ * d.Init(len); // or d.InitSize(size);
+ *
+ */
+template <typename T>
+class Array: public UnsizedArray<T>
+{
+public:
+ typedef typename UnsizedArray<T>::idx_t idx_t;
+
+ /**
+ * Constructors and Initializers
+ */
+ Array(): UnsizedArray<T>(), length(0) { }
+ Array(const void *buf, const idx_t length)
+ : UnsizedArray<T>(buf), length(length) { }
+
+ void Init(const void *buf)
+ {
+ UnsizedArray<T>::Init(buf);
+ }
+
+ void Init(const idx_t len)
+ {
+ MOZ_ASSERT(length == 0);
+ length = len;
+ }
+
+ void InitSize(const idx_t size)
+ {
+ Init(size / sizeof(T));
+ }
+
+ void Init(const void *buf, const idx_t len)
+ {
+ UnsizedArray<T>::Init(buf);
+ Init(len);
+ }
+
+ void InitSize(const void *buf, const idx_t size)
+ {
+ UnsizedArray<T>::Init(buf);
+ InitSize(size);
+ }
+
+ /**
+ * Returns the nth element of the array
+ */
+ const T &operator[](const idx_t index) const
+ {
+ MOZ_ASSERT(index < length);
+ MOZ_ASSERT(operator bool());
+ return UnsizedArray<T>::operator[](index);
+ }
+
+ /**
+ * Returns the number of elements in the array
+ */
+ idx_t numElements() const
+ {
+ return length;
+ }
+
+ /**
+ * Returns whether the array points somewhere and has at least one element.
+ */
+ operator bool() const
+ {
+ return (length > 0) && UnsizedArray<T>::operator bool();
+ }
+
+ /**
+ * Iterator for an Array. Use is similar to that of STL const_iterators:
+ *
+ * struct S { ... };
+ * Array<S> a(buf, len);
+ * for (Array<S>::iterator it = a.begin(); it < a.end(); ++it) {
+ * // Do something with *it.
+ * }
+ */
+ class iterator
+ {
+ public:
+ iterator(): item(nullptr) { }
+
+ const T &operator *() const
+ {
+ return *item;
+ }
+
+ const T *operator ->() const
+ {
+ return item;
+ }
+
+ iterator &operator ++()
+ {
+ ++item;
+ return *this;
+ }
+
+ bool operator<(const iterator &other) const
+ {
+ return item < other.item;
+ }
+ protected:
+ friend class Array<T>;
+ iterator(const T &item): item(&item) { }
+
+ private:
+ const T *item;
+ };
+
+ /**
+ * Returns an iterator pointing at the beginning of the Array
+ */
+ iterator begin() const {
+ if (length)
+ return iterator(UnsizedArray<T>::operator[](0));
+ return iterator();
+ }
+
+ /**
+ * Returns an iterator pointing past the end of the Array
+ */
+ iterator end() const {
+ if (length)
+ return iterator(UnsizedArray<T>::operator[](length));
+ return iterator();
+ }
+
+ /**
+ * Reverse iterator for an Array. Use is similar to that of STL
+ * const_reverse_iterators:
+ *
+ * struct S { ... };
+ * Array<S> a(buf, len);
+ * for (Array<S>::reverse_iterator it = a.rbegin(); it < a.rend(); ++it) {
+ * // Do something with *it.
+ * }
+ */
+ class reverse_iterator
+ {
+ public:
+ reverse_iterator(): item(nullptr) { }
+
+ const T &operator *() const
+ {
+ const T *tmp = item;
+ return *--tmp;
+ }
+
+ const T *operator ->() const
+ {
+ return &operator*();
+ }
+
+ reverse_iterator &operator ++()
+ {
+ --item;
+ return *this;
+ }
+
+ bool operator<(const reverse_iterator &other) const
+ {
+ return item > other.item;
+ }
+ protected:
+ friend class Array<T>;
+ reverse_iterator(const T &item): item(&item) { }
+
+ private:
+ const T *item;
+ };
+
+ /**
+ * Returns a reverse iterator pointing at the end of the Array
+ */
+ reverse_iterator rbegin() const {
+ if (length)
+ return reverse_iterator(UnsizedArray<T>::operator[](length));
+ return reverse_iterator();
+ }
+
+ /**
+ * Returns a reverse iterator pointing past the beginning of the Array
+ */
+ reverse_iterator rend() const {
+ if (length)
+ return reverse_iterator(UnsizedArray<T>::operator[](0));
+ return reverse_iterator();
+ }
+private:
+ idx_t length;
+};
+
+/**
+ * Transforms a pointer-to-function to a pointer-to-object pointing at the
+ * same address.
+ */
+template <typename T>
+void *FunctionPtr(T func)
+{
+ union {
+ void *ptr;
+ T func;
+ } f;
+ f.func = func;
+ return f.ptr;
+}
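+
+/* Illustrative sketch (not part of the original source): FunctionPtr is
+ * handy where an API takes a void * but the address of a function is what's
+ * wanted, e.g. with dladdr() (assuming <dlfcn.h>):
+ *
+ *   Dl_info info;
+ *   dladdr(FunctionPtr(&PageSize), &info);
+ */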
+
+class AutoLock {
+public:
+ AutoLock(pthread_mutex_t *mutex): mutex(mutex)
+ {
+ if (pthread_mutex_lock(mutex))
+ MOZ_CRASH("pthread_mutex_lock failed");
+ }
+ ~AutoLock()
+ {
+ if (pthread_mutex_unlock(mutex))
+ MOZ_CRASH("pthread_mutex_unlock failed");
+ }
+private:
+ pthread_mutex_t *mutex;
+};
+
+#endif /* Utils_h */
+
diff --git a/mozglue/linker/XZStream.cpp b/mozglue/linker/XZStream.cpp
new file mode 100644
index 000000000..bd71655f5
--- /dev/null
+++ b/mozglue/linker/XZStream.cpp
@@ -0,0 +1,213 @@
+#include "XZStream.h"
+
+#include <algorithm>
+#include "mozilla/Assertions.h"
+#include "Logging.h"
+
+// LZMA dictionary size, should have a minimum size for the given compression
+// rate, see XZ Utils docs for details.
+static const uint32_t kDictSize = 1 << 24;
+
+static const size_t kFooterSize = 12;
+
+// Parses a variable-length integer (VLI),
+// see http://tukaani.org/xz/xz-file-format.txt for details.
+static size_t
+ParseVarLenInt(const uint8_t* aBuf, size_t aBufSize, uint64_t* aValue)
+{
+ if (!aBufSize) {
+ return 0;
+ }
+  aBufSize = std::min(size_t(9), aBufSize);
+
+ *aValue = aBuf[0] & 0x7F;
+ size_t i = 0;
+
+ while (aBuf[i++] & 0x80) {
+ if (i >= aBufSize || aBuf[i] == 0x0) {
+ return 0;
+ }
+ *aValue |= static_cast<uint64_t>(aBuf[i] & 0x7F) << (i * 7);
+ }
+ return i;
+}
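+
+// Illustrative sketch (not part of the original source): a two-byte VLI.
+// 0xE5 has the continuation bit set and contributes its low 7 bits (0x65);
+// 0x12 contributes the next 7 bits, giving (0x12 << 7) | 0x65 == 0x965.
+//
+//   const uint8_t buf[] = {0xE5, 0x12};
+//   uint64_t value;
+//   size_t used = ParseVarLenInt(buf, sizeof(buf), &value);
+//   // used == 2, value == 0x965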
+
+/* static */ bool
+XZStream::IsXZ(const void* aBuf, size_t aBufSize)
+{
+ static const uint8_t kXzMagic[] = {0xfd, '7', 'z', 'X', 'Z', 0x0};
+ MOZ_ASSERT(aBuf);
+ return aBufSize > sizeof(kXzMagic) &&
+ !memcmp(reinterpret_cast<const void*>(kXzMagic), aBuf, sizeof(kXzMagic));
+}
+
+XZStream::XZStream(const void* aInBuf, size_t aInSize)
+ : mInBuf(static_cast<const uint8_t*>(aInBuf))
+ , mUncompSize(0)
+ , mDec(nullptr)
+{
+ mBuffers.in = mInBuf;
+ mBuffers.in_pos = 0;
+ mBuffers.in_size = aInSize;
+}
+
+XZStream::~XZStream()
+{
+ xz_dec_end(mDec);
+}
+
+bool
+XZStream::Init()
+{
+#ifdef XZ_USE_CRC64
+ xz_crc64_init();
+#endif
+ xz_crc32_init();
+
+ mDec = xz_dec_init(XZ_DYNALLOC, kDictSize);
+
+ if (!mDec) {
+ return false;
+ }
+
+ mUncompSize = ParseUncompressedSize();
+
+ return true;
+}
+
+size_t
+XZStream::Decode(void* aOutBuf, size_t aOutSize)
+{
+ if (!mDec) {
+ return 0;
+ }
+
+ mBuffers.out = static_cast<uint8_t*>(aOutBuf);
+ mBuffers.out_pos = 0;
+ mBuffers.out_size = aOutSize;
+
+ while (mBuffers.in_pos < mBuffers.in_size &&
+ mBuffers.out_pos < mBuffers.out_size) {
+ const xz_ret ret = xz_dec_run(mDec, &mBuffers);
+
+ switch (ret) {
+ case XZ_STREAM_END:
+ // Stream ended, the next loop iteration should terminate.
+ MOZ_ASSERT(mBuffers.in_pos == mBuffers.in_size);
+ MOZ_FALLTHROUGH;
+#ifdef XZ_DEC_ANY_CHECK
+ case XZ_UNSUPPORTED_CHECK:
+ // Ignore unsupported check.
+ MOZ_FALLTHROUGH;
+#endif
+ case XZ_OK:
+ // Chunk decoded, proceed.
+ break;
+
+ case XZ_MEM_ERROR:
+ ERROR("XZ decoding: memory allocation failed");
+ return 0;
+
+ case XZ_MEMLIMIT_ERROR:
+ ERROR("XZ decoding: memory usage limit reached");
+ return 0;
+
+ case XZ_FORMAT_ERROR:
+ ERROR("XZ decoding: invalid stream format");
+ return 0;
+
+ case XZ_OPTIONS_ERROR:
+ ERROR("XZ decoding: unsupported header options");
+ return 0;
+
+ case XZ_DATA_ERROR:
+ MOZ_FALLTHROUGH;
+ case XZ_BUF_ERROR:
+ ERROR("XZ decoding: corrupt input stream");
+ return 0;
+
+ default:
+ MOZ_ASSERT_UNREACHABLE("XZ decoding: unknown error condition");
+ return 0;
+ }
+ }
+ return mBuffers.out_pos;
+}
+
+size_t
+XZStream::RemainingInput() const
+{
+ return mBuffers.in_size - mBuffers.in_pos;
+}
+
+size_t
+XZStream::Size() const
+{
+ return mBuffers.in_size;
+}
+
+size_t
+XZStream::UncompressedSize() const
+{
+ return mUncompSize;
+}
+
+size_t
+XZStream::ParseIndexSize() const
+{
+ static const uint8_t kFooterMagic[] = {'Y', 'Z'};
+
+ const uint8_t* footer = mInBuf + mBuffers.in_size - kFooterSize;
+ // The magic bytes are at the end of the footer.
+ if (memcmp(reinterpret_cast<const void*>(kFooterMagic),
+ footer + kFooterSize - sizeof(kFooterMagic),
+ sizeof(kFooterMagic))) {
+ // Not a valid footer at stream end.
+ return 0;
+ }
+ // Backward size is a 32 bit LE integer field positioned after the 32 bit CRC32
+ // code. It encodes the index size as a multiple of 4 bytes with a minimum
+ // size of 4 bytes.
+  const uint32_t backwardSize = footer[4] | (footer[5] << 8) |
+                                (footer[6] << 16) | (uint32_t(footer[7]) << 24);
+ return (backwardSize + 1) * 4;
+}
+
+size_t
+XZStream::ParseUncompressedSize() const
+{
+ static const uint8_t kIndexIndicator[] = {0x0};
+
+ const size_t indexSize = ParseIndexSize();
+ if (!indexSize) {
+ return 0;
+ }
+  // The footer directly follows the index, so we can use it as a reference.
+ const uint8_t* end = mInBuf + mBuffers.in_size;
+ const uint8_t* index = end - kFooterSize - indexSize;
+
+ // The index consists of a one byte indicator followed by a VLI field for the
+ // number of records (1 expected) followed by a list of records. One record
+ // contains a VLI field for unpadded size followed by a VLI field for
+ // uncompressed size.
+ if (memcmp(reinterpret_cast<const void*>(kIndexIndicator),
+ index, sizeof(kIndexIndicator))) {
+ // Not a valid index.
+ return 0;
+ }
+
+ index += sizeof(kIndexIndicator);
+ uint64_t numRecords = 0;
+ index += ParseVarLenInt(index, end - index, &numRecords);
+ if (!numRecords) {
+ return 0;
+ }
+ uint64_t unpaddedSize = 0;
+ index += ParseVarLenInt(index, end - index, &unpaddedSize);
+ if (!unpaddedSize) {
+ return 0;
+ }
+ uint64_t uncompressedSize = 0;
+ index += ParseVarLenInt(index, end - index, &uncompressedSize);
+
+ return uncompressedSize;
+}
diff --git a/mozglue/linker/XZStream.h b/mozglue/linker/XZStream.h
new file mode 100644
index 000000000..76a979b13
--- /dev/null
+++ b/mozglue/linker/XZStream.h
@@ -0,0 +1,48 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef XZSTREAM_h
+#define XZSTREAM_h
+
+#include <cstdlib>
+
+#define XZ_DEC_DYNALLOC
+#include "xz.h"
+
+// Used to decode XZ stream buffers.
+class XZStream
+{
+public:
+  // Returns whether the provided buffer is likely an XZ stream.
+ static bool IsXZ(const void* aBuf, size_t aBufSize);
+
+ // Creates a XZ stream object for the given input buffer.
+ XZStream(const void* aInBuf, size_t aInSize);
+ ~XZStream();
+
+ // Initializes the decoder and returns whether decoding may commence.
+ bool Init();
+ // Decodes the next chunk of input into the given output buffer.
+ size_t Decode(void* aOutBuf, size_t aOutSize);
+ // Returns the number of yet undecoded bytes in the input buffer.
+ size_t RemainingInput() const;
+ // Returns the total number of bytes in the input buffer (compressed size).
+ size_t Size() const;
+ // Returns the expected final number of bytes in the output buffer.
+ // Note: will return 0 before successful Init().
+ size_t UncompressedSize() const;
+
+private:
+ // Parses the stream footer and returns the size of the index in bytes.
+ size_t ParseIndexSize() const;
+ // Parses the stream index and returns the expected uncompressed size in bytes.
+ size_t ParseUncompressedSize() const;
+
+ const uint8_t* mInBuf;
+ size_t mUncompSize;
+ xz_buf mBuffers;
+ xz_dec* mDec;
+};
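+
+// Illustrative sketch (not part of the original source): typical decoding
+// flow, assuming buf and size describe a complete XZ stream.
+//
+//   if (XZStream::IsXZ(buf, size)) {
+//     XZStream xz(buf, size);
+//     if (xz.Init() && xz.UncompressedSize()) {
+//       std::vector<uint8_t> out(xz.UncompressedSize());
+//       if (xz.Decode(out.data(), out.size()) == out.size()) {
+//         // out now holds the decompressed data
+//       }
+//     }
+//   }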
+
+#endif // XZSTREAM_h
diff --git a/mozglue/linker/Zip.cpp b/mozglue/linker/Zip.cpp
new file mode 100644
index 000000000..d141eaa45
--- /dev/null
+++ b/mozglue/linker/Zip.cpp
@@ -0,0 +1,229 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <cstdlib>
+#include <algorithm>
+#include "Logging.h"
+#include "Zip.h"
+
+already_AddRefed<Zip>
+Zip::Create(const char *filename)
+{
+ /* Open and map the file in memory */
+ AutoCloseFD fd(open(filename, O_RDONLY));
+ if (fd == -1) {
+ ERROR("Error opening %s: %s", filename, strerror(errno));
+ return nullptr;
+ }
+ struct stat st;
+ if (fstat(fd, &st) == -1) {
+ ERROR("Error stating %s: %s", filename, strerror(errno));
+ return nullptr;
+ }
+ size_t size = st.st_size;
+ if (size <= sizeof(CentralDirectoryEnd)) {
+ ERROR("Error reading %s: too short", filename);
+ return nullptr;
+ }
+ void *mapped = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
+ if (mapped == MAP_FAILED) {
+ ERROR("Error mmapping %s: %s", filename, strerror(errno));
+ return nullptr;
+ }
+ DEBUG_LOG("Mapped %s @%p", filename, mapped);
+
+ return Create(filename, mapped, size);
+}
+
+already_AddRefed<Zip>
+Zip::Create(const char *filename, void *mapped, size_t size)
+{
+ RefPtr<Zip> zip = new Zip(filename, mapped, size);
+
+ // If neither the first Local File entry nor central directory entries
+ // have been found, the zip was invalid.
+ if (!zip->nextFile && !zip->entries) {
+ ERROR("%s - Invalid zip", filename);
+ return nullptr;
+ }
+
+ ZipCollection::Singleton.Register(zip);
+ return zip.forget();
+}
+
+Zip::Zip(const char *filename, void *mapped, size_t size)
+: name(filename ? strdup(filename) : nullptr)
+, mapped(mapped)
+, size(size)
+, nextFile(LocalFile::validate(mapped)) // first Local File entry
+, nextDir(nullptr)
+, entries(nullptr)
+{
+ pthread_mutex_init(&mutex, nullptr);
+ // If the first local file entry couldn't be found (which can happen
+ // with optimized jars), check the first central directory entry.
+ if (!nextFile)
+ GetFirstEntry();
+}
+
+Zip::~Zip()
+{
+ ZipCollection::Forget(this);
+ if (name) {
+ munmap(mapped, size);
+ DEBUG_LOG("Unmapped %s @%p", name, mapped);
+ free(name);
+ }
+ pthread_mutex_destroy(&mutex);
+}
+
+bool
+Zip::GetStream(const char *path, Zip::Stream *out) const
+{
+ AutoLock lock(&mutex);
+
+ DEBUG_LOG("%s - GetFile %s", name, path);
+  /* Fast path: if the Local File header in store matches, we can return the
+ * corresponding stream right away.
+ * However, the Local File header may not contain enough information, in
+   * which case the 3rd bit of the generalFlag is set. Unfortunately, this
+ * bit is also set in some archives even when we do have the data (most
+ * notably the android packages as built by the Mozilla build system).
+ * So instead of testing the generalFlag bit, only use the fast path when
+ * we haven't read the central directory entries yet, and when the
+ * compressed size as defined in the header is not filled (which is a
+ * normal condition for the bit to be set). */
+ if (nextFile && nextFile->GetName().Equals(path) &&
+ !entries && (nextFile->compressedSize != 0)) {
+ DEBUG_LOG("%s - %s was next file: fast path", name, path);
+ /* Fill Stream info from Local File header content */
+ const char *data = reinterpret_cast<const char *>(nextFile->GetData());
+ out->compressedBuf = data;
+ out->compressedSize = nextFile->compressedSize;
+ out->uncompressedSize = nextFile->uncompressedSize;
+ out->CRC32 = nextFile->CRC32;
+ out->type = static_cast<Stream::Type>(uint16_t(nextFile->compression));
+
+ /* Find the next Local File header. It is usually simply following the
+ * compressed stream, but in cases where the 3rd bit of the generalFlag
+ * is set, there is a Data Descriptor header before. */
+ data += nextFile->compressedSize;
+ if ((nextFile->generalFlag & 0x8) && DataDescriptor::validate(data)) {
+ data += sizeof(DataDescriptor);
+ }
+ nextFile = LocalFile::validate(data);
+ return true;
+ }
+
+ /* If the directory entry we have in store doesn't match, scan the Central
+ * Directory for the entry corresponding to the given path */
+ if (!nextDir || !nextDir->GetName().Equals(path)) {
+ const DirectoryEntry *entry = GetFirstEntry();
+ DEBUG_LOG("%s - Scan directory entries in search for %s", name, path);
+ while (entry && !entry->GetName().Equals(path)) {
+ entry = entry->GetNext();
+ }
+ nextDir = entry;
+ }
+ if (!nextDir) {
+ DEBUG_LOG("%s - Couldn't find %s", name, path);
+ return false;
+ }
+
+ /* Find the Local File header corresponding to the Directory entry that
+ * was found. */
+ nextFile = LocalFile::validate(static_cast<const char *>(mapped)
+ + nextDir->offset);
+ if (!nextFile) {
+ ERROR("%s - Couldn't find the Local File header for %s", name, path);
+ return false;
+ }
+
+ /* Fill Stream info from Directory entry content */
+ const char *data = reinterpret_cast<const char *>(nextFile->GetData());
+ out->compressedBuf = data;
+ out->compressedSize = nextDir->compressedSize;
+ out->uncompressedSize = nextDir->uncompressedSize;
+ out->CRC32 = nextDir->CRC32;
+ out->type = static_cast<Stream::Type>(uint16_t(nextDir->compression));
+
+ /* Store the next directory entry */
+ nextDir = nextDir->GetNext();
+ nextFile = nullptr;
+ return true;
+}
+
+const Zip::DirectoryEntry *
+Zip::GetFirstEntry() const
+{
+ if (entries)
+ return entries;
+
+ const CentralDirectoryEnd *end = nullptr;
+ const char *_end = static_cast<const char *>(mapped) + size
+ - sizeof(CentralDirectoryEnd);
+
+ /* Scan for the Central Directory End */
+ for (; _end > mapped && !end; _end--)
+ end = CentralDirectoryEnd::validate(_end);
+ if (!end) {
+ ERROR("%s - Couldn't find end of central directory record", name);
+ return nullptr;
+ }
+
+ entries = DirectoryEntry::validate(static_cast<const char *>(mapped)
+ + end->offset);
+ if (!entries) {
+ ERROR("%s - Couldn't find central directory record", name);
+ }
+ return entries;
+}
+
+ZipCollection ZipCollection::Singleton;
+
+static pthread_mutex_t sZipCollectionMutex = PTHREAD_MUTEX_INITIALIZER;
+
+already_AddRefed<Zip>
+ZipCollection::GetZip(const char *path)
+{
+ {
+ AutoLock lock(&sZipCollectionMutex);
+ /* Search the list of Zips we already have for a match */
+ for (std::vector<Zip *>::iterator it = Singleton.zips.begin();
+ it < Singleton.zips.end(); ++it) {
+ if ((*it)->GetName() && (strcmp((*it)->GetName(), path) == 0)) {
+ RefPtr<Zip> zip = *it;
+ return zip.forget();
+ }
+ }
+ }
+ return Zip::Create(path);
+}
+
+void
+ZipCollection::Register(Zip *zip)
+{
+ AutoLock lock(&sZipCollectionMutex);
+ DEBUG_LOG("ZipCollection::Register(\"%s\")", zip->GetName());
+ Singleton.zips.push_back(zip);
+}
+
+void
+ZipCollection::Forget(Zip *zip)
+{
+ AutoLock lock(&sZipCollectionMutex);
+ DEBUG_LOG("ZipCollection::Forget(\"%s\")", zip->GetName());
+ std::vector<Zip *>::iterator it = std::find(Singleton.zips.begin(),
+ Singleton.zips.end(), zip);
+  if (it != Singleton.zips.end()) {
+    Singleton.zips.erase(it);
+  } else {
+    DEBUG_LOG("ZipCollection::Forget: didn't find \"%s\" in bookkeeping",
+              zip->GetName());
+  }
+}
diff --git a/mozglue/linker/Zip.h b/mozglue/linker/Zip.h
new file mode 100644
index 000000000..29e42592a
--- /dev/null
+++ b/mozglue/linker/Zip.h
@@ -0,0 +1,500 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Zip_h
+#define Zip_h
+
+#include <cstring>
+#include <stdint.h>
+#include <vector>
+#include <zlib.h>
+#include <pthread.h>
+#include "Utils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
+
+/**
+ * Helper class wrapping z_stream to avoid malloc() calls during
+ * inflate. Do not use for deflate.
+ * inflateInit allocates two buffers:
+ * - one for its internal state, which is "approximately 10K bytes" according
+ * to inflate.h from zlib.
+ * - one for the compression window, which depends on the window size passed
+ * to inflateInit2, but is never greater than 32K (1 << MAX_WBITS).
+ * Those buffers are created at instantiation time instead of when calling
+ * inflateInit2. When inflateInit2 is called, it will call zxx_stream::Alloc
+ * to get both these buffers. zxx_stream::Alloc will choose one of the
+ * pre-allocated buffers depending on the requested size.
+ */
+class zxx_stream: public z_stream
+{
+public:
+ /* Forward declaration */
+ class StaticAllocator;
+
+ explicit zxx_stream(StaticAllocator *allocator_=nullptr)
+ : allocator(allocator_)
+ {
+ memset(this, 0, sizeof(z_stream));
+ zalloc = Alloc;
+ zfree = Free;
+ opaque = this;
+ }
+
+private:
+ static void *Alloc(void *data, uInt items, uInt size)
+ {
+ zxx_stream *zStream = reinterpret_cast<zxx_stream *>(data);
+ if (zStream->allocator) {
+ return zStream->allocator->Alloc(items, size);
+ }
+ size_t buf_size = items * size;
+ return ::operator new(buf_size);
+ }
+
+ static void Free(void *data, void *ptr)
+ {
+ zxx_stream *zStream = reinterpret_cast<zxx_stream *>(data);
+ if (zStream->allocator) {
+ zStream->allocator->Free(ptr);
+ } else {
+ ::operator delete(ptr);
+ }
+ }
+
+ /**
+ * Helper class for each buffer in StaticAllocator.
+ */
+ template <size_t Size>
+ class ZStreamBuf
+ {
+ public:
+ ZStreamBuf() : inUse(false) { }
+
+ bool get(char*& out)
+ {
+ if (!inUse) {
+ inUse = true;
+ out = buf;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ void Release()
+ {
+ memset(buf, 0, Size);
+ inUse = false;
+ }
+
+ bool Equals(const void *other) { return other == buf; }
+
+ static const size_t size = Size;
+
+ private:
+ char buf[Size];
+ bool inUse;
+ };
+
+public:
+ /**
+ * Special allocator that uses static buffers to allocate from.
+ */
+ class StaticAllocator
+ {
+ public:
+ void *Alloc(uInt items, uInt size)
+ {
+ if (items == 1 && size <= stateBuf1.size) {
+ char* res = nullptr;
+ if (stateBuf1.get(res) || stateBuf2.get(res)) {
+ return res;
+ }
+ MOZ_CRASH("ZStreamBuf already in use");
+ } else if (items * size == windowBuf1.size) {
+ char* res = nullptr;
+ if (windowBuf1.get(res) || windowBuf2.get(res)) {
+ return res;
+ }
+ MOZ_CRASH("ZStreamBuf already in use");
+ } else {
+ MOZ_CRASH("No ZStreamBuf for allocation");
+ }
+ }
+
+ void Free(void *ptr)
+ {
+ if (stateBuf1.Equals(ptr)) {
+ stateBuf1.Release();
+ } else if (stateBuf2.Equals(ptr)) {
+ stateBuf2.Release();
+      } else if (windowBuf1.Equals(ptr)) {
+ windowBuf1.Release();
+ } else if (windowBuf2.Equals(ptr)) {
+ windowBuf2.Release();
+ } else {
+ MOZ_CRASH("Pointer doesn't match a ZStreamBuf");
+ }
+ }
+
+ // 0x3000 is an arbitrary size above 10K.
+ ZStreamBuf<0x3000> stateBuf1, stateBuf2;
+ ZStreamBuf<1 << MAX_WBITS> windowBuf1, windowBuf2;
+ };
+
+private:
+ StaticAllocator *allocator;
+};
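+
+/* Editor's sketch (not part of the original patch) of how the allocator is
+ * meant to be used: with a StaticAllocator attached, inflateInit2 draws its
+ * state and window buffers from the preallocated pool instead of malloc.
+ *
+ *   static zxx_stream::StaticAllocator allocator;
+ *   zxx_stream z(&allocator);
+ *   if (inflateInit2(&z, -MAX_WBITS) == Z_OK) {
+ *     // ... set next_in/avail_in/next_out/avail_out, call inflate() ...
+ *     inflateEnd(&z);  // Free() returns the buffers to the pool
+ *   }
+ */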
+
+/**
+ * Forward declaration
+ */
+class ZipCollection;
+
+/**
+ * Class to handle access to Zip archive streams. The Zip archive is mapped
+ * in memory, and streams are direct references to that mapped memory.
+ * Zip files are assumed to be correctly formed. No boundary checks are
+ * performed, which means hand-crafted malicious Zip archives can make the
+ * code fail in bad ways. However, since the only intended use is to load
+ * libraries from Zip archives, there is little point in hardening this code:
+ * a malicious archive could just as well carry a malicious library anyway.
+ */
+class Zip: public mozilla::external::AtomicRefCounted<Zip>
+{
+public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(Zip)
+ /**
+ * Create a Zip instance for the given file name. Returns nullptr in case
+ * of failure.
+ */
+ static already_AddRefed<Zip> Create(const char *filename);
+
+ /**
+ * Create a Zip instance using the given buffer.
+ */
+ static already_AddRefed<Zip> Create(void *buffer, size_t size) {
+ return Create(nullptr, buffer, size);
+ }
+
+private:
+ static already_AddRefed<Zip> Create(const char *filename,
+ void *buffer, size_t size);
+
+ /**
+ * Private constructor
+ */
+ Zip(const char *filename, void *buffer, size_t size);
+
+public:
+ /**
+ * Destructor
+ */
+ ~Zip();
+
+ /**
+ * Class used to access Zip archive item streams
+ */
+ class Stream
+ {
+ public:
+ /**
+ * Stream types
+ */
+ enum Type {
+ STORE = 0,
+ DEFLATE = 8
+ };
+
+ /**
+ * Constructor
+ */
+    Stream(): compressedBuf(nullptr), compressedSize(0), uncompressedSize(0),
+              CRC32(0), type(STORE) { }
+
+ /**
+ * Getters
+ */
+ const void *GetBuffer() { return compressedBuf; }
+ size_t GetSize() { return compressedSize; }
+ size_t GetUncompressedSize() { return uncompressedSize; }
+ size_t GetCRC32() { return CRC32; }
+ Type GetType() { return type; }
+
+ /**
+ * Returns a zxx_stream for use with inflate functions using the given
+ * buffer as inflate output. The caller is expected to allocate enough
+ * memory for the Stream uncompressed size.
+ */
+ zxx_stream GetZStream(void *buf)
+ {
+ zxx_stream zStream;
+ zStream.avail_in = compressedSize;
+ zStream.next_in = reinterpret_cast<Bytef *>(
+ const_cast<void *>(compressedBuf));
+ zStream.avail_out = uncompressedSize;
+ zStream.next_out = static_cast<Bytef *>(buf);
+ return zStream;
+ }
+
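+    /* A minimal usage sketch (editor's illustration; `zip` and `path` are
+     * assumed to be a valid RefPtr<Zip> and the name of a DEFLATE entry).
+     * Note the negative windowBits: Zip DEFLATE streams are raw.
+     *
+     *   Zip::Stream stream;
+     *   if (zip->GetStream(path, &stream) &&
+     *       stream.GetType() == Zip::Stream::DEFLATE) {
+     *     mozilla::UniquePtr<char[]> buf(
+     *       new char[stream.GetUncompressedSize()]);
+     *     zxx_stream z = stream.GetZStream(buf.get());
+     *     if (inflateInit2(&z, -MAX_WBITS) == Z_OK &&
+     *         inflate(&z, Z_FINISH) == Z_STREAM_END)
+     *       inflateEnd(&z);
+     *   }
+     */
+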
+ protected:
+ friend class Zip;
+ const void *compressedBuf;
+ size_t compressedSize;
+ size_t uncompressedSize;
+ size_t CRC32;
+ Type type;
+ };
+
+ /**
+ * Returns a stream from the Zip archive.
+ */
+ bool GetStream(const char *path, Stream *out) const;
+
+ /**
+ * Returns the file name of the archive
+ */
+ const char *GetName() const
+ {
+ return name;
+ }
+
+private:
+ /* File name of the archive */
+ char *name;
+ /* Address where the Zip archive is mapped */
+ void *mapped;
+ /* Size of the archive */
+ size_t size;
+
+ /**
+   * Strings (file names, comments, etc.) in the Zip headers are NOT
+   * zero-terminated. This class is a helper around them.
+ */
+ class StringBuf
+ {
+ public:
+ /**
+ * Constructor
+ */
+ StringBuf(const char *buf, size_t length): buf(buf), length(length) { }
+
+ /**
+     * Returns whether the string has the same content as the given
+     * zero-terminated string.
+ */
+ bool Equals(const char *str) const
+ {
+ return (strncmp(str, buf, length) == 0 && str[length] == '\0');
+ }
+
+ private:
+ const char *buf;
+ size_t length;
+ };
+
+/* All the following types need to be packed: they are read in place from the
+ * mmapped archive, so compiler-inserted padding would break their layout. */
+#pragma pack(1)
+public:
+ /**
+ * A Zip archive is an aggregate of entities which all start with a
+ * signature giving their type. This template is to be used as a base
+ * class for these entities.
+ */
+ template <typename T>
+ class SignedEntity
+ {
+ public:
+ /**
+ * Equivalent to reinterpret_cast<const T *>(buf), with an additional
+ * check of the signature.
+ */
+ static const T *validate(const void *buf)
+ {
+ const T *ret = static_cast<const T *>(buf);
+ if (ret->signature == T::magic)
+ return ret;
+ return nullptr;
+ }
+
+ SignedEntity(uint32_t magic): signature(magic) { }
+ private:
+ le_uint32 signature;
+ };
+
+private:
+ /**
+ * Header used to describe a Local File entry. The header is followed by
+ * the file name and an extra field, then by the data stream.
+ */
+ struct LocalFile: public SignedEntity<LocalFile>
+ {
+ /* Signature for a Local File header */
+ static const uint32_t magic = 0x04034b50;
+
+ /**
+ * Returns the file name
+ */
+ StringBuf GetName() const
+ {
+ return StringBuf(reinterpret_cast<const char *>(this) + sizeof(*this),
+ filenameSize);
+ }
+
+ /**
+ * Returns a pointer to the data associated with this header
+ */
+ const void *GetData() const
+ {
+ return reinterpret_cast<const char *>(this) + sizeof(*this)
+ + filenameSize + extraFieldSize;
+ }
+
+ le_uint16 minVersion;
+ le_uint16 generalFlag;
+ le_uint16 compression;
+ le_uint16 lastModifiedTime;
+ le_uint16 lastModifiedDate;
+ le_uint32 CRC32;
+ le_uint32 compressedSize;
+ le_uint32 uncompressedSize;
+ le_uint16 filenameSize;
+ le_uint16 extraFieldSize;
+ };
+
+ /**
+ * In some cases, when a zip archive is created, compressed size and CRC
+ * are not known when writing the Local File header. In these cases, the
+ * 3rd bit of the general flag in the Local File header is set, and there
+ * is an additional header following the compressed data.
+ */
+ struct DataDescriptor: public SignedEntity<DataDescriptor>
+ {
+ /* Signature for a Data Descriptor header */
+ static const uint32_t magic = 0x08074b50;
+
+ le_uint32 CRC32;
+ le_uint32 compressedSize;
+ le_uint32 uncompressedSize;
+ };
+
+ /**
+ * Header used to describe a Central Directory Entry. The header is
+ * followed by the file name, an extra field, and a comment.
+ */
+ struct DirectoryEntry: public SignedEntity<DirectoryEntry>
+ {
+ /* Signature for a Central Directory Entry header */
+ static const uint32_t magic = 0x02014b50;
+
+ /**
+ * Returns the file name
+ */
+ StringBuf GetName() const
+ {
+ return StringBuf(reinterpret_cast<const char *>(this) + sizeof(*this),
+ filenameSize);
+ }
+
+ /**
+ * Returns the Central Directory Entry following this one.
+ */
+ const DirectoryEntry *GetNext() const
+ {
+ return validate(reinterpret_cast<const char *>(this) + sizeof(*this)
+ + filenameSize + extraFieldSize + fileCommentSize);
+ }
+
+ le_uint16 creatorVersion;
+ le_uint16 minVersion;
+ le_uint16 generalFlag;
+ le_uint16 compression;
+ le_uint16 lastModifiedTime;
+ le_uint16 lastModifiedDate;
+ le_uint32 CRC32;
+ le_uint32 compressedSize;
+ le_uint32 uncompressedSize;
+ le_uint16 filenameSize;
+ le_uint16 extraFieldSize;
+ le_uint16 fileCommentSize;
+ le_uint16 diskNum;
+ le_uint16 internalAttributes;
+ le_uint32 externalAttributes;
+ le_uint32 offset;
+ };
+
+ /**
+ * Header used to describe the End of Central Directory Record.
+ */
+ struct CentralDirectoryEnd: public SignedEntity<CentralDirectoryEnd>
+ {
+ /* Signature for the End of Central Directory Record */
+ static const uint32_t magic = 0x06054b50;
+
+ le_uint16 diskNum;
+ le_uint16 startDisk;
+ le_uint16 recordsOnDisk;
+ le_uint16 records;
+ le_uint32 size;
+ le_uint32 offset;
+ le_uint16 commentSize;
+ };
+#pragma pack()
+
+ /**
+ * Returns the first Directory entry
+ */
+ const DirectoryEntry *GetFirstEntry() const;
+
+ /* Pointer to the Local File Entry following the last one GetStream() used.
+ * This is used by GetStream to avoid scanning the Directory Entries when the
+ * requested entry is that one. */
+ mutable const LocalFile *nextFile;
+
+ /* Likewise for the next Directory entry */
+ mutable const DirectoryEntry *nextDir;
+
+ /* Pointer to the Directory entries */
+ mutable const DirectoryEntry *entries;
+
+ mutable pthread_mutex_t mutex;
+};
+
+/**
+ * Class for bookkeeping Zip instances
+ */
+class ZipCollection
+{
+public:
+ static ZipCollection Singleton;
+
+ /**
+ * Get a Zip instance for the given path. If there is an existing one
+ * already, return that one, otherwise create a new one.
+ */
+ static already_AddRefed<Zip> GetZip(const char *path);
+
+protected:
+ friend class Zip;
+ /**
+ * Register the given Zip instance. This method is meant to be called
+ * by Zip::Create.
+ */
+ static void Register(Zip *zip);
+
+ /**
+ * Forget about the given Zip instance. This method is meant to be called
+ * by the Zip destructor.
+ */
+ static void Forget(Zip *zip);
+
+private:
+ /* Zip instances bookkept in this collection */
+ std::vector<Zip *> zips;
+};
+
+#endif /* Zip_h */
diff --git a/mozglue/linker/dladdr.h b/mozglue/linker/dladdr.h
new file mode 100644
index 000000000..9a209547a
--- /dev/null
+++ b/mozglue/linker/dladdr.h
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <dlfcn.h>
+
+#ifndef HAVE_DLADDR
+typedef struct {
+ const char *dli_fname;
+ void *dli_fbase;
+ const char *dli_sname;
+ void *dli_saddr;
+} Dl_info;
+extern int dladdr(void *addr, Dl_info *info) __attribute__((weak));
+#define HAVE_DLADDR 1
+#endif
diff --git a/mozglue/linker/moz.build b/mozglue/linker/moz.build
new file mode 100644
index 000000000..49f4d62f0
--- /dev/null
+++ b/mozglue/linker/moz.build
@@ -0,0 +1,54 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'BaseElf.cpp',
+ 'CustomElf.cpp',
+ 'ElfLoader.cpp',
+ 'Mappable.cpp',
+ 'SeekableZStream.cpp',
+ 'XZStream.cpp',
+ 'Zip.cpp',
+]
+
+Library('linker')
+
+HOST_SOURCES += [
+ 'SeekableZStream.cpp',
+ 'szip.cpp',
+]
+
+HostProgram('szip')
+
+FINAL_LIBRARY = 'mozglue'
+
+DEFINES['IMPL_MFBT'] = True
+
+DISABLE_STL_WRAPPING = True
+
+TEST_DIRS += ['tests']
+
+HOST_OS_LIBS += [
+ 'z',
+]
+
+if CONFIG['CPU_ARCH'] == 'arm':
+ if CONFIG['MOZ_THUMB2']:
+ HOST_DEFINES['TARGET_THUMB'] = True
+ else:
+ HOST_DEFINES['TARGET_ARM'] = True
+
+if CONFIG['CPU_ARCH'] == 'x86':
+ HOST_DEFINES['TARGET_X86'] = True
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
+
+DEFINES['XZ_USE_CRC64'] = 1
+
+USE_LIBS += [
+ 'xz-embedded',
+]
diff --git a/mozglue/linker/szip.cpp b/mozglue/linker/szip.cpp
new file mode 100644
index 000000000..bfc882fbe
--- /dev/null
+++ b/mozglue/linker/szip.cpp
@@ -0,0 +1,593 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+#include <map>
+#include <sys/stat.h>
+#include <string>
+#include <sstream>
+#include <cstring>
+#include <cstdlib>
+#include <zlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include "mozilla/Assertions.h"
+#include "mozilla/Scoped.h"
+#include "mozilla/UniquePtr.h"
+#include "SeekableZStream.h"
+#include "Utils.h"
+#include "Logging.h"
+
+Logging Logging::Singleton;
+
+const char *filterName[] = {
+ "none",
+ "thumb",
+ "arm",
+ "x86",
+ "auto"
+};
+
+/* Maximum supported size for chunkSize */
+static const size_t maxChunkSize =
+ 1 << (8 * std::min(sizeof(((SeekableZStreamHeader *)nullptr)->chunkSize),
+ sizeof(((SeekableZStreamHeader *)nullptr)->lastChunkSize)) - 1);
+
+class Buffer: public MappedPtr
+{
+public:
+ virtual ~Buffer() { }
+
+ virtual bool Resize(size_t size)
+ {
+    MemoryRange buf = MemoryRange::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+                                        MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (buf == MAP_FAILED)
+ return false;
+ if (*this != MAP_FAILED)
+ memcpy(buf, *this, std::min(size, GetLength()));
+ Assign(buf);
+ return true;
+ }
+
+ bool Fill(Buffer &other)
+ {
+ size_t size = other.GetLength();
+ if (!size || !Resize(size))
+ return false;
+ memcpy(static_cast<void *>(*this), static_cast<void *>(other), size);
+ return true;
+ }
+};
+
+class FileBuffer: public Buffer
+{
+public:
+ bool Init(const char *name, bool writable_ = false)
+ {
+ fd = open(name, writable_ ? O_RDWR | O_CREAT | O_TRUNC : O_RDONLY, 0666);
+ if (fd == -1)
+ return false;
+ writable = writable_;
+ return true;
+ }
+
+ virtual bool Resize(size_t size)
+ {
+ if (writable) {
+ if (ftruncate(fd, size) == -1)
+ return false;
+ }
+ Assign(MemoryRange::mmap(nullptr, size,
+ PROT_READ | (writable ? PROT_WRITE : 0),
+ writable ? MAP_SHARED : MAP_PRIVATE, fd, 0));
+    return *this != MAP_FAILED;
+ }
+
+ int getFd()
+ {
+ return fd;
+ }
+
+private:
+ AutoCloseFD fd;
+ bool writable;
+};
+
+class FilteredBuffer: public Buffer
+{
+public:
+ void Filter(Buffer &other, SeekableZStream::FilterId filter, size_t chunkSize)
+ {
+ SeekableZStream::ZStreamFilter filterCB =
+ SeekableZStream::GetFilter(filter);
+ MOZ_ASSERT(filterCB);
+ Fill(other);
+ size_t size = other.GetLength();
+ Bytef *data = reinterpret_cast<Bytef *>(static_cast<void *>(*this));
+ size_t avail = 0;
+ /* Filter needs to be applied in chunks. */
+ while (size) {
+ avail = std::min(size, chunkSize);
+ filterCB(data - static_cast<unsigned char *>(static_cast<void *>(*this)),
+ SeekableZStream::FILTER, data, avail);
+ size -= avail;
+ data += avail;
+ }
+ }
+};
+
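+/**
+ * Editor's summary: builds a deflate dictionary out of a buffer by counting
+ * the occurrences of every sizeof(T)-sized piece and keeping the most
+ * frequent ones. The most frequent pieces are placed at the end of the
+ * dictionary, where zlib's deflateSetDictionary gives them the most weight.
+ */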
+template <typename T>
+class Dictionary: public Buffer
+{
+ typedef T piece;
+ typedef std::pair<piece, int> stat_pair;
+
+ static bool stat_cmp(stat_pair a, stat_pair b)
+ {
+ return a.second < b.second;
+ }
+
+public:
+ Dictionary(Buffer &inBuf, size_t size)
+ {
+ if (!size || !Resize(size))
+ return;
+ DEBUG_LOG("Creating dictionary");
+ piece *origBufPieces = reinterpret_cast<piece *>(
+ static_cast<void *>(inBuf));
+ std::map<piece, int> stats;
+ for (unsigned int i = 0; i < inBuf.GetLength() / sizeof(piece); i++) {
+ stats[origBufPieces[i]]++;
+ }
+ std::vector<stat_pair> statsVec(stats.begin(), stats.end());
+ std::sort(statsVec.begin(), statsVec.end(), stat_cmp);
+
+ piece *dictPieces = reinterpret_cast<piece *>(
+ static_cast<void *>(*this));
+ typename std::vector<stat_pair>::reverse_iterator it = statsVec.rbegin();
+ for (int i = size / sizeof(piece); i > 0 && it < statsVec.rend();
+ i--, ++it) {
+ dictPieces[i - 1] = it->first;
+ }
+ }
+};
+
+class SzipAction
+{
+public:
+ virtual int run(const char *name, Buffer &origBuf,
+ const char *outName, Buffer &outBuf) = 0;
+
+ virtual ~SzipAction() {}
+};
+
+class SzipDecompress: public SzipAction
+{
+public:
+ int run(const char *name, Buffer &origBuf,
+ const char *outName, Buffer &outBuf);
+};
+
+
+class SzipCompress: public SzipAction
+{
+public:
+ int run(const char *name, Buffer &origBuf,
+ const char *outName, Buffer &outBuf);
+
+ SzipCompress(size_t aChunkSize, SeekableZStream::FilterId aFilter,
+ size_t aDictSize)
+ : chunkSize(aChunkSize ? aChunkSize : 16384)
+ , filter(aFilter)
+ , dictSize(aDictSize)
+ {}
+
+ const static signed char winSizeLog = 15;
+ const static size_t winSize = 1 << winSizeLog;
+
+ const static SeekableZStream::FilterId DEFAULT_FILTER =
+#if defined(TARGET_THUMB)
+ SeekableZStream::BCJ_THUMB;
+#elif defined(TARGET_ARM)
+ SeekableZStream::BCJ_ARM;
+#elif defined(TARGET_X86)
+ SeekableZStream::BCJ_X86;
+#else
+ SeekableZStream::NONE;
+#endif
+
+private:
+
+ int do_compress(Buffer &origBuf, Buffer &outBuf, const unsigned char *aDict,
+ size_t aDictSize, SeekableZStream::FilterId aFilter);
+
+ size_t chunkSize;
+ SeekableZStream::FilterId filter;
+ size_t dictSize;
+};
+
+/* Decompress a seekable compressed stream */
+int SzipDecompress::run(const char *name, Buffer &origBuf,
+ const char *outName, Buffer &outBuf)
+{
+ size_t origSize = origBuf.GetLength();
+ if (origSize < sizeof(SeekableZStreamHeader)) {
+ ERROR("%s is not compressed", name);
+ return 0;
+ }
+
+ SeekableZStream zstream;
+ if (!zstream.Init(origBuf, origSize))
+ return 0;
+
+ size_t size = zstream.GetUncompressedSize();
+
+ /* Give enough room for the uncompressed data */
+ if (!outBuf.Resize(size)) {
+ ERROR("Error resizing %s: %s", outName, strerror(errno));
+ return 1;
+ }
+
+ if (!zstream.Decompress(outBuf, 0, size))
+ return 1;
+
+ return 0;
+}
+
+/* Generate a seekable compressed stream. */
+int SzipCompress::run(const char *name, Buffer &origBuf,
+ const char *outName, Buffer &outBuf)
+{
+ size_t origSize = origBuf.GetLength();
+ if (origSize == 0) {
+ ERROR("Won't compress %s: it's empty", name);
+ return 1;
+ }
+ if (SeekableZStreamHeader::validate(origBuf)) {
+ WARN("Skipping %s: it's already a szip", name);
+ return 0;
+ }
+ bool compressed = false;
+ LOG("Size = %" PRIuSize, origSize);
+
+ /* Allocate a buffer the size of the uncompressed data: we don't want
+ * a compressed file larger than that anyways. */
+ if (!outBuf.Resize(origSize)) {
+ ERROR("Couldn't allocate output buffer: %s", strerror(errno));
+ return 1;
+ }
+
+ /* Find the most appropriate filter */
+ SeekableZStream::FilterId firstFilter, lastFilter;
+ bool scanFilters;
+ if (filter == SeekableZStream::FILTER_MAX) {
+ firstFilter = SeekableZStream::NONE;
+ lastFilter = SeekableZStream::FILTER_MAX;
+ scanFilters = true;
+ } else {
+ firstFilter = lastFilter = filter;
+ ++lastFilter;
+ scanFilters = false;
+ }
+
+ mozilla::UniquePtr<Buffer> filteredBuf;
+ Buffer *origData;
+ for (SeekableZStream::FilterId f = firstFilter; f < lastFilter; ++f) {
+ mozilla::UniquePtr<FilteredBuffer> filteredTmp;
+ Buffer tmpBuf;
+ if (f != SeekableZStream::NONE) {
+ DEBUG_LOG("Applying filter \"%s\"", filterName[f]);
+ filteredTmp = mozilla::MakeUnique<FilteredBuffer>();
+ filteredTmp->Filter(origBuf, f, chunkSize);
+ origData = filteredTmp.get();
+ } else {
+ origData = &origBuf;
+ }
+ if (dictSize && !scanFilters) {
+ filteredBuf = mozilla::Move(filteredTmp);
+ break;
+ }
+ DEBUG_LOG("Compressing with no dictionary");
+ if (do_compress(*origData, tmpBuf, nullptr, 0, f) == 0) {
+ if (tmpBuf.GetLength() < outBuf.GetLength()) {
+ outBuf.Fill(tmpBuf);
+ compressed = true;
+ filter = f;
+ filteredBuf = mozilla::Move(filteredTmp);
+ continue;
+ }
+ }
+ }
+
+ origData = filteredBuf ? filteredBuf.get() : &origBuf;
+
+ if (dictSize) {
+ Dictionary<uint64_t> dict(*origData, dictSize ? SzipCompress::winSize : 0);
+
+ /* Find the most appropriate dictionary size */
+ size_t firstDictSize, lastDictSize;
+ if (dictSize == (size_t) -1) {
+ /* If we scanned for filters, we effectively already tried dictSize=0 */
+ firstDictSize = scanFilters ? 4096 : 0;
+ lastDictSize = SzipCompress::winSize;
+ } else {
+ firstDictSize = lastDictSize = dictSize;
+ }
+
+ Buffer tmpBuf;
+ for (size_t d = firstDictSize; d <= lastDictSize; d += 4096) {
+ DEBUG_LOG("Compressing with dictionary of size %" PRIuSize, d);
+ if (do_compress(*origData, tmpBuf, static_cast<unsigned char *>(dict)
+ + SzipCompress::winSize - d, d, filter))
+ continue;
+ if (!compressed || tmpBuf.GetLength() < outBuf.GetLength()) {
+ outBuf.Fill(tmpBuf);
+ compressed = true;
+ dictSize = d;
+ }
+ }
+ }
+
+ if (!compressed) {
+ outBuf.Fill(origBuf);
+ LOG("Not compressed");
+ return 0;
+ }
+
+ if (dictSize == (size_t) -1)
+ dictSize = 0;
+
+ DEBUG_LOG("Used filter \"%s\" and dictionary size of %" PRIuSize,
+ filterName[filter], dictSize);
+ LOG("Compressed size is %" PRIuSize, outBuf.GetLength());
+
+ /* Sanity check */
+ Buffer tmpBuf;
+ SzipDecompress decompress;
+ if (decompress.run("buffer", outBuf, "buffer", tmpBuf))
+ return 1;
+
+ size_t size = tmpBuf.GetLength();
+ if (size != origSize) {
+ ERROR("Compression error: %" PRIuSize " != %" PRIuSize, size, origSize);
+ return 1;
+ }
+ if (memcmp(static_cast<void *>(origBuf), static_cast<void *>(tmpBuf), size)) {
+ ERROR("Compression error: content mismatch");
+ return 1;
+ }
+ return 0;
+}
+
+int SzipCompress::do_compress(Buffer &origBuf, Buffer &outBuf,
+ const unsigned char *aDict, size_t aDictSize,
+ SeekableZStream::FilterId aFilter)
+{
+ size_t origSize = origBuf.GetLength();
+ MOZ_ASSERT(origSize != 0);
+
+ /* Expected total number of chunks */
+ size_t nChunks = ((origSize + chunkSize - 1) / chunkSize);
+
+ /* The first chunk is going to be stored after the header, the dictionary
+ * and the offset table */
+ size_t offset = sizeof(SeekableZStreamHeader) + aDictSize
+ + nChunks * sizeof(uint32_t);
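+
+  /* Resulting layout of the output file (sketch):
+   *
+   *   SeekableZStreamHeader
+   *   dictionary            (aDictSize bytes)
+   *   offset table          (nChunks le_uint32 entries)
+   *   compressed chunk 0
+   *   ...
+   *   compressed chunk nChunks - 1
+   */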
+
+ if (offset >= origSize)
+ return 1;
+
+ /* Allocate a buffer the size of the uncompressed data: we don't want
+ * a compressed file larger than that anyways. */
+ if (!outBuf.Resize(origSize)) {
+ ERROR("Couldn't allocate output buffer: %s", strerror(errno));
+ return 1;
+ }
+
+ SeekableZStreamHeader *header = new (outBuf) SeekableZStreamHeader;
+ unsigned char *dictionary = static_cast<unsigned char *>(
+ outBuf + sizeof(SeekableZStreamHeader));
+ le_uint32 *entry =
+ reinterpret_cast<le_uint32 *>(dictionary + aDictSize);
+
+ /* Initialize header */
+ header->chunkSize = chunkSize;
+ header->dictSize = aDictSize;
+ header->totalSize = offset;
+ header->windowBits = -SzipCompress::winSizeLog; // Raw stream,
+ // window size of 32k.
+ header->filter = aFilter;
+ if (aDictSize)
+ memcpy(dictionary, aDict, aDictSize);
+
+ /* Initialize zlib structure */
+ z_stream zStream;
+ memset(&zStream, 0, sizeof(zStream));
+ zStream.avail_out = origSize - offset;
+ zStream.next_out = static_cast<Bytef*>(outBuf) + offset;
+
+ size_t avail = 0;
+ size_t size = origSize;
+ unsigned char *data = reinterpret_cast<unsigned char *>(
+ static_cast<void *>(origBuf));
+ while (size) {
+ avail = std::min(size, chunkSize);
+
+ /* Compress chunk */
+    int ret = deflateInit2(&zStream, 9, Z_DEFLATED, header->windowBits,
+                           MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
+    MOZ_ASSERT(ret == Z_OK);
+    if (aDictSize)
+      deflateSetDictionary(&zStream, dictionary, aDictSize);
+ zStream.avail_in = avail;
+ zStream.next_in = data;
+ ret = deflate(&zStream, Z_FINISH);
+ /* Under normal conditions, deflate returns Z_STREAM_END. If there is not
+ * enough room to compress, deflate returns Z_OK and avail_out is 0. We
+ * still want to deflateEnd in that case, so fall through. It will bail
+ * on the avail_out test that follows. */
+ MOZ_ASSERT(ret == Z_STREAM_END || ret == Z_OK);
+ ret = deflateEnd(&zStream);
+ MOZ_ASSERT(ret == Z_OK);
+    if (zStream.avail_out == 0)
+ return 1;
+
+ size_t len = origSize - offset - zStream.avail_out;
+
+ /* Adjust headers */
+ header->totalSize += len;
+ *entry++ = offset;
+ header->nChunks++;
+
+ /* Prepare for next iteration */
+ size -= avail;
+ data += avail;
+ offset += len;
+ }
+ header->lastChunkSize = avail;
+ MOZ_ASSERT(header->totalSize == offset);
+ MOZ_ASSERT(header->nChunks == nChunks);
+
+ if (!outBuf.Resize(offset)) {
+ ERROR("Error truncating output: %s", strerror(errno));
+ return 1;
+ }
+
+  return 0;
+}
+
+bool GetSize(const char *str, size_t *out)
+{
+ char *end;
+ MOZ_ASSERT(out);
+ errno = 0;
+ *out = strtol(str, &end, 10);
+ return (!errno && !*end);
+}
+
+int main(int argc, char* argv[])
+{
+ mozilla::UniquePtr<SzipAction> action;
+ char **firstArg;
+ bool compress = true;
+ size_t chunkSize = 0;
+ SeekableZStream::FilterId filter = SzipCompress::DEFAULT_FILTER;
+  size_t dictSize = 0;
+
+ Logging::Init();
+
+ for (firstArg = &argv[1]; argc > 2; argc--, firstArg++) {
+ if (!firstArg[0] || firstArg[0][0] != '-')
+ break;
+ if (strcmp(firstArg[0], "-d") == 0) {
+ compress = false;
+ } else if (strcmp(firstArg[0], "-c") == 0) {
+ firstArg++;
+ argc--;
+ if (!firstArg[0])
+ break;
+ if (!GetSize(firstArg[0], &chunkSize) || !chunkSize ||
+ (chunkSize % 4096) || (chunkSize > maxChunkSize)) {
+ ERROR("Invalid chunk size");
+ return 1;
+ }
+ } else if (strcmp(firstArg[0], "-f") == 0) {
+ firstArg++;
+ argc--;
+ if (!firstArg[0])
+ break;
+ bool matched = false;
+ for (unsigned int i = 0; i < sizeof(filterName) / sizeof(char *); ++i) {
+ if (strcmp(firstArg[0], filterName[i]) == 0) {
+ filter = static_cast<SeekableZStream::FilterId>(i);
+ matched = true;
+ break;
+ }
+ }
+ if (!matched) {
+ ERROR("Invalid filter");
+ return 1;
+ }
+ } else if (strcmp(firstArg[0], "-D") == 0) {
+ firstArg++;
+ argc--;
+ if (!firstArg[0])
+ break;
+ if (strcmp(firstArg[0], "auto") == 0) {
+ dictSize = -1;
+ } else if (!GetSize(firstArg[0], &dictSize) || (dictSize >= 1 << 16)) {
+ ERROR("Invalid dictionary size");
+ return 1;
+ }
+ }
+ }
+
+ if (argc != 2 || !firstArg[0]) {
+ LOG("usage: %s [-d] [-c CHUNKSIZE] [-f FILTER] [-D DICTSIZE] file",
+ argv[0]);
+ return 1;
+ }
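+
+  /* Illustrative invocations (the tool rewrites the file in place):
+   *   szip -c 16384 -f arm -D auto libfoo.so
+   *   szip -d libfoo.so
+   */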
+
+ if (compress) {
+ action.reset(new SzipCompress(chunkSize, filter, dictSize));
+ } else {
+ if (chunkSize) {
+ ERROR("-c is incompatible with -d");
+ return 1;
+ }
+ if (dictSize) {
+ ERROR("-D is incompatible with -d");
+ return 1;
+ }
+ action.reset(new SzipDecompress());
+ }
+
+ std::stringstream tmpOutStream;
+ tmpOutStream << firstArg[0] << ".sz." << getpid();
+ std::string tmpOut(tmpOutStream.str());
+ int ret;
+ struct stat st;
+ {
+ FileBuffer origBuf;
+ if (!origBuf.Init(firstArg[0])) {
+ ERROR("Couldn't open %s: %s", firstArg[0], strerror(errno));
+ return 1;
+ }
+
+ ret = fstat(origBuf.getFd(), &st);
+ if (ret == -1) {
+ ERROR("Couldn't stat %s: %s", firstArg[0], strerror(errno));
+ return 1;
+ }
+
+ size_t origSize = st.st_size;
+
+ /* Mmap the original file */
+ if (!origBuf.Resize(origSize)) {
+ ERROR("Couldn't mmap %s: %s", firstArg[0], strerror(errno));
+ return 1;
+ }
+
+ /* Create the compressed file */
+ FileBuffer outBuf;
+ if (!outBuf.Init(tmpOut.c_str(), true)) {
+ ERROR("Couldn't open %s: %s", tmpOut.c_str(), strerror(errno));
+ return 1;
+ }
+
+ ret = action->run(firstArg[0], origBuf, tmpOut.c_str(), outBuf);
+ if ((ret == 0) && (fstat(outBuf.getFd(), &st) == -1)) {
+ st.st_size = 0;
+ }
+ }
+
+ if ((ret == 0) && st.st_size) {
+ rename(tmpOut.c_str(), firstArg[0]);
+ } else {
+ unlink(tmpOut.c_str());
+ }
+ return ret;
+}
diff --git a/mozglue/linker/tests/TestZip.cpp b/mozglue/linker/tests/TestZip.cpp
new file mode 100644
index 000000000..25873f5e5
--- /dev/null
+++ b/mozglue/linker/tests/TestZip.cpp
@@ -0,0 +1,69 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdio>
+#include <unistd.h>
+#include "Zip.h"
+#include "mozilla/RefPtr.h"
+
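+/* Stubs for the mapping hooks referenced by the linker library; TestZip
+ * doesn't exercise them. */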
+extern "C" void report_mapping() { }
+extern "C" void delete_mapping() { }
+
+/**
+ * test.zip is a basic test zip file with a central directory. It contains
+ * four entries, in the following order:
+ * "foo", "bar", "baz", "qux".
+ * The entries are going to be read out of order.
+ */
+const char *test_entries[] = {
+ "baz", "foo", "bar", "qux"
+};
+
+/**
+ * no_central_dir.zip is a hand crafted test zip with no central directory
+ * entries. The Zip reader is expected to be able to traverse these entries
+ * if requested in order, without reading a central directory
+ * - First entry is a file "a", STOREd.
+ * - Second entry is a file "b", STOREd, using a data descriptor. CRC is
+ * unknown, but compressed and uncompressed sizes are known in the local
+ * file header.
+ * - Third entry is a file "c", DEFLATEd, using a data descriptor. CRC,
+ * compressed and uncompressed sizes are known in the local file header.
+ * This is the kind of entry that can be found in a zip that went through
+ * zipalign if it had a data descriptor originally.
+ * - Fourth entry is a file "d", STOREd.
+ */
+const char *no_central_dir_entries[] = {
+ "a", "b", "c", "d"
+};
+
+int main(int argc, char *argv[])
+{
+ if (argc != 2) {
+ fprintf(stderr, "TEST-FAIL | TestZip | Expecting the directory containing test Zips\n");
+ return 1;
+ }
+  if (chdir(argv[1]) == -1) {
+    fprintf(stderr, "TEST-FAIL | TestZip | Couldn't chdir to %s\n", argv[1]);
+    return 1;
+  }
+ Zip::Stream s;
+ RefPtr<Zip> z = ZipCollection::GetZip("test.zip");
+ for (size_t i = 0; i < sizeof(test_entries) / sizeof(*test_entries); i++) {
+ if (!z->GetStream(test_entries[i], &s)) {
+ fprintf(stderr, "TEST-UNEXPECTED-FAIL | TestZip | test.zip: Couldn't get entry \"%s\"\n", test_entries[i]);
+ return 1;
+ }
+ }
+ fprintf(stderr, "TEST-PASS | TestZip | test.zip could be accessed fully\n");
+
+ z = ZipCollection::GetZip("no_central_dir.zip");
+ for (size_t i = 0; i < sizeof(no_central_dir_entries)
+ / sizeof(*no_central_dir_entries); i++) {
+ if (!z->GetStream(no_central_dir_entries[i], &s)) {
+ fprintf(stderr, "TEST-UNEXPECTED-FAIL | TestZip | no_central_dir.zip: Couldn't get entry \"%s\"\n", no_central_dir_entries[i]);
+ return 1;
+ }
+ }
+ fprintf(stderr, "TEST-PASS | TestZip | no_central_dir.zip could be accessed in order\n");
+
+ return 0;
+}
diff --git a/mozglue/linker/tests/moz.build b/mozglue/linker/tests/moz.build
new file mode 100644
index 000000000..3d4c1c616
--- /dev/null
+++ b/mozglue/linker/tests/moz.build
@@ -0,0 +1,23 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIST_INSTALL = False
+
+SimplePrograms([
+ 'TestZip',
+])
+LOCAL_INCLUDES += ['..']
+USE_LIBS += [
+ 'linker',
+ 'mfbt',
+]
+OS_LIBS += CONFIG['MOZ_ZLIB_LIBS']
+DISABLE_STL_WRAPPING = True
+
+PYTHON_UNIT_TESTS += ['run_test_zip.py']
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
diff --git a/mozglue/linker/tests/no_central_dir.zip b/mozglue/linker/tests/no_central_dir.zip
new file mode 100644
index 000000000..df882220d
--- /dev/null
+++ b/mozglue/linker/tests/no_central_dir.zip
Binary files differ
diff --git a/mozglue/linker/tests/run_test_zip.py b/mozglue/linker/tests/run_test_zip.py
new file mode 100644
index 000000000..e606e16d4
--- /dev/null
+++ b/mozglue/linker/tests/run_test_zip.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+#
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+
+import buildconfig
+import mozpack.path as mozpath
+import mozunit
+import subprocess
+import unittest
+
+class TestZip(unittest.TestCase):
+ def test_zip(self):
+ srcdir = mozpath.dirname(__file__)
+ relsrcdir = mozpath.relpath(srcdir, buildconfig.topsrcdir)
+ test_bin = mozpath.join(buildconfig.topobjdir, relsrcdir,
+ 'TestZip' + buildconfig.substs['BIN_SUFFIX'])
+ self.assertEqual(0, subprocess.call([test_bin, srcdir]))
+
+if __name__ == '__main__':
+ mozunit.main()
diff --git a/mozglue/linker/tests/test.zip b/mozglue/linker/tests/test.zip
new file mode 100644
index 000000000..657835b0c
--- /dev/null
+++ b/mozglue/linker/tests/test.zip
Binary files differ