1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <string>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <dlfcn.h>
#include <unistd.h>
#include <errno.h>
#include <algorithm>
#include <fcntl.h>
#include "ElfLoader.h"
#include "BaseElf.h"
#include "CustomElf.h"
#include "Mappable.h"
#include "Logging.h"
#include <inttypes.h>
#if defined(ANDROID)
#include <sys/syscall.h>
#include <android/api-level.h>
#if __ANDROID_API__ < 8
/* Android API < 8 doesn't provide sigaltstack */
extern "C" {
/* Forward sigaltstack directly to the kernel; Android API < 8 libc does
 * not expose a sigaltstack wrapper. */
inline int sigaltstack(const stack_t *ss, stack_t *oss) {
  return syscall(__NR_sigaltstack, ss, oss);
}
} /* extern "C" */
#endif /* __ANDROID_API__ */
#endif /* ANDROID */
#ifdef __ARM_EABI__
extern "C" MOZ_EXPORT const void *
__gnu_Unwind_Find_exidx(void *pc, int *pcount) __attribute__((weak));
#endif
/* Pointer to the PT_DYNAMIC section of the executable or library
* containing this code. */
extern "C" Elf::Dyn _DYNAMIC[];
using namespace mozilla;
/**
* dlfcn.h replacements functions
*/
/**
 * dlopen() replacement: loads the given library through ElfLoader and
 * takes a direct (external) reference on success.
 * @return an opaque LibHandle pointer for use with __wrap_dlsym /
 *         __wrap_dlclose, or nullptr on failure.
 */
void *
__wrap_dlopen(const char *path, int flags)
{
  RefPtr<LibHandle> handle = ElfLoader::Singleton.Load(path, flags);
  if (handle)
    handle->AddDirectRef();
  /* Implicit RefPtr -> raw pointer conversion; the direct reference taken
   * above keeps the handle alive once the RefPtr goes out of scope. */
  return handle;
}
const char *
__wrap_dlerror(void)
{
const char *error = ElfLoader::Singleton.lastError;
ElfLoader::Singleton.lastError = nullptr;
return error;
}
void *
__wrap_dlsym(void *handle, const char *symbol)
{
if (!handle) {
ElfLoader::Singleton.lastError = "dlsym(NULL, sym) unsupported";
return nullptr;
}
if (handle != RTLD_DEFAULT && handle != RTLD_NEXT) {
LibHandle *h = reinterpret_cast<LibHandle *>(handle);
return h->GetSymbolPtr(symbol);
}
return dlsym(handle, symbol);
}
int
__wrap_dlclose(void *handle)
{
if (!handle) {
ElfLoader::Singleton.lastError = "No handle given to dlclose()";
return -1;
}
reinterpret_cast<LibHandle *>(handle)->ReleaseDirectRef();
return 0;
}
/**
 * dladdr() replacement: fills info for addresses falling within a
 * library loaded by this loader, deferring to the system dladdr()
 * otherwise. Returns non-zero on success, like dladdr().
 */
int
__wrap_dladdr(void *addr, Dl_info *info)
{
  RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(addr);
  if (!handle)
    return dladdr(addr, info);
  info->dli_fname = handle->GetPath();
  info->dli_fbase = handle->GetBase();
  return 1;
}
/**
 * dl_iterate_phdr() replacement: walks the debugger link map, filling a
 * dl_phdr_info for each entry and invoking the given callback.
 * @return the first non-zero callback result, 0 after a full iteration,
 *         or -1 when the debugger map is unavailable.
 */
int
__wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data)
{
  if (!ElfLoader::Singleton.dbg)
    return -1;
  int pipefd[2];
  bool valid_pipe = (pipe(pipefd) == 0);
  AutoCloseFD read_fd(pipefd[0]);
  AutoCloseFD write_fd(pipefd[1]);
  for (ElfLoader::DebuggerHelper::iterator it = ElfLoader::Singleton.dbg.begin();
       it < ElfLoader::Singleton.dbg.end(); ++it) {
    dl_phdr_info info;
    info.dlpi_addr = reinterpret_cast<Elf::Addr>(it->l_addr);
    info.dlpi_name = it->l_name;
    info.dlpi_phdr = nullptr;
    info.dlpi_phnum = 0;
    // Assuming l_addr points to Elf headers (in most cases, this is true),
    // get the Phdr location from there.
    // Unfortunately, when l_addr doesn't point to Elf headers, it may point
    // to unmapped memory, or worse, unreadable memory. The only way to detect
    // the latter without causing a SIGSEGV is to use the pointer in a system
    // call that will try to read from there, and return an EFAULT error if
    // it can't. One such system call is write(). It used to be possible to
    // use a file descriptor on /dev/null for these kind of things, but recent
    // Linux kernels never return an EFAULT error when using /dev/null.
    // So instead, we use a self pipe. We do however need to read() from the
    // read end of the pipe as well so as to not fill up the pipe buffer and
    // block on subsequent writes.
    // In the unlikely event reads from or write to the pipe fail for some
    // other reason than EFAULT, we don't try any further and just skip setting
    // the Phdr location for all subsequent libraries, rather than trying to
    // start over with a new pipe.
    bool can_read = true;  // `bool`, not `int`: this is a pure flag.
    if (valid_pipe) {
      int ret;
      char raw_ehdr[sizeof(Elf::Ehdr)];
      static_assert(sizeof(raw_ehdr) < PIPE_BUF, "PIPE_BUF is too small");
      do {
        // writes are atomic when smaller than PIPE_BUF, per POSIX.1-2008.
        ret = write(write_fd, it->l_addr, sizeof(raw_ehdr));
      } while (ret == -1 && errno == EINTR);
      if (ret != sizeof(raw_ehdr)) {
        if (ret == -1 && errno == EFAULT) {
          can_read = false;
        } else {
          valid_pipe = false;
        }
      } else {
        size_t nbytes = 0;
        do {
          // Per POSIX.1-2008, interrupted reads can return a length smaller
          // than the given one instead of failing with errno EINTR.
          ret = read(read_fd, raw_ehdr + nbytes, sizeof(raw_ehdr) - nbytes);
          if (ret > 0)
            nbytes += ret;
        } while ((nbytes != sizeof(raw_ehdr) && ret > 0) ||
                 (ret == -1 && errno == EINTR));
        if (nbytes != sizeof(raw_ehdr)) {
          valid_pipe = false;
        }
      }
    }
    if (valid_pipe && can_read) {
      const Elf::Ehdr *ehdr = Elf::Ehdr::validate(it->l_addr);
      if (ehdr) {
        /* Program headers immediately follow at e_phoff from the Ehdr. */
        info.dlpi_phdr = reinterpret_cast<const Elf::Phdr *>(
          reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
        info.dlpi_phnum = ehdr->e_phnum;
      }
    }
    int ret = callback(&info, sizeof(dl_phdr_info), data);
    if (ret)
      return ret;
  }
  return 0;
}
#ifdef __ARM_EABI__
/**
 * __gnu_Unwind_Find_exidx replacement: returns the ARM exception index
 * table for libraries loaded by this loader, deferring to the system
 * implementation (a weak symbol, possibly absent) otherwise.
 */
const void *
__wrap___gnu_Unwind_Find_exidx(void *pc, int *pcount)
{
  RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(pc);
  if (handle)
    return handle->FindExidx(pcount);
  if (__gnu_Unwind_Find_exidx)
    return __gnu_Unwind_Find_exidx(pc, pcount);
  /* No exidx information available. */
  *pcount = 0;
  return nullptr;
}
#endif
/**
* faulty.lib public API
*/
/**
 * Returns the length of the underlying mappable for the given library
 * handle, or 0 for a null handle.
 */
MFBT_API size_t
__dl_get_mappable_length(void *handle) {
  if (!handle)
    return 0;
  LibHandle *lib = reinterpret_cast<LibHandle *>(handle);
  return lib->GetMappableLength();
}
/**
 * Maps a range of the given library handle's underlying mappable.
 * @return the mapped address, or nullptr for a null handle.
 */
MFBT_API void *
__dl_mmap(void *handle, void *addr, size_t length, off_t offset)
{
  if (!handle)
    return nullptr;
  LibHandle *lib = reinterpret_cast<LibHandle *>(handle);
  return lib->MappableMMap(addr, length, offset);
}
/**
 * Unmaps a range previously obtained through __dl_mmap. A null handle is
 * silently ignored.
 */
MFBT_API void
__dl_munmap(void *handle, void *addr, size_t length)
{
  if (!handle)
    return;
  LibHandle *lib = reinterpret_cast<LibHandle *>(handle);
  lib->MappableMUnmap(addr, length);
}
/* Public API: reports whether the loader's signal handling machinery is
 * unusable on this system (delegates to the ElfLoader singleton). */
MFBT_API bool
IsSignalHandlingBroken()
{
  return ElfLoader::Singleton.isSignalHandlingBroken();
}
namespace {
/**
 * Returns the part after the last '/' for the given path
 */
const char *
LeafName(const char *path)
{
  if (const char *lastSlash = strrchr(path, '/'))
    return lastSlash + 1;
  return path;
}
} /* Anonymous namespace */
/**
* LibHandle
*/
/* Releases the path string owned by the handle (malloc'd elsewhere,
 * hence free()). */
LibHandle::~LibHandle()
{
  free(path);
}
/* Returns the file name part of the library path, or nullptr when the
 * handle has no path. */
const char *
LibHandle::GetName() const
{
  return path ? LeafName(path) : nullptr;
}
/* Returns the length of the library's underlying mappable, creating it
 * lazily on first use. Returns 0 when no mappable is available. */
size_t
LibHandle::GetMappableLength() const
{
  if (!mappable)
    mappable = GetMappable();
  if (!mappable)
    return 0;
  return mappable->GetLength();
}
/* Maps a read-only, private view of the library's underlying mappable,
 * creating the mappable lazily on first use.
 * Returns MAP_FAILED when no mappable is available or mmap fails. */
void *
LibHandle::MappableMMap(void *addr, size_t length, off_t offset) const
{
  if (!mappable)
    mappable = GetMappable();
  if (!mappable)
    return MAP_FAILED;
  void* mapped = mappable->mmap(addr, length, PROT_READ, MAP_PRIVATE, offset);
  if (mapped != MAP_FAILED) {
    /* Ensure the availability of all pages within the mapping */
    for (size_t off = 0; off < length; off += PageSize()) {
      mappable->ensure(reinterpret_cast<char *>(mapped) + off);
    }
  }
  return mapped;
}
/* Unmaps a view obtained through MappableMMap. No-op when the mappable
 * was never created. */
void
LibHandle::MappableMUnmap(void *addr, size_t length) const
{
  if (mappable)
    mappable->munmap(addr, length);
}
/**
* SystemElf
*/
/* Loads a library through the system linker (dlopen), wrapping the
 * resulting handle in a SystemElf registered with the ElfLoader.
 * Returns nullptr when dlopen fails or when an absolute path does not
 * exist on disk (see comment below). */
already_AddRefed<LibHandle>
SystemElf::Load(const char *path, int flags)
{
  /* The Android linker returns a handle when the file name matches an
   * already loaded library, even when the full path doesn't exist */
  if (path && path[0] == '/' && (access(path, F_OK) == -1)){
    DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, (void *)nullptr);
    return nullptr;
  }
  void *handle = dlopen(path, flags);
  DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, handle);
  /* Propagate the system linker's error state to our dlerror wrapper. */
  ElfLoader::Singleton.lastError = dlerror();
  if (handle) {
    SystemElf *elf = new SystemElf(path, handle);
    ElfLoader::Singleton.Register(elf);
    RefPtr<LibHandle> lib(elf);
    return lib.forget();
  }
  return nullptr;
}
/* Closes the system linker handle (unless already forgotten via
 * Forget()) and unregisters this handle from the ElfLoader. */
SystemElf::~SystemElf()
{
  if (!dlhandle)
    return;
  DEBUG_LOG("dlclose(%p [\"%s\"])", dlhandle, GetPath());
  dlclose(dlhandle);
  ElfLoader::Singleton.lastError = dlerror();
  ElfLoader::Singleton.Forget(this);
}
/* Resolves a symbol through the system linker, recording dlerror()
 * state for our dlerror wrapper. */
void *
SystemElf::GetSymbolPtr(const char *symbol) const
{
  void *sym = dlsym(dlhandle, symbol);
  DEBUG_LOG("dlsym(%p [\"%s\"], \"%s\") = %p", dlhandle, GetPath(), symbol, sym);
  ElfLoader::Singleton.lastError = dlerror();
  return sym;
}
/* Creates a Mappable for the library file backing this handle, or
 * nullptr when the handle has no path. */
Mappable *
SystemElf::GetMappable() const
{
  const char *path = GetPath();
  if (!path)
    return nullptr;
#ifdef ANDROID
  /* On Android, if we don't have the full path, try in /system/lib */
  const char *name = LeafName(path);
  std::string systemPath;
  if (name == path) {
    systemPath = "/system/lib/";
    systemPath += path;
    path = systemPath.c_str();
  }
#endif
  return MappableFile::Create(path);
}
#ifdef __ARM_EABI__
/* ARM EH index lookup for system-linker handles; currently unimplemented
 * and always reports an empty table. */
const void *
SystemElf::FindExidx(int *pcount) const
{
  /* TODO: properly implement when ElfLoader::GetHandleByPtr
     does return SystemElf handles */
  *pcount = 0;
  return nullptr;
}
#endif
/**
* ElfLoader
*/
/* Unique ElfLoader instance */
ElfLoader ElfLoader::Singleton;
/**
 * Loads the given library, returning a handle holding a reference for
 * the caller. A null path yields a handle for the main program.
 * Previously loaded libraries are matched by leaf name when the path is
 * relative, by full path otherwise. Relative paths are first resolved
 * next to the parent library, then tried with the custom linker, then
 * with the system linker, and finally in the system search path.
 */
already_AddRefed<LibHandle>
ElfLoader::Load(const char *path, int flags, LibHandle *parent)
{
  /* Ensure logging is initialized or refresh if environment changed. */
  Logging::Init();
  /* Ensure self_elf initialization. */
  if (!self_elf)
    Init();
  RefPtr<LibHandle> handle;
  /* Handle dlopen(nullptr) directly. */
  if (!path) {
    handle = SystemElf::Load(nullptr, flags);
    return handle.forget();
  }
  /* TODO: Handle relative paths correctly */
  const char *name = LeafName(path);
  /* Search the list of handles we already have for a match. When the given
   * path is not absolute, compare file names, otherwise compare full paths. */
  if (name == path) {
    for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
      if ((*it)->GetName() && (strcmp((*it)->GetName(), name) == 0)) {
        handle = *it;
        return handle.forget();
      }
  } else {
    for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
      if ((*it)->GetPath() && (strcmp((*it)->GetPath(), path) == 0)) {
        handle = *it;
        return handle.forget();
      }
  }
  char *abs_path = nullptr;
  const char *requested_path = path;
  /* When the path is not absolute and the library is being loaded for
   * another, first try to load the library from the directory containing
   * that parent library. */
  if ((name == path) && parent) {
    const char *parentPath = parent->GetPath();
    /* Guard against parent paths without a directory component; the
     * previous code dereferenced the strrchr() result unconditionally. */
    const char *slash = parentPath ? strrchr(parentPath, '/') : nullptr;
    if (slash) {
      /* Allocate room for the parent directory (including the trailing
       * slash), the leaf name, and the null terminator. The previous
       * `strlen(parentPath) + strlen(path)` allocation was one byte
       * short when the parent path ended with a '/'. */
      size_t dirLen = slash + 1 - parentPath;
      abs_path = new char[dirLen + strlen(path) + 1];
      memcpy(abs_path, parentPath, dirLen);
      strcpy(abs_path + dirLen, path);
      path = abs_path;
    }
  }
  Mappable *mappable = GetMappableFromPath(path);
  /* Try loading with the custom linker if we have a Mappable */
  if (mappable)
    handle = CustomElf::Load(mappable, path, flags);
  /* Try loading with the system linker if everything above failed */
  if (!handle)
    handle = SystemElf::Load(path, flags);
  /* If we didn't have an absolute path and haven't been able to load
   * a library yet, try in the system search path */
  if (!handle && abs_path)
    handle = SystemElf::Load(name, flags);
  delete [] abs_path;
  DEBUG_LOG("ElfLoader::Load(\"%s\", 0x%x, %p [\"%s\"]) = %p", requested_path, flags,
            reinterpret_cast<void *>(parent), parent ? parent->GetPath() : "",
            static_cast<void *>(handle));
  return handle.forget();
}
/* Returns (an added reference to) the handle of the library whose
 * mapping contains the given address, or nullptr when no loaded library
 * contains it. */
already_AddRefed<LibHandle>
ElfLoader::GetHandleByPtr(void *addr)
{
  /* Scan the list of handles we already have for a match */
  for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it) {
    if ((*it)->Contains(addr)) {
      RefPtr<LibHandle> lib = *it;
      return lib.forget();
    }
  }
  return nullptr;
}
/* Creates a Mappable for the given path. Paths of the form
 * "/path/to/archive.zip!/member.so" refer to a member inside a zip
 * archive; plain paths map the file directly. Returns nullptr when the
 * zip member can't be resolved. */
Mappable *
ElfLoader::GetMappableFromPath(const char *path)
{
  const char *name = LeafName(path);
  Mappable *mappable = nullptr;
  RefPtr<Zip> zip;
  const char *subpath;
  if ((subpath = strchr(path, '!'))) {
    /* Split "zip!subpath"; skip any leading '/' in the member name. */
    char *zip_path = strndup(path, subpath - path);
    while (*(++subpath) == '/') { }
    zip = ZipCollection::GetZip(zip_path);
    free(zip_path);
    Zip::Stream s;
    if (zip && zip->GetStream(subpath, &s)) {
      /* When the MOZ_LINKER_EXTRACT environment variable is set to "1",
       * compressed libraries are going to be (temporarily) extracted as
       * files, in the directory pointed by the MOZ_LINKER_CACHE
       * environment variable. */
      const char *extract = getenv("MOZ_LINKER_EXTRACT");
      if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
        mappable = MappableExtractFile::Create(name, zip, &s);
      if (!mappable) {
        if (s.GetType() == Zip::Stream::DEFLATE) {
          mappable = MappableDeflate::Create(name, zip, &s);
        } else if (s.GetType() == Zip::Stream::STORE) {
          mappable = MappableSeekableZStream::Create(name, zip, &s);
        }
      }
    }
  }
  /* If we couldn't load above, try with a MappableFile */
  if (!mappable && !zip)
    mappable = MappableFile::Create(path);
  return mappable;
}
/* Adds the handle to the list of known libraries. */
void
ElfLoader::Register(LibHandle *handle)
{
  handles.push_back(handle);
}
/* Adds a custom-linked library to the list of known libraries, and to
 * the debugger link map when available. */
void
ElfLoader::Register(CustomElf *handle)
{
  Register(static_cast<LibHandle *>(handle));
  if (dbg) {
    dbg.Add(handle);
  }
}
/* Removes the handle from the list of known libraries. Logs, but does
 * not fail, when the handle isn't in the list. */
void
ElfLoader::Forget(LibHandle *handle)
{
  /* Ensure logging is initialized or refresh if environment changed. */
  Logging::Init();
  LibHandleList::iterator it = std::find(handles.begin(), handles.end(), handle);
  if (it != handles.end()) {
    DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"])", reinterpret_cast<void *>(handle),
              handle->GetPath());
    handles.erase(it);
  } else {
    DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"]): Handle not found",
              reinterpret_cast<void *>(handle), handle->GetPath());
  }
}
/* Removes a custom-linked library from the list of known libraries, and
 * from the debugger link map when available. */
void
ElfLoader::Forget(CustomElf *handle)
{
  Forget(static_cast<LibHandle *>(handle));
  if (dbg) {
    dbg.Remove(handle);
  }
}
/* Creates LoadedElf handles for the library containing this code
 * (self_elf) and, on Android, for libc, so their symbols can be
 * resolved by the custom linker. */
void
ElfLoader::Init()
{
  Dl_info info;
  /* On Android < 4.1 can't reenter dl* functions. So when the library
   * containing this code is dlopen()ed, it can't call dladdr from a
   * static initializer. */
  if (dladdr(_DYNAMIC, &info) != 0) {
    self_elf = LoadedElf::Create(info.dli_fname, info.dli_fbase);
  }
#if defined(ANDROID)
  /* Use syscall() as an address known to live in libc. */
  if (dladdr(FunctionPtr(syscall), &info) != 0) {
    libc = LoadedElf::Create(info.dli_fname, info.dli_fbase);
  }
#endif
}
/* Tears down the loader: drops self_elf/libc, force-releases external
 * references to non-system handles, and logs any handles that remain.
 * The teardown order below is deliberate; see inline comments. */
ElfLoader::~ElfLoader()
{
  LibHandleList list;
  if (!Singleton.IsShutdownExpected()) {
    MOZ_CRASH("Unexpected shutdown");
  }
  /* Release self_elf and libc */
  self_elf = nullptr;
#if defined(ANDROID)
  libc = nullptr;
#endif
  /* Build up a list of all library handles with direct (external) references.
   * We actually skip system library handles because we want to keep at least
   * some of these open. Most notably, Mozilla codebase keeps a few libgnome
   * libraries deliberately open because of the mess that libORBit destruction
   * is. dlclose()ing these libraries actually leads to problems. */
  for (LibHandleList::reverse_iterator it = handles.rbegin();
       it < handles.rend(); ++it) {
    if ((*it)->DirectRefCount()) {
      if (SystemElf *se = (*it)->AsSystemElf()) {
        /* Forget() detaches the dlhandle so the destructor won't dlclose. */
        se->Forget();
      } else {
        list.push_back(*it);
      }
    }
  }
  /* Force release all external references to the handles collected above */
  for (LibHandleList::iterator it = list.begin(); it < list.end(); ++it) {
    while ((*it)->ReleaseDirectRef()) { }
  }
  /* Remove the remaining system handles. */
  if (handles.size()) {
    list = handles;
    for (LibHandleList::reverse_iterator it = list.rbegin();
         it < list.rend(); ++it) {
      if ((*it)->AsSystemElf()) {
        DEBUG_LOG("ElfLoader::~ElfLoader(): Remaining handle for \"%s\" "
                  "[%d direct refs, %d refs total]", (*it)->GetPath(),
                  (*it)->DirectRefCount(), (*it)->refCount());
      } else {
        DEBUG_LOG("ElfLoader::~ElfLoader(): Unexpected remaining handle for \"%s\" "
                  "[%d direct refs, %d refs total]", (*it)->GetPath(),
                  (*it)->DirectRefCount(), (*it)->refCount());
        /* Not removing, since it could have references to other libraries,
         * destroying them as a side effect, and possibly leaving dangling
         * pointers in the handle list we're scanning */
      }
    }
  }
}
/* Logs per-handle statistics for all loaded libraries; only active in
 * verbose logging mode. `when` tags the log output. */
void
ElfLoader::stats(const char *when)
{
  if (MOZ_LIKELY(!Logging::isVerbose()))
    return;
  for (LibHandleList::iterator it = Singleton.handles.begin();
       it < Singleton.handles.end(); ++it)
    (*it)->stats(when);
}
#ifdef __ARM_EABI__
/* ARM EABI __aeabi_atexit replacement: records the destructor so that
 * __wrap_cxa_finalize can run it for the matching DSO handle. */
int
ElfLoader::__wrap_aeabi_atexit(void *that, ElfLoader::Destructor destructor,
                               void *dso_handle)
{
  Singleton.destructors.push_back(
    DestructorCaller(destructor, that, dso_handle));
  return 0;
}
#else
/* Itanium ABI __cxa_atexit replacement: same as above, with the
 * arguments in Itanium order. */
int
ElfLoader::__wrap_cxa_atexit(ElfLoader::Destructor destructor, void *that,
                             void *dso_handle)
{
  Singleton.destructors.push_back(
    DestructorCaller(destructor, that, dso_handle));
  return 0;
}
#endif
/* __cxa_finalize replacement: runs every recorded destructor registered
 * for the given DSO handle. */
void
ElfLoader::__wrap_cxa_finalize(void *dso_handle)
{
  /* Call all destructors for the given DSO handle in reverse order they were
   * registered. */
  std::vector<DestructorCaller>::reverse_iterator it;
  for (it = Singleton.destructors.rbegin();
       it < Singleton.destructors.rend(); ++it) {
    if (it->IsForHandle(dso_handle)) {
      it->Call();
    }
  }
}
/* Invokes the recorded destructor once; the destructor pointer is
 * cleared afterwards so repeated Call()s are no-ops. */
void
ElfLoader::DestructorCaller::Call()
{
  if (destructor) {
    DEBUG_LOG("ElfLoader::DestructorCaller::Call(%p, %p, %p)",
              FunctionPtr(destructor), object, dso_handle);
    destructor(object);
    destructor = nullptr;
  }
}
/* Locates the r_debug structure the dynamic linker exposes for
 * debuggers, by walking from the environment block on the initial stack
 * to the ELF auxiliary vectors, then through the program headers to the
 * PT_DYNAMIC segment's DT_DEBUG entry. On failure, dbg stays null and
 * the debugger link map is simply not maintained. */
ElfLoader::DebuggerHelper::DebuggerHelper(): dbg(nullptr), firstAdded(nullptr)
{
  /* Find ELF auxiliary vectors.
   *
   * The kernel stores the following data on the stack when starting a
   * program:
   *   argc
   *   argv[0] (pointer into argv strings defined below)
   *   argv[1] (likewise)
   *   ...
   *   argv[argc - 1] (likewise)
   *   nullptr
   *   envp[0] (pointer into environment strings defined below)
   *   envp[1] (likewise)
   *   ...
   *   envp[n] (likewise)
   *   nullptr
   *   ... (more NULLs on some platforms such as Android 4.3)
   *   auxv[0] (first ELF auxiliary vector)
   *   auxv[1] (second ELF auxiliary vector)
   *   ...
   *   auxv[p] (last ELF auxiliary vector)
   *   (AT_NULL, nullptr)
   *   padding
   *   argv strings, separated with '\0'
   *   environment strings, separated with '\0'
   *   nullptr
   *
   * What we are after are the auxv values defined by the following struct.
   */
  struct AuxVector {
    Elf::Addr type;
    Elf::Addr value;
  };
  /* Pointer to the environment variables list */
  extern char **environ;
  /* The environment may have changed since the program started, in which
   * case the environ variables list isn't the list the kernel put on stack
   * anymore. But in this new list, variables that didn't change still point
   * to the strings the kernel put on stack. It is quite unlikely that two
   * modified environment variables point to two consecutive strings in memory,
   * so we assume that if two consecutive environment variables point to two
   * consecutive strings, we found strings the kernel put on stack. */
  char **env;
  for (env = environ; *env; env++)
    if (*env + strlen(*env) + 1 == env[1])
      break;
  /* No two adjacent kernel strings found; give up. */
  if (!*env)
    return;
  /* Next, we scan the stack backwards to find a pointer to one of those
   * strings we found above, which will give us the location of the original
   * envp list. As we are looking for pointers, we need to look at 32-bits or
   * 64-bits aligned values, depening on the architecture. */
  char **scan = reinterpret_cast<char **>(
    reinterpret_cast<uintptr_t>(*env) & ~(sizeof(void *) - 1));
  while (*env != *scan)
    scan--;
  /* Finally, scan forward to find the last environment variable pointer and
   * thus the first auxiliary vector. */
  while (*scan++);
  /* Some platforms have more NULLs here, so skip them if we encounter them */
  while (!*scan)
    scan++;
  AuxVector *auxv = reinterpret_cast<AuxVector *>(scan);
  /* The two values of interest in the auxiliary vectors are AT_PHDR and
   * AT_PHNUM, which gives us the the location and size of the ELF program
   * headers. */
  Array<Elf::Phdr> phdrs;
  char *base = nullptr;
  while (auxv->type) {
    if (auxv->type == AT_PHDR) {
      phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
      /* Assume the base address is the first byte of the same page */
      base = reinterpret_cast<char *>(PageAlignedPtr(auxv->value));
    }
    if (auxv->type == AT_PHNUM)
      phdrs.Init(auxv->value);
    auxv++;
  }
  if (!phdrs) {
    DEBUG_LOG("Couldn't find program headers");
    return;
  }
  /* In some cases, the address for the program headers we get from the
   * auxiliary vectors is not mapped, because of the PT_LOAD segments
   * definitions in the program executable. Trying to map anonymous memory
   * with a hint giving the base address will return a different address
   * if something is mapped there, and the base address otherwise. */
  MappedPtr mem(MemoryRange::mmap(base, PageSize(), PROT_NONE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (mem == base) {
    /* If program headers aren't mapped, try to map them */
    int fd = open("/proc/self/exe", O_RDONLY);
    if (fd == -1) {
      DEBUG_LOG("Failed to open /proc/self/exe");
      return;
    }
    mem.Assign(MemoryRange::mmap(base, PageSize(), PROT_READ, MAP_PRIVATE,
                                 fd, 0));
    /* If we don't manage to map at the right address, just give up. */
    if (mem != base) {
      DEBUG_LOG("Couldn't read program headers");
      return;
    }
  }
  /* Sanity check: the first bytes at the base address should be an ELF
   * header. */
  if (!Elf::Ehdr::validate(base)) {
    DEBUG_LOG("Couldn't find program base");
    return;
  }
  /* Search for the program PT_DYNAMIC segment */
  Array<Elf::Dyn> dyns;
  for (Array<Elf::Phdr>::iterator phdr = phdrs.begin(); phdr < phdrs.end();
       ++phdr) {
    /* While the program headers are expected within the first mapped page of
     * the program executable, the executable PT_LOADs may actually make them
     * loaded at an address that is not the wanted base address of the
     * library. We thus need to adjust the base address, compensating for the
     * virtual address of the PT_LOAD segment corresponding to offset 0. */
    if (phdr->p_type == PT_LOAD && phdr->p_offset == 0)
      base -= phdr->p_vaddr;
    if (phdr->p_type == PT_DYNAMIC)
      dyns.Init(base + phdr->p_vaddr, phdr->p_filesz);
  }
  if (!dyns) {
    DEBUG_LOG("Failed to find PT_DYNAMIC section in program");
    return;
  }
  /* Search for the DT_DEBUG information */
  for (Array<Elf::Dyn>::iterator dyn = dyns.begin(); dyn < dyns.end(); ++dyn) {
    if (dyn->d_tag == DT_DEBUG) {
      dbg = reinterpret_cast<r_debug *>(dyn->d_un.d_ptr);
      break;
    }
  }
  DEBUG_LOG("DT_DEBUG points at %p", static_cast<void *>(dbg));
}
/**
* Helper class to ensure the given pointer is writable within the scope of
* an instance. Permissions to the memory page where the pointer lies are
* restored to their original value when the instance is destroyed.
*/
class EnsureWritable
{
public:
  /* Makes the page(s) containing [ptr, ptr + length_) writable for the
   * lifetime of the instance. Crashes if the range's current protection
   * can't be determined or the range spans multiple distinct mappings. */
  template <typename T>
  EnsureWritable(T *ptr, size_t length_ = sizeof(T))
  {
    MOZ_ASSERT(length_ < PageSize());
    prot = -1;
    page = MAP_FAILED;
    char *firstPage = PageAlignedPtr(reinterpret_cast<char *>(ptr));
    char *lastPageEnd = PageAlignedEndPtr(reinterpret_cast<char *>(ptr) + length_);
    length = lastPageEnd - firstPage;
    uintptr_t start = reinterpret_cast<uintptr_t>(firstPage);
    uintptr_t end;
    prot = getProt(start, &end);
    if (prot == -1 || (start + length) > end)
      MOZ_CRASH();
    /* Nothing to do when the range is already writable; `page` stays at
     * MAP_FAILED so the destructor won't restore anything. */
    if (prot & PROT_WRITE)
      return;
    page = firstPage;
    mprotect(page, length, prot | PROT_WRITE);
  }
  /* Restores the original page protection, if it was changed. */
  ~EnsureWritable()
  {
    if (page != MAP_FAILED) {
      mprotect(page, length, prot);
    }
  }
private:
  /* Returns the PROT_* flags for the mapping containing addr (and its
   * end address in *end), by parsing /proc/self/maps. Returns -1 on
   * parse failure or when addr is not in any mapping. */
  int getProt(uintptr_t addr, uintptr_t *end)
  {
    /* The interesting part of the /proc/self/maps format looks like:
     * startAddr-endAddr rwxp */
    int result = 0;
    AutoCloseFILE f(fopen("/proc/self/maps", "r"));
    while (f) {
      unsigned long long startAddr, endAddr;
      char perms[5];
      if (fscanf(f, "%llx-%llx %4s %*1024[^\n] ", &startAddr, &endAddr, perms) != 3)
        return -1;
      if (addr < startAddr || addr >= endAddr)
        continue;
      if (perms[0] == 'r')
        result |= PROT_READ;
      else if (perms[0] != '-')
        return -1;
      if (perms[1] == 'w')
        result |= PROT_WRITE;
      else if (perms[1] != '-')
        return -1;
      if (perms[2] == 'x')
        result |= PROT_EXEC;
      else if (perms[2] != '-')
        return -1;
      *end = endAddr;
      return result;
    }
    return -1;
  }
  int prot;       // original protection flags, restored on destruction
  void *page;     // first affected page, or MAP_FAILED when untouched
  size_t length;  // page-rounded length of the affected range
};
/**
* The system linker maintains a doubly linked list of library it loads
* for use by the debugger. Unfortunately, it also uses the list pointers
* in a lot of operations and adding our data in the list is likely to
* trigger crashes when the linker tries to use data we don't provide or
* that fall off the amount data we allocated. Fortunately, the linker only
* traverses the list forward and accesses the head of the list from a
* private pointer instead of using the value in the r_debug structure.
* This means we can safely add members at the beginning of the list.
* Unfortunately, gdb checks the coherency of l_prev values, so we have
* to adjust the l_prev value for the first element the system linker
* knows about. Fortunately, it doesn't use l_prev, and the first element
* is not ever going to be released before our elements, since it is the
* program executable, so the system linker should not be changing
* r_debug::r_map.
*/
/* Prepends the given link_map entry to the debugger's r_debug list,
 * signaling the debugger via r_brk around the update (RT_ADD then
 * RT_CONSISTENT), per the r_debug protocol. */
void
ElfLoader::DebuggerHelper::Add(ElfLoader::link_map *map)
{
  if (!dbg->r_brk)
    return;
  dbg->r_state = r_debug::RT_ADD;
  dbg->r_brk();
  map->l_prev = nullptr;
  map->l_next = dbg->r_map;
  if (!firstAdded) {
    firstAdded = map;
    /* When adding a library for the first time, r_map points to data
     * handled by the system linker, and that data may be read-only */
    EnsureWritable w(&dbg->r_map->l_prev);
    dbg->r_map->l_prev = map;
  } else
    dbg->r_map->l_prev = map;
  dbg->r_map = map;
  dbg->r_state = r_debug::RT_CONSISTENT;
  dbg->r_brk();
}
/* Unlinks the given link_map entry from the debugger's r_debug list,
 * signaling the debugger via r_brk around the update (RT_DELETE then
 * RT_CONSISTENT). */
void
ElfLoader::DebuggerHelper::Remove(ElfLoader::link_map *map)
{
  if (!dbg->r_brk)
    return;
  dbg->r_state = r_debug::RT_DELETE;
  dbg->r_brk();
  if (dbg->r_map == map)
    dbg->r_map = map->l_next;
  else if (map->l_prev) {
    map->l_prev->l_next = map->l_next;
  }
  if (map == firstAdded) {
    /* The element before our oldest entry becomes the new boundary with
     * the system linker's entries. */
    firstAdded = map->l_prev;
    /* When removing the first added library, its l_next is going to be
     * data handled by the system linker, and that data may be read-only */
    EnsureWritable w(&map->l_next->l_prev);
    map->l_next->l_prev = map->l_prev;
  } else if (map->l_next) {
    map->l_next->l_prev = map->l_prev;
  }
  dbg->r_state = r_debug::RT_CONSISTENT;
  dbg->r_brk();
}
#if defined(ANDROID)
/* As some system libraries may be calling signal() or sigaction() to
* set a SIGSEGV handler, effectively breaking MappableSeekableZStream,
* or worse, restore our SIGSEGV handler with wrong flags (which using
* signal() will do), we want to hook into the system's sigaction() to
* replace it with our own wrapper instead, so that our handler is never
* replaced. We used to only do that with libraries this linker loads,
* but it turns out at least one system library does call signal() and
* breaks us (libsc-a3xx.so on the Samsung Galaxy S4).
* As libc's signal (bsd_signal/sysv_signal, really) calls sigaction
* under the hood, instead of calling the signal system call directly,
* we only need to hook sigaction. This is true for both bionic and
* glibc.
*/
/* libc's sigaction */
extern "C" int
sigaction(int signum, const struct sigaction *act,
struct sigaction *oldact);
/* Simple reimplementation of sigaction. This is roughly equivalent
* to the assembly that comes in bionic, but not quite equivalent to
* glibc's implementation, so we only use this on Android. */
/* Invokes the sigaction system call directly, bypassing libc's wrapper
 * (and thus any hook on it). Android-only; see comment above. */
int
sys_sigaction(int signum, const struct sigaction *act,
              struct sigaction *oldact)
{
  return syscall(__NR_sigaction, signum, act, oldact);
}
/* Replace the first instructions of the given function with a jump
 * to the given new function. */
template <typename T>
static bool
Divert(T func, T new_func)
{
  void *ptr = FunctionPtr(func);
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
#if defined(__i386__)
  // A 32-bit jump is a 5 bytes instruction.
  EnsureWritable w(ptr, 5);
  *reinterpret_cast<unsigned char *>(addr) = 0xe9; // jmp
  *reinterpret_cast<intptr_t *>(addr + 1) =
    reinterpret_cast<uintptr_t>(new_func) - addr - 5; // target displacement
  return true;
#elif defined(__arm__)
  /* On ARM, the function prologue is overwritten with a trampoline.
   * Thumb entry points get a short thumb stub switching to ARM mode
   * before reaching the ARM part; ARM entry points use the ARM part
   * directly.
   * NOTE(review): a Thumb nop (mov r8, r8) is conventionally encoded as
   * the little-endian byte pair 0xc0, 0x46 — confirm the 0x46, 0x04
   * pairs below are the intended encoding. */
  const unsigned char trampoline[] = {
                            // .thumb
    0x46, 0x04,             // nop
    0x78, 0x47,             // bx pc
    0x46, 0x04,             // nop
                            // .arm
    0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc, #-4]
                            // .word <new_func>
  };
  const unsigned char *start;
  if (addr & 0x01) {
    /* Function is thumb, the actual address of the code is without the
     * least significant bit. */
    addr--;
    /* The arm part of the trampoline needs to be 32-bit aligned */
    if (addr & 0x02)
      start = trampoline;
    else
      start = trampoline + 2;
  } else {
    /* Function is arm, we only need the arm part of the trampoline */
    start = trampoline + 6;
  }
  size_t len = sizeof(trampoline) - (start - trampoline);
  /* Make the code writable, copy the trampoline, store the target
   * address read by the final ldr right after it, then flush the
   * instruction cache for the patched range. */
  EnsureWritable w(reinterpret_cast<void *>(addr), len + sizeof(void *));
  memcpy(reinterpret_cast<void *>(addr), start, len);
  *reinterpret_cast<void **>(addr + len) = FunctionPtr(new_func);
  cacheflush(addr, addr + len + sizeof(void *), 0);
  return true;
#else
  /* Unsupported architecture: no diversion performed. */
  return false;
#endif
}
#else
#define sys_sigaction sigaction
template <typename T>
static bool
Divert(T func, T new_func)
{
return false;
}
#endif
namespace {

/* Clock that only accounts for time spent in the current process.
 * Returns nanoseconds of process CPU time, or 0 when the clock is
 * unavailable. */
static uint64_t ProcessTimeStamp_Now()
{
  struct timespec ts;
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) != 0) {
    return 0;
  }
  return static_cast<uint64_t>(ts.tv_sec) * 1000000000 +
         static_cast<uint64_t>(ts.tv_nsec);
}

} /* anonymous namespace */
/* Data structure used to pass data to the temporary signal handler,
 * as well as triggering a test crash. */
struct TmpData {
  /* Written while the containing page is PROT_NONE to deliberately
   * trigger a SIGSEGV; volatile so the faulting store cannot be
   * optimized away. */
  volatile int crash_int;
  /* Process-CPU timestamp taken just before the test crash; the test
   * handler subtracts it to measure signal-delivery latency. */
  volatile uint64_t crash_timestamp;
};
/* Probe whether SIGSEGV handling is usable on this device: install a
 * temporary handler, deliberately fault on a page we just made
 * PROT_NONE, and let test_handler record whether si_addr is reliable
 * (signalHandlingBroken) and whether delivery is fast enough
 * (signalHandlingSlow). The previously installed handler is restored
 * before returning. */
SEGVHandler::SEGVHandler()
: initialized(false), registeredHandler(false), signalHandlingBroken(true)
, signalHandlingSlow(true)
{
  /* Ensure logging is initialized before the DEBUG_LOG in the test_handler.
   * As this constructor runs before the ElfLoader constructor (by effect
   * of ElfLoader inheriting from this class), this also initializes on behalf
   * of ElfLoader and DebuggerHelper. */
  Logging::Init();
  /* Initialize oldStack.ss_flags to an invalid value when used to set
   * an alternative stack, meaning we haven't got information about the
   * original alternative stack and thus don't mean to restore it in
   * the destructor. */
  oldStack.ss_flags = SS_ONSTACK;
  /* Get the current segfault signal handler. */
  struct sigaction old_action;
  sys_sigaction(SIGSEGV, nullptr, &old_action);
  /* Some devices don't provide useful information to their SIGSEGV handlers,
   * making it impossible for on-demand decompression to work. To check if
   * we're on such a device, setup a temporary handler and deliberately
   * trigger a segfault. The handler will set signalHandlingBroken if the
   * provided information is bogus.
   * Some other devices have a kernel option enabled that makes SIGSEGV handler
   * have an overhead so high that it affects how on-demand decompression
   * performs. The handler will also set signalHandlingSlow if the triggered
   * SIGSEGV took too much time. */
  struct sigaction action;
  action.sa_sigaction = &SEGVHandler::test_handler;
  sigemptyset(&action.sa_mask);
  action.sa_flags = SA_SIGINFO | SA_NODEFER;
  action.sa_restorer = nullptr;
  /* One anonymous page doubles as the TmpData scratch area and as the
   * page whose protection is revoked to trigger the test fault. */
  stackPtr.Assign(MemoryRange::mmap(nullptr, PageSize(),
                                    PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (stackPtr.get() == MAP_FAILED)
    return;
  if (sys_sigaction(SIGSEGV, &action, nullptr))
    return;
  TmpData *data = reinterpret_cast<TmpData*>(stackPtr.get());
  data->crash_timestamp = ProcessTimeStamp_Now();
  /* Revoke access, then store: the store faults into test_handler, which
   * re-enables access so the store can complete on retry. */
  mprotect(stackPtr, stackPtr.GetLength(), PROT_NONE);
  data->crash_int = 123;
  /* Restore the original segfault signal handler. */
  sys_sigaction(SIGSEGV, &old_action, nullptr);
  stackPtr.Assign(MAP_FAILED, 0);
}
/* Second-stage setup, run after the constructor's probe: diverts libc's
 * sigaction to __wrap_sigaction, installs an alternative signal stack if
 * the existing one is missing or too small, and registers our SIGSEGV
 * handler with the kernel. Returns early — leaving on-demand
 * decompression handling disabled — when the probe found signal handling
 * broken or slow, or when any setup step fails. */
void
SEGVHandler::FinishInitialization()
{
  /* Ideally, we'd need some locking here, but in practice, we're not
   * going to race with another thread. */
  initialized = true;
  if (signalHandlingBroken || signalHandlingSlow)
    return;
  typedef int (*sigaction_func)(int, const struct sigaction *,
                                struct sigaction *);
  sigaction_func libc_sigaction;
#if defined(ANDROID)
  /* Android > 4.4 comes with a sigaction wrapper in a LD_PRELOADed library
   * (libsigchain) for ART. That wrapper kind of does the same trick as we
   * do, so we need extra care in handling it.
   * - Divert the libc's sigaction, assuming the LD_PRELOADed library uses
   *   it under the hood (which is more or less true according to the source
   *   of that library, since it's doing a lookup in RTLD_NEXT)
   * - With the LD_PRELOADed library in place, all calls to sigaction from
   *   from system libraries will go to the LD_PRELOADed library.
   * - The LD_PRELOADed library calls to sigaction go to our __wrap_sigaction.
   * - The calls to sigaction from libraries faulty.lib loads are sent to
   *   the LD_PRELOADed library.
   * In practice, for signal handling, this means:
   * - The signal handler registered to the kernel is ours.
   * - Our handler redispatches to the LD_PRELOADed library's if there's a
   *   segfault we don't handle.
   * - The LD_PRELOADed library redispatches according to whatever system
   *   library or faulty.lib-loaded library set with sigaction.
   *
   * When there is no sigaction wrapper in place:
   * - Divert the libc's sigaction.
   * - Calls to sigaction from system library and faulty.lib-loaded libraries
   *   all go to the libc's sigaction, which end up in our __wrap_sigaction.
   * - The signal handler registered to the kernel is ours.
   * - Our handler redispatches according to whatever system library or
   *   faulty.lib-loaded library set with sigaction.
   */
  void *libc = dlopen("libc.so", RTLD_GLOBAL | RTLD_LAZY);
  if (libc) {
    /*
     * Lollipop bionic only has a small trampoline in sigaction, with the real
     * work happening in __sigaction. Divert there instead of sigaction if it exists.
     * Bug 1154803
     */
    libc_sigaction = reinterpret_cast<sigaction_func>(dlsym(libc, "__sigaction"));
    if (!libc_sigaction) {
      libc_sigaction =
        reinterpret_cast<sigaction_func>(dlsym(libc, "sigaction"));
    }
  } else
#endif
  {
    /* Non-Android (or libc lookup failure): divert the sigaction symbol
     * resolved in this process. */
    libc_sigaction = sigaction;
  }
  if (!Divert(libc_sigaction, __wrap_sigaction))
    return;
  /* Setup an alternative stack if the already existing one is not big
   * enough, or if there is none. */
  if (sigaltstack(nullptr, &oldStack) == 0) {
    /* The constructor uses SS_ONSTACK in ss_flags as a "nothing to
     * restore" sentinel; a genuine SS_ONSTACK result is normalized to 0
     * so the destructor does restore the original stack. */
    if (oldStack.ss_flags == SS_ONSTACK)
      oldStack.ss_flags = 0;
    if (!oldStack.ss_sp || oldStack.ss_size < stackSize) {
      stackPtr.Assign(MemoryRange::mmap(nullptr, stackSize,
                                        PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (stackPtr.get() == MAP_FAILED)
        return;
      stack_t stack;
      stack.ss_sp = stackPtr;
      stack.ss_size = stackSize;
      stack.ss_flags = 0;
      if (sigaltstack(&stack, nullptr) != 0)
        return;
    }
  }
  /* Register our own handler, and store the already registered one in
   * SEGVHandler's struct sigaction member */
  /* NOTE(review): |action| here appears to be the member itself (no local
   * of that name is declared in this function), so |&action| and
   * |&this->action| alias the same object; this relies on sigaction
   * reading the new action before writing out the old one, and on the
   * member's sa_mask being zero-initialized — confirm both. */
  action.sa_sigaction = &SEGVHandler::handler;
  action.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
  registeredHandler = !sys_sigaction(SIGSEGV, &action, &this->action);
}
/* Undo what FinishInitialization set up: put back the previously active
 * alternate signal stack (unless ss_flags still holds the SS_ONSTACK
 * "nothing recorded" sentinel set by the constructor) and the previous
 * SIGSEGV handler (if ours was successfully registered). */
SEGVHandler::~SEGVHandler()
{
  /* Restore alternative stack for signals */
  if (oldStack.ss_flags != SS_ONSTACK)
    sigaltstack(&oldStack, nullptr);
  /* Restore original signal handler */
  if (registeredHandler)
    sys_sigaction(SIGSEGV, &this->action, nullptr);
}
/* Test handler for a deliberately triggered SIGSEGV that determines whether
 * useful information is provided to signal handlers, particularly whether
 * si_addr is filled in properly, and whether the segfault handler is called
 * quickly enough. */
void SEGVHandler::test_handler(int signum, siginfo_t *info, void *context)
{
  SEGVHandler &that = ElfLoader::Singleton;
  /* si_addr must match the start of the page the constructor protected;
   * anything else means the kernel's fault information is unusable. */
  if (signum == SIGSEGV && info &&
      info->si_addr == that.stackPtr.get())
    that.signalHandlingBroken = false;
  /* Re-enable access so the constructor's faulting store completes on
   * retry instead of faulting again. */
  mprotect(that.stackPtr, that.stackPtr.GetLength(), PROT_READ | PROT_WRITE);
  TmpData *data = reinterpret_cast<TmpData*>(that.stackPtr.get());
  /* Elapsed process-CPU time between triggering the fault and getting
   * here, i.e. the signal-delivery latency. */
  uint64_t latency = ProcessTimeStamp_Now() - data->crash_timestamp;
  DEBUG_LOG("SEGVHandler latency: %" PRIu64, latency);
  /* See bug 886736 for timings on different devices, 150 µs is reasonably above
   * the latency on "working" devices and seems to be short enough to not incur
   * a huge overhead to on-demand decompression. */
  if (latency <= 150000)
    that.signalHandlingSlow = false;
}
/* TODO: "properly" handle signal masks and flags */
/* Main SIGSEGV handler: if the fault is an access error within the
 * address space of a library this linker loaded, let that library's
 * mappable fault the data in and resume; otherwise redispatch to
 * whatever handler was recorded through __wrap_sigaction. */
void SEGVHandler::handler(int signum, siginfo_t *info, void *context)
{
  //ASSERT(signum == SIGSEGV);
  DEBUG_LOG("Caught segmentation fault @%p", info->si_addr);
  /* Check whether we segfaulted in the address space of a CustomElf. We're
   * only expecting that to happen as an access error. */
  if (info->si_code == SEGV_ACCERR) {
    RefPtr<LibHandle> handle =
      ElfLoader::Singleton.GetHandleByPtr(info->si_addr);
    BaseElf *elf;
    if (handle && (elf = handle->AsBaseElf())) {
      DEBUG_LOG("Within the address space of %s", handle->GetPath());
      /* If on-demand decompression made the address accessible, simply
       * resume execution at the faulting instruction. */
      if (elf->mappable && elf->mappable->ensure(info->si_addr)) {
        return;
      }
    }
  }
  /* Redispatch to the registered handler */
  SEGVHandler &that = ElfLoader::Singleton;
  if (that.action.sa_flags & SA_SIGINFO) {
    DEBUG_LOG("Redispatching to registered handler @%p",
              FunctionPtr(that.action.sa_sigaction));
    that.action.sa_sigaction(signum, info, context);
  } else if (that.action.sa_handler == SIG_DFL) {
    DEBUG_LOG("Redispatching to default handler");
    /* Reset the handler to the default one, and trigger it. */
    sys_sigaction(signum, &that.action, nullptr);
    raise(signum);
  } else if (that.action.sa_handler != SIG_IGN) {
    DEBUG_LOG("Redispatching to registered handler @%p",
              FunctionPtr(that.action.sa_handler));
    that.action.sa_handler(signum);
  } else {
    DEBUG_LOG("Ignoring");
  }
}
int
SEGVHandler::__wrap_sigaction(int signum, const struct sigaction *act,
struct sigaction *oldact)
{
SEGVHandler &that = ElfLoader::Singleton;
/* Use system sigaction() function for all but SIGSEGV signals. */
if (!that.registeredHandler || (signum != SIGSEGV))
return sys_sigaction(signum, act, oldact);
if (oldact)
*oldact = that.action;
if (act)
that.action = *act;
return 0;
}
/* Definition of the process-wide Logging instance initialized via
 * Logging::Init() in the SEGVHandler constructor. */
Logging Logging::Singleton;
|