/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>
#include <uuid/uuid.h>
#include <mach-o/dyld_images.h>
#include <libc_private.h>

#include <vector>
#include <algorithm>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "Closure.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"
#include "RootsChecker.h"

#include "objc-shared-cache.h"

extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
extern "C" int  __cxa_atexit(void (*func)(void*), void* arg, void* dso);

VIS_HIDDEN void* __ptrauth_dyld_address_auth gUseDyld3 = nullptr;

namespace dyld3 {

/////////////////////  AllImages  ////////////////////////////

AllImages gAllImages;

void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress,
                     const char* dyldCachePath, const Array<LoadedImage>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    if ( _dyldCacheAddress ) {
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - dyldCacheLoadAddress->unslidLoadAddress();
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}

void AllImages::setProgramVars(ProgramVars* vars, bool keysOff, bool osBinariesOnly)
{
    _programVars = vars;
    _archs = &GradedArchs::forCurrentOS(keysOff, osBinariesOnly);
}

void AllImages::setLaunchMode(uint32_t flags)
{
    _launchMode = flags;
}

AllImages::MainFunc AllImages::getDriverkitMain()
{
    return _driverkitMain;
}

void AllImages::setDriverkitMain(MainFunc mainFunc)
{
    _driverkitMain = mainFunc;
}

void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}

void AllImages::setHasCacheOverrides(bool someCacheImageOverriden)
{
    _someImageOverridden = someCacheImageOverriden;
}

bool AllImages::hasCacheOverrides() const
{
    return _someImageOverridden;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    runImageCallbacks(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}
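// Note: all three lock wrappers below currently funnel through the same
// recursive _globalLock (an os_unfair_recursive_lock when available, otherwise
// a pthread mutex), so "read" vs "write" here describes intent rather than a
// true reader/writer lock.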
void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::mirrorToOldAllImageInfos()
{
    withReadLock(^(){
        // set infoArray to NULL to denote it is in-use
        _oldAllImageInfos->infoArray = nullptr;

        // if array not large enough, re-alloc it
        uint32_t imageCount = (uint32_t)_loadedImages.count();
        if ( _oldArrayAllocCount < imageCount ) {
            uint32_t newAllocCount    = imageCount + 16;
            dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
            if ( _oldAllImageArray != nullptr ) {
                ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
                ::free(_oldAllImageArray);
            }
            _oldAllImageArray   = newArray;
            _oldArrayAllocCount = newAllocCount;
        }

        // fill out array to mirror current image list
        int index = 0;
        for (const LoadedImage& li : _loadedImages) {
            _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
            _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
            _oldAllImageArray[index].imageFileModDate = 0;
            ++index;
        }

        // set infoArray back to base address of array (so other process can now read)
        _oldAllImageInfos->infoArrayCount           = imageCount;
        _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
        _oldAllImageInfos->infoArray                = _oldAllImageArray;

        // <rdar://problem/42668846> update UUID array if needed
        uint32_t nonCachedCount = 1; // always add dyld
        for (const LoadedImage& li : _loadedImages) {
            if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache() )
                ++nonCachedCount;
        }
        if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
            // set uuidArray to NULL to denote it is in-use
            _oldAllImageInfos->uuidArray = nullptr;
            // make sure allocation can hold all uuids
            if ( _oldUUIDAllocCount < nonCachedCount ) {
                uint32_t newAllocCount   = (nonCachedCount + 3) & (-4); // round up to multiple of 4
                dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
                if ( _oldUUIDArray != nullptr )
                    ::free(_oldUUIDArray);
                _oldUUIDArray      = newArray;
                _oldUUIDAllocCount = newAllocCount;
            }
            // add dyld then all images not in dyld cache
            const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
            _oldUUIDArray[0].imageLoadAddress = dyldMF;
            dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
            index = 1;
            for (const LoadedImage& li : _loadedImages) {
                if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache() ) {
                    _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                    li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                    ++index;
                }
            }
            // set uuidArray back to base address of array (so kernel can now read)
            _oldAllImageInfos->uuidArray      = _oldUUIDArray;
            _oldAllImageInfos->uuidArrayCount = nonCachedCount;
        }
    });
}

void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
    });
}
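// Publication-order note for the lock-free readers of _immutableRanges: each
// slot's 'end' is stored before its 'start', and a non-zero 'start' is the
// validity flag, so a reader that observes 'start' is guaranteed to also
// observe a matching 'end'.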
void AllImages::addImmutableRange(uintptr_t start, uintptr_t end)
{
    //fprintf(stderr, "AllImages::addImmutableRange(0x%09lX, 0x%09lX)\n", start, end);
    // first look in existing range buckets for empty slot
    ImmutableRanges* lastRange = nullptr;
    for (ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        lastRange = ranges;
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 ) {
                // set 'end' before 'start' so readers always see consistent state
                ranges->array[i].end.store(end, std::memory_order_release);
                ranges->array[i].start.store(start, std::memory_order_release);
                return;
            }
        }
    }
    // if we got here, there are no empty slots, so add new ImmutableRanges
    const uintptr_t newSize = 15; // allocation is 256 bytes on 64-bit processes
    ImmutableRanges* newRange = (ImmutableRanges*)calloc(offsetof(ImmutableRanges, array[newSize]), 1);
    newRange->arraySize = newSize;
    newRange->array[0].end.store(end, std::memory_order_release);
    newRange->array[0].start.store(start, std::memory_order_release);
    // tie into previous list last
    lastRange->next.store(newRange, std::memory_order_release);
}
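// runImageNotifiers() is the "tell the world" half of adding images: it mirrors
// the image list into the old all_image_infos struct for the debugger, logs the
// loads, and emits one kdebug trace event per image. The registered user
// callbacks run separately, in runImageCallbacks().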
void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // if any added image is not in the shared cache, recompute bounds
    for (const LoadedImage& li : newImages) {
        if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
            recomputeBounds();
            break;
        }
    }

    // update immutable ranges
    for (const LoadedImage& li : newImages) {
        if ( !li.image()->inDyldCache() && li.image()->neverUnload() ) {
            uintptr_t baseAddr = (uintptr_t)li.loadedAddress();
            li.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
                if ( (permissions & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ ) {
                    addImmutableRange(baseAddr + (uintptr_t)vmOffset, (uintptr_t)(baseAddr + vmOffset + vmSize));
                }
            });
        }
    }

    // log loads
    for (const LoadedImage& li : newImages) {
        const char* path = imagePath(li.image());
        uuid_t imageUUID;
        if ( li.image()->getUuid(imageUUID) ) {
            uuid_string_t imageUUIDStr;
            uuid_unparse_upper(imageUUID, imageUUIDStr);
            log_loads("dyld: <%s> %s\n", imageUUIDStr, path);
        }
        else {
            log_loads("dyld: %s\n", path);
        }
    }

    // call kdebug trace for each image
    if ( kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A)) ) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t      uuid;
            image->getUuid(uuid);
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( !li.loadedAddress()->inDyldCache() && (dyld3::stat(path, &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
}
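// runImageCallbacks() drives the user-visible load callbacks: every function
// registered via _dyld_register_func_for_add_image() (and the two internal
// variants) is called for each new image, and then objc is told about any of
// the new images that contain Objective-C metadata.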
void AllImages::runImageCallbacks(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_add_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _loadNotifiers) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
        for (LoadNotifyFunc func : _loadNotifiers2) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), li.image()->path(), false);
                else
                    func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
            }
        }
        for (BulkLoadNotifier func : _loadBulkNotifiers) {
            const mach_header* mhs[count];
            const char*        paths[count];
            for (unsigned i=0; i < count; ++i) {
                mhs[i]   = newImages[i].loadedAddress();
                paths[i] = newImages[i].image()->path();
            }
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with %d images\n", func, count);
            func(count, mhs, paths);
        }
    });

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

#if !TARGET_OS_DRIVERKIT
    // FIXME: This may make more sense in runImageNotifiers, but the present order
    // is after callbacks. Can we safely move it?
    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
#endif
}
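// removeImages() is the unload counterpart: remove-image callbacks and the objc
// unmapped notifier run first (while the images are still mapped), then the
// images are dropped from _loadedImages, the debugger is told, and finally the
// images are actually unmapped.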
void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _unloadNotifiers) {
            for (const LoadedImage& li : unloadImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
    });

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

    // call kdebug trace for each image
    if ( kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A)) ) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t      uuid;
            image->getUuid(uuid);
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( dyld3::stat(path, &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
        recomputeBounds();
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}

// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}

uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}

bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}

const char* AllImages::imagePathByIndex(uint32_t index) const
{
    __block const char* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = imagePath(_loadedImages[index].image());
            return;
        }
    });
    return result;
}

const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    __block const mach_header* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = _loadedImages[index].loadedAddress();
            return;
        }
    });
    return result;
}

bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^(){
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result     = true;
                break;
            }
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        bool stop = false;
        for (const LoadedImage& li : *_initialImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
        return;
    }

    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}

const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid + textSegmentSize) ) {
                    result = installName;
                    stop   = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}

void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}
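// Out-parameter variant of infoForImageMappedAt(): same fast-path-then-slow-path
// search, but instead of invoking a callback it fills in whichever of
// ml/textSize/path are non-NULL and returns true if 'addr' lies inside some
// loaded image.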
bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid + textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    stop   = true;
                    result = true;
                }
            });
            if ( result )
                return result;
            // in shared cache, but not in a TEXT segment, do slow search of all loaded cache images
            withReadLock(^{
                for (const LoadedImage& li : _loadedImages) {
                    if ( ((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                        uint8_t permissions;
                        if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                            if ( ml != nullptr )
                                *ml = li.loadedAddress();
                            if ( path != nullptr )
                                *path = li.image()->path();
                            if ( textSize != nullptr )
                                *textSize = li.image()->textSize();
                            result = true;
                            break;
                        }
                    }
                }
            });
            return result;
        }
    }

    // address not in dyld cache, check each non-cache image
    infoForNonCachedImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}

// same as infoForImageMappedAt(), but only look at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}
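// immutableMemory() answers "can this buffer never be unmapped or rewritten?".
// Three checks, cheapest first: read-only shared cache memory, then a quick
// bounds test against all non-cached images, then the _immutableRanges list of
// read-only segments of never-unload images.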
bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    // check to see if it is outside the range of any loaded image
    if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr + length > _highestNonCached) ) {
        return false;
    }

    // check immutable ranges
    for (const ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 )
                break; // no more entries in use
            if ( (ranges->array[i].start.load(std::memory_order_acquire) <= (uintptr_t)addr)
              && (ranges->array[i].end.load(std::memory_order_acquire) > ((uintptr_t)addr) + length) )
                return true;
        }
    }

    return false;
}
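// resolveTarget() converts a closure ResolvedSymbolTarget into a runtime
// address: shared-cache targets are offsets from the cache base, image targets
// are offsets from that image's load address, and absolute targets appear to be
// stored in a narrower signed bitfield, so bit 62 is sign-extended back into
// the top bits by OR-ing in 0xC000000000000000.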
uintptr_t AllImages::resolveTarget(closure::Image::ResolvedSymbolTarget target) const
{
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage: {
            LoadedImage info;
            bool foundImage = findImageNum(target.image.imageNum, info);
            assert(foundImage);
            return (uintptr_t)(info.loadedAddress()) + (uintptr_t)target.image.offset;
        }

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}

void* AllImages::interposeValue(void* value) const
{
    if ( !_mainClosure->hasInterposings() )
        return value;

    __block void* replacementValue = nullptr;
    __block bool  foundReplacement = false;
    _mainClosure->forEachInterposingTuple(^(const closure::InterposingTuple& tuple, bool& stop) {
        void* stockPointer = (void*)resolveTarget(tuple.stockImplementation);
        if ( stockPointer == value ) {
            replacementValue = (void*)resolveTarget(tuple.newImplementation);
            foundReplacement = true;
            stop             = true;
        }
    });

    if ( foundReplacement )
        return replacementValue;

    return value;
}

void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                handler(li);
                break;
            }
        }
    });
}

bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    bool result = false;
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            result     = true;
            break;
        }
    }

    return result;
}

const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}

void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        // <rdar://58466613> if there is an override of some dyld cache dylib, we need to store the override ImageNum in the visited set
        if ( depImageNum != depLi.image()->imageNum() ) {
            depImageNum = depLi.image()->imageNum();
            if ( visited.contains(depImageNum) )
                return;
        }
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
    }
}

void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}

const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImageNum());
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const closure::Image* image) const
{
#if TARGET_OS_IPHONE
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}

dyld_platform_t AllImages::platform() const
{
    return (dyld_platform_t)oldAllImageInfo()->platform;
}

const GradedArchs& AllImages::archs() const
{
    return *_archs;
}

void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }

    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
                break;
            }
            return;
        }
    }
    if ( doCollect )
        garbageCollectImages();
}

#if TARGET_OS_OSX
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}

bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle, void (^handler)(OFIInfo& image))
{
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                return;
            }
        }
    });
    return foundImage;
}

void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
#endif
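// Reaper implements the sweep for garbageCollectImages(): given the set of
// potentially unloadable images, it marks those still directly dlopen()ed or
// reachable from in-use images, then finalizes and unmaps whatever remains dead.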
class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
    Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void garbageCollect();
    void finalizeDeadImages();

    static void runTerminators(const LoadedImage& li);
private:
    void     markDirectlyDlopenedImagesAsUsed();
    void     markDependentOfInUseImages();
    void     markDependentsOf(const LoadedImage*);
    uint32_t inUseCount();
    void     dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};

Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}

void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-used=%d  %s\n", iu.inUse, iu.li->image()->path());
    //}
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount    = inUseCount();
    bool     countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount    = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(__cxa_range_t, ranges, _deadCount);
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        runTerminators(*iu.li);
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                __cxa_range_t range;
                range.addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                range.length = (size_t)vmSize;
                ranges.push_back(range);
            }
        });
    }
    __cxa_finalize_ranges(ranges.begin(), (uint32_t)ranges.count());
}

void Reaper::runTerminators(const LoadedImage& li)
{
    // <rdar://problem/71820555> Don't run static terminator for arm64e
    const MachOAnalyzer* ma = (MachOAnalyzer*)li.loadedAddress();
    if ( ma->isArch("arm64e") )
        return;

    if ( li.image()->hasTerminators() ) {
        typedef void (*Terminator)();
        li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
            Terminator termFunc = (Terminator)terminator;
            termFunc();
            log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
        });
    }
}
void AllImages::runAllStaticTerminators()
{
    // We want to run terminators in reverse chronological order of initializing
    // Note: initialLoadCount may be larger than what was actually loaded
    const uint32_t currentCount     = (uint32_t)_loadedImages.count();
    const uint32_t initialLoadCount = std::min(_mainClosure->initialLoadCount(), currentCount);

    // first run static terminators of anything dlopen()ed
    for (uint32_t i=currentCount-1; i >= initialLoadCount; --i) {
        Reaper::runTerminators(_loadedImages[i]);
    }

    // next run terminators of statically loaded images, in load order (they were initialized in the reverse of this order)
    for (uint32_t i=0; i < initialLoadCount; ++i) {
        Reaper::runTerminators(_loadedImages[i]);
    }
}

// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let the other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        withReadLock(^{
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                    unloadables.push_back({&li, false});
                    //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
                }
            }
        });
        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges and static terminators of dead images
        reaper.finalizeDeadImages();

        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);
            //fprintf(stderr, "_loadedImages after GC removals:\n");
            //for (const LoadedImage& li : _loadedImages) {
            //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
            //}
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    } while (newCount > 0);
}
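// The notifier-registration functions below share a pattern: a newly registered
// load callback is first invoked for every image already loaded, and only then
// added (under the notifiers lock) to the list used for future loads, so a late
// registrant never misses an image. Unload notifiers only need the second step.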
void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}

void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}

void AllImages::addBulkLoadNotifier(BulkLoadNotifier func)
{
    // callback about already loaded images
    unsigned count = (unsigned)_loadedImages.count();
    const mach_header* mhs[count];
    const char*        paths[count];
    for (unsigned i=0; i < count; ++i) {
        mhs[i]   = _loadedImages[i].loadedAddress();
        paths[i] = _loadedImages[i].image()->path();
    }
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
    log_notifications("dyld: add notifier %p called with %d images\n", func, count);
    func(count, mhs, paths);

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadBulkNotifiers.push_back(func);
    });
}

// Returns true if logs should be sent to stderr as well as syslog.
// Copied from objc which copied it from CFUtilities.c
static bool also_do_stderr(void)
{
    struct stat st;
    int ret = fstat(STDERR_FILENO, &st);
    if ( ret < 0 )
        return false;
    mode_t m = st.st_mode & S_IFMT;
    if ( m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR ) {
        return true;
    }
    return false;
}

// Print "message" to the console. Copied from objc.
static void _objc_syslog(const char* message)
{
    _simple_asl_log(ASL_LEVEL_ERR, NULL, message);

    if ( also_do_stderr() ) {
        write(STDERR_FILENO, message, strlen(message));
    }
}
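// setObjCNotifiers() wires the objc runtime into dyld3: it records the mapped/
// init/unmapped callbacks, adopts the prebuilt objc selector/class/protocol
// hash tables from the launch closure, replays any recorded duplicate-class
// warnings, and immediately calls the mapped notifier for images already loaded.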
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // We couldn't initialize the objc optimized closure data in init() as that needs malloc but runs before malloc initializes.
    // So let's grab the data now and set it up.

    // Pull out the objc selector hash table if we have one
    Array<closure::Image::ObjCSelectorImage> selectorImageNums;
    const closure::ObjCSelectorOpt*          selectorHashTable = nullptr;
    if ( _mainClosure->selectorHashTable(selectorImageNums, selectorHashTable) ) {
        _objcSelectorHashTable = selectorHashTable;
        for (closure::Image::ObjCSelectorImage selectorImage : selectorImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(selectorImage.imageNum, loadedImage);
            assert(found);
            _objcSelectorHashTableImages.push_back( (uintptr_t)loadedImage.loadedAddress() + selectorImage.offset );
        }
    }

    // Pull out the objc class hash table if we have one
    Array<closure::Image::ObjCClassImage> classImageNums;
    const closure::ObjCClassOpt*          classHashTable    = nullptr;
    const closure::ObjCClassOpt*          protocolHashTable = nullptr;
    if ( _mainClosure->classAndProtocolHashTables(classImageNums, classHashTable, protocolHashTable) ) {
        _objcClassHashTable    = (const closure::ObjCClassOpt*)classHashTable;
        _objcProtocolHashTable = (const closure::ObjCClassOpt*)protocolHashTable;
        for (closure::Image::ObjCClassImage classImage : classImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(classImage.imageNum, loadedImage);
            assert(found);
            uintptr_t loadAddress     = (uintptr_t)loadedImage.loadedAddress();
            uintptr_t nameBaseAddress = loadAddress + classImage.offsetOfClassNames;
            uintptr_t dataBaseAddress = loadAddress + classImage.offsetOfClasses;
            _objcClassHashTableImages.push_back({ nameBaseAddress, dataBaseAddress });
        }
    }

    _mainClosure->duplicateClassesHashTable(_objcClassDuplicatesHashTable);
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        // If we have duplicates, those need the objc opt pointer to find dupes
        _dyldCacheObjCOpt = _dyldCacheAddress->objcOpt();
    }

    // ObjC would have issued warnings on duplicate classes. We've recorded those too
    _mainClosure->forEachWarning(closure::Closure::Warning::duplicateObjCClass, ^(const char* warning, bool& stop) {
        Diagnostics diag;
        diag.error("objc[%d]: %s\n", getpid(), warning);
        _objc_syslog(diag.errorMessage());
    });

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs,   maxCount);
    STACK_ALLOC_ARRAY(const char*,        paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
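// applyInterposingToDyldCache() applies the closure's cacheOverrides patch
// entries to the shared cache: for every patchable use of an overridden export
// it rewrites the cached pointer to the replacement value, re-signing the
// pointer on arm64e when the patch location is authenticated.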
void AllImages::applyInterposingToDyldCache(const closure::Closure* closure, mach_port_t mach_task_self)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t               cacheStart              = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum     lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage    = nullptr;
    __block bool                  suspendedAccounting     = false;

    if ( closure->findAttributePayload(closure::TypedBytes::Type::cacheOverrides) == nullptr )
        return;

    // make the cache writable for this block
    DyldSharedCache::DataConstScopedWriter patcher(_dyldCacheAddress, mach_task_self, (DyldSharedCache::DataConstLogFunc)&log_segments);

    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                if ( !findImageNum(entry.replacement.image.imageNum, foundImage) ) {
                    abort_report_np("cannot find replacement imageNum=0x%04X when patching cache to override imageNum=0x%04X\n", entry.replacement.image.imageNum, entry.overriddenDylibInCache);
                }
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        uint32_t lastCachedDylibImageIndex = lastCachedDylibImageNum - (uint32_t)_dyldCacheAddress->cachedDylibsImageArray()->startImageNum();
        _dyldCacheAddress->forEachPatchableUseOfExport(lastCachedDylibImageIndex, entry.exportCacheOffset, ^(dyld_cache_patchable_location patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart + patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.arm64e.authRebase.auth      = true;
                fixupInfo.arm64e.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.arm64e.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.arm64e.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.arm64e.signPointer(loc, newValue + DyldSharedCache::getAddend(patchLocation));
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation));
            *loc = newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
        });
    });

    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}
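// runStartupInitialzers() walks the image list in load order, running each
// image's bottom-up initializer graph, and stops once the main executable's
// initializers have run.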
void AllImages::runStartupInitialzers()
{
    __block bool     mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex                          = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}

// Find image in _loadedImages which has ImageNum == num.
// Try indexHint first; if the hint is wrong, update it so the next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}

// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}

// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use the hint to make that faster in the case where _loadedImages does not move.
// dyld3 pre-builds the order initializers need to be run in (bottom up) as a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use a hint to make that faster in the case where _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage for imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in the process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }
            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());
            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}

void AllImages::runLibSystemInitializer(LoadedImage& libSystem)
{
    // First set the libSystem state to beingInited. This protects against accidentally trying
    // to run its initializers again if a dlopen happens inside libSystem_initializer().
    libSystem.setState(LoadedImage::State::beingInited);

    // run all initializers in libSystem.dylib
    // Note: during libSystem's initialization, libdyld_initializer() is called, which copies _initialImages to _loadedImages
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // update global flag that libSystem has been initialized (so debug tools know it is safe to inject threads)
    _oldAllImageInfos->libSystemInitialized = true;

    // mark libSystem.dylib as inited, so a later recursive-init will not re-run it
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }

    // now that libSystem is up, register a callback that should be called at exit
    __cxa_atexit(&AllImages::runAllStaticTerminatorsHelper, nullptr, nullptr);
}

void AllImages::runAllStaticTerminatorsHelper(void*)
{
    gAllImages.runAllStaticTerminators();
}

void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            // name the timer so it spans the initializer call (an unnamed temporary would be destroyed immediately)
            ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}
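// Explanatory note (not in the original source): Initializer here is the
// C-level entry point dyld hands control to for each initializer recorded in
// the image. A C++ static constructor ultimately arrives through this same
// path; on Darwin it may even observe the extra arguments dyld passes, e.g.:
//
//     __attribute__((constructor))
//     static void myInit(int argc, const char* argv[], const char* envp[])
//     {
//         // runs under runAllInitializersInImage(), after the objc
//         // init notification and before the image is marked 'inited'
//     }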
// Note this is noinline to avoid using too much stack if loadImage has to re-enter dlopen() due to an invalid closure
__attribute__((noinline))
const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal,
                                     bool rtldNoDelete, bool rtldNow, bool fromOFI, const void* callerAddress,
                                     bool canUsePrebuiltSharedCacheClosure)
{
    bool sharedCacheFormatCompatible = (_dyldCacheAddress != nullptr) && (_dyldCacheAddress->header.formatVersion == dyld3::closure::kFormatVersion);
    canUsePrebuiltSharedCacheClosure &= sharedCacheFormatCompatible;

    // quick check if path is in shared cache and already loaded
    if ( _dyldCacheAddress != nullptr ) {
        uint32_t dyldCacheImageIndex;
        if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
            uint64_t mTime;
            uint64_t inode;
            const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
            // Note: we do not need readLock because this is within global dlopen lock
            for (const LoadedImage& li : _loadedImages) {
                if ( li.loadedAddress() == mh ) {
                    return mh;
                }
            }

            // If this is a customer cache, and we have no overrides, then we know for sure the cache closure is valid.
            // This assumes that a libdispatch root would have been loaded on launch, and that root path is not
            // supported with customer caches, which is the case today.
            if ( !rtldNoLoad && !hasInsertedOrInterposingLibraries()
              && (_dyldCacheAddress->header.cacheType == kDyldSharedCacheTypeProduction)
              && sharedCacheFormatCompatible ) {
                const dyld3::closure::ImageArray* images = _dyldCacheAddress->cachedDylibsImageArray();
                const dyld3::closure::Image*      image  = images->imageForNum(dyldCacheImageIndex+1);
                return loadImage(diag, path, image->imageNum(), nullptr, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
            }
        }
    }

    __block closure::ImageNum callerImageNum = 0;
    for (const LoadedImage& li : _loadedImages) {
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }

    // make closure
    closure::ImageNum             topImageNum = 0;
    const closure::DlopenClosure* newClosure  = nullptr;
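    // Explanatory note (not in the original source): a "dlopen closure" is the
    // precomputed load graph for the requested path. The loop below makes at
    // most two attempts: first allowing a prebuilt closure from the shared
    // cache, then forcing the ClosureBuilder to construct a fresh one when the
    // builder signals sRetryDlopenClosure (for example, when a root file on
    // disk overrides a cached dylib, so the prebuilt closure cannot be trusted).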
    // First try with closures from the shared cache permitted.
    // Then try again, forcing a new closure to be built.
    for (bool canUseSharedCacheClosure : { true, false }) {
        // We can only use a shared cache closure if the shared cache format is the same as libdyld.
        canUseSharedCacheClosure &= canUsePrebuiltSharedCacheClosure;
        closure::FileSystemPhysical fileSystem(nullptr, nullptr, _allowEnvPaths);
        RootsChecker rootsChecker;
        closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, rootsChecker, _dyldCacheAddress, true, *_archs, closure::gPathOverrides,
                                   atPathHandling, true, nullptr, (dyld3::Platform)platform());
        newClosure = cb.makeDlopenClosure(path, _mainClosure, _loadedImages.array(), callerImageNum, rtldNoLoad, rtldNow,
                                          canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis("   dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }
        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError() )
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }

    if ( newClosure != nullptr ) {
        // if the new closure contains an ImageArray, add it to the list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis("   dlopen: made %s closure: %p\n", newClosure->topImage()->variantString(), newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis("   dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();
                // If we haven't run the initializers, then we must be in a static initializer in a dlopen.
                if ( li.state() != LoadedImage::State::inited ) {
                    // RTLD_NOLOAD means dlopen should fail unless path is already loaded.
                    // Don't run initializers when RTLD_NOLOAD is set. This only matters if dlopen() is
                    // called from within an initializer, because it can cause initializers to run
                    // out of order. Most uses of RTLD_NOLOAD are "probes". If they want initializers
                    // to run, then don't use RTLD_NOLOAD.
                    if ( !rtldNoLoad ) {
                        runInitialzersBottomUp(li.image());
                    }
                }
                return topLoadAddress;
            }
        }
    }

    return loadImage(diag, path, topImageNum, newClosure, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
}
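// Illustrative usage (not part of the original source): the public dlopen()
// API entry point in libdyld ultimately funnels into this method, roughly:
//
//     Diagnostics diag;
//     const MachOLoaded* mh = gAllImages.dlopen(diag, "/usr/lib/libz.1.dylib",
//                                               false /*rtldNoLoad*/, false /*rtldLocal*/,
//                                               false /*rtldNoDelete*/, true  /*rtldNow*/,
//                                               false /*fromOFI*/, __builtin_return_address(0),
//                                               true  /*canUsePrebuiltSharedCacheClosure*/);
//     if ( mh == nullptr )
//         ; // report diag.errorMessage() through dlerror()
//
// callerAddress is needed so @rpath/@loader_path lookups and RTLD_* scoping
// can be resolved relative to the image that called dlopen().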
// Note this is noinline to avoid having too much stack used in the parent dlopen method
__attribute__((noinline))
const MachOLoaded* AllImages::loadImage(Diagnostics& diag, const char* path,
                                        closure::ImageNum topImageNum, const closure::DlopenClosure* newClosure,
                                        bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI,
                                        const void* callerAddress)
{
    // Note this array is used as the storage for Loader, so it needs to be at least
    // large enough to handle the total number of images needed by the dlopen
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(LoadedImage, newImages, 1024);

    // Note we don't need pre-optimized Objective-C for dlopen closures, but use
    // variables here to make it easier to see what's going on.
    const dyld3::closure::ObjCSelectorOpt*                  selectorOpt = nullptr;
    dyld3::Array<dyld3::closure::Image::ObjCSelectorImage>  selectorImages;

    // run loader to load all new images
    RootsChecker rootsChecker;
    Loader loader(_loadedImages.array(), newImages, _dyldCacheAddress, imagesArrays(),
                  selectorOpt, selectorImages, rootsChecker, (dyld3::Platform)platform(),
                  &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs, !rtldNow);

    // find Image* for top image, look in new closure first
    const closure::Image* topImage = nullptr;
    if ( newClosure != nullptr )
        topImage = newClosure->images()->imageForNum(topImageNum);
    if ( topImage == nullptr )
        topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis("   dlopen: using pre-built %s dlopen closure from dyld shared cache %p\n", topImage->variantString(), topImage);
        else
            log_apis("   dlopen: using pre-built %s dlopen closure %p\n", topImage->variantString(), topImage);
    }
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in allImages array
    bool someCacheImageOverridden = false;
    loader.completeAllDependents(diag, someCacheImageOverridden);
    if ( diag.hasError() )
        return nullptr;
    bool closureOutOfDate;
    bool recoverable;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, &closureOutOfDate, &recoverable);
    if ( diag.hasError() ) {
        // If we used a pre-built shared cache closure, and now found that it was out of date,
        // try again and rebuild a new closure.
        // Note: newClosure is null in the case where we used a prebuilt closure.
        if ( closureOutOfDate && recoverable && (newClosure == nullptr) ) {
            diag.clearError();
            return dlopen(diag, path, false /* rtldNoLoad */, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress, false);
        }
        return nullptr;
    }
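    // Explanatory note (not in the original source): the retry above is the
    // self-healing path for stale prebuilt closures. If a dylib on disk was
    // updated after the shared cache closure was built, mapAndFixupAllImages()
    // reports closureOutOfDate, and we re-enter dlopen() with
    // canUsePrebuiltSharedCacheClosure=false so a fresh closure is built from
    // the files actually on disk. That also bounds the recursion: the second
    // attempt carries a non-null newClosure, so it can never take this branch
    // again.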
    // Record whether we had a root
    _someImageOverridden |= someCacheImageOverridden;

    const MachOLoaded* topLoadAddress = newImages.begin()->loadedAddress();

    // bump dlopen refcount of image directly loaded
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about new images
    addImages(newImages);

    // Run notifiers before applyInterposingToDyldCache() so that we have an
    // accurate image list before any calls to findImage().
    // TODO: Can we move this even earlier, eg, after map images but before fixups?
    runImageNotifiers(newImages);

    // if closure adds images that override dyld cache, patch cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure, mach_task_self());

    runImageCallbacks(newImages);

    // run initializers
    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}

void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}

bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}

bool AllImages::hasInsertedOrInterposingLibraries() const
{
    return _mainClosure->hasInsertedLibraries() || _mainClosure->hasInterposings();
}

void AllImages::takeLockBeforeFork()
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
#endif
}

void AllImages::releaseLockInForkParent()
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock(&_globalLock);
#endif
}

void AllImages::resetLockInForkChild()
{
#if TARGET_OS_SIMULATOR
    // There's no dyld3 on the simulator this year
    assert(false);
#else
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock_forked_child(&_globalLock);
#endif
#endif // TARGET_OS_SIMULATOR
}

const char* AllImages::getObjCSelector(const char* selName) const
{
    if ( _objcSelectorHashTable == nullptr )
        return nullptr;
    return _objcSelectorHashTable->getString(selName, _objcSelectorHashTableImages.array());
}

void AllImages::forEachObjCClass(const char* className,
                                 void (^callback)(void* classPtr, bool isLoaded, bool* stop)) const
{
    if ( _objcClassHashTable == nullptr )
        return;
    // There may be a duplicate in the shared cache. If that is the case, return it first.
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        void* classImpl = nullptr;
        if ( _objcClassDuplicatesHashTable->getClassLocation(className, _dyldCacheObjCOpt, classImpl) ) {
            bool stop = false;
            callback(classImpl, true, &stop);
            if ( stop )
                return;
        }
    }
    _objcClassHashTable->forEachClass(className, _objcClassHashTableImages.array(), callback);
}

void AllImages::forEachObjCProtocol(const char* protocolName,
                                    void (^callback)(void* protocolPtr, bool isLoaded, bool* stop)) const
{
    if ( _objcProtocolHashTable == nullptr )
        return;
    _objcProtocolHashTable->forEachClass(protocolName, _objcClassHashTableImages.array(), callback);
}

} // namespace dyld3