Loading...
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 
1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 
1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 | /* * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. * * @APPLE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ /* Low level routines dealing with exception entry and exit. * There are various types of exception: * * Interrupt, trap, system call and debugger entry. Each has it's own * handler since the state save routine is different for each. The * code is very similar (a lot of cut and paste). 
 *
 * The code for the FPU disabled handler (lazy fpu) is in cswtch.s
 */

#include <debug.h>
#include <mach_assert.h>
#include <mach/exception_types.h>
#include <mach/ppc/vm_param.h>

#include <assym.s>

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/trap.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/spl.h>

#define VERIFYSAVE 0
#define FPVECDBG 0

/*
 * thandler(type)
 *
 * ENTRY:	VM switched ON
 *		Interrupts OFF
 *		R3 contains exception code
 *		R4 points to the saved context (virtual address)
 *		Everything is saved in savearea
 */

/*
 * If pcb.ksp == 0 then the kernel stack is already busy,
 *		we make a stack frame
 *		leaving enough space for the 'red zone' in case the
 *		trapped thread was in the middle of saving state below
 *		its stack pointer.
 *
 * otherwise we make a stack frame and
 *		the kernel stack (setting pcb.ksp to 0)
 *
 * on return, we do the reverse, the last state is popped from the pcb
 * and pcb.ksp is set to the top of stack
 */

/* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
 * another trap is taken. We need at least enough space for a saved state
 * structure plus two small backpointer frames, and we add a few
 * hundred bytes for the space needed by the C (which may be less but
 * may be much more). We're trying to catch kernel stack overflows :-)
 */

#define TRAP_SPACE_NEEDED	FM_REDZONE+(2*FM_SIZE)+256

	.text

	.align	5
	.globl	EXT(thandler)
LEXT(thandler)							; Trap handler

	mfsprg	r25,0						; Get the per_proc

	lwz		r1,PP_ISTACKPTR(r25)		; Get interrupt stack pointer

	cmpwi	cr0,r1,0					; Are we on interrupt stack?
	lwz		r6,PP_ACTIVE_THREAD(r25)	; Get the pointer to the currently active thread
	beq-	cr0,EXT(ihandler)			; If on interrupt stack, treat this as interrupt...
	lwz		r13,THREAD_TOP_ACT(r6)		; Point to the active activation
	lwz		r26,ACT_MACT_SPF(r13)		; Get special flags
	lwz		r8,ACT_MACT_PCB(r13)		; Get the last savearea used
	rlwinm.	r26,r26,0,bbThreadbit,bbThreadbit	; Do we have Blue Box Assist active?
	lwz		r1,ACT_MACT_KSP(r13)		; Get the top of kernel stack
	bnel-	checkassist					; See if we should assist this
	stw		r4,ACT_MACT_PCB(r13)		; Point to our savearea
	stw		r8,SAVprev(r4)				; Queue the new save area in the front

#if VERIFYSAVE
	bl		versave						; (TEST/DEBUG)
#endif

	lwz		r9,THREAD_KERNEL_STACK(r6)	; Get our kernel stack start
	cmpwi	cr1,r1,0					; Are we already on kernel stack?
	stw		r13,SAVact(r4)				; Mark the savearea as belonging to this activation
	lwz		r26,saver1(r4)				; Get the stack at interrupt time

	bne+	cr1,.L_kstackfree			; We are not on kernel stack yet...
	subi	r1,r26,FM_REDZONE			; Make a red zone on interrupt time kernel stack

.L_kstackfree:
	lwz		r7,savesrr1(r4)				; Pick up the entry MSR
	sub		r9,r1,r9					; Get displacment into the kernel stack
	li		r0,0						; Make this 0
	cmplwi	cr2,r9,KERNEL_STACK_SIZE	; Do we still have room on the stack?
	beq		cr1,.L_state_on_kstack		; using above test for pcb/stack

	stw		r0,ACT_MACT_KSP(r13)		; Show that we have taken the stack

.L_state_on_kstack:
	lwz		r9,savevrsave(r4)			; Get the VRSAVE register
	rlwinm.	r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT	; Was vector on?
	subi	r1,r1,FM_SIZE				; Push a header onto the current stack
	bgt-	cr2,kernelStackBad			; Kernel stack is bogus...

kernelStackNotBad:						; Vector was off
	beq+	tvecoff						; Vector off, do not save vrsave...
	stw		r9,liveVRS(r25)				; Set the live value

tvecoff:
	stw		r26,FM_BACKPTR(r1)			; Link back to the previous frame

#if	DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
	lwz		r3,savesrr0(r4)				; Get the point of interruption
	stw		r3,FM_LR_SAVE(r1)			; save old instr ptr as LR value
	stwu	r1,	-FM_SIZE(r1)			; and make new frame
#endif /* DEBUG */

/* call trap handler proper, with
 *   ARG0 = type		(not yet, holds pcb ptr)
 *   ARG1 = saved_state ptr	(already there)
 *   ARG2 = dsisr		(already there)
 *   ARG3 = dar			(already there)
 */

	lwz		r3,saveexception(r4)		; Get the exception code
	lwz		r0,ACT_MACT_SPF(r13)		; Get the special flags

	addi	r5,r3,-T_DATA_ACCESS		; Adjust to start of range
	rlwinm.	r0,r0,0,runningVMbit,runningVMbit	; Are we in VM state? (cr0_eq == 0 if yes)
	cmplwi	cr2,r5,T_TRACE-T_DATA_ACCESS	; Are we still in range? (cr_gt if not)

	lwz		r5,savedsisr(r4)			; Get the saved DSISR

	crnor	cr7_eq,cr0_eq,cr2_gt		; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes)
	rlwinm.	r0,r7,0,MSR_PR_BIT,MSR_PR_BIT	; Are we trapping from supervisor state? (cr0_eq == 1 if yes)

	cmpi	cr2,r3,T_PREEMPT			; Is this a preemption?

	crandc	cr0_eq,cr7_eq,cr0_eq		; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)

	lwz		r6,savedar(r4)				; Get the DAR

	beq-	cr2, .L_call_trap			; Do not turn on interrupts for T_PREEMPT
	beq-	exitFromVM					; Any true trap but T_MACHINE_CHECK exits us from the VM...

/* syscall exception might warp here if there's nothing left
 * to do except generate a trap
 */

.L_call_trap:

	bl	EXT(trap)

	mfmsr	r7							; Get the MSR
	rlwinm	r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Clear the interrupt enable mask
	mtmsr	r7							; Disable for interrupts
	mfsprg	r10,0						; Restore the per_proc info

/*
 * This is also the point where new threads come when they are created.
 * The new thread is setup to look like a thread that took an
 * interrupt and went immediately into trap.
 */
thread_return:
	lwz		r4,SAVprev(r3)				; Pick up the previous savearea
	lwz		r11,SAVflags(r3)			; Get the flags of the current savearea
	lwz		r8,savesrr1(r3)				; Get the MSR we are going to
	rlwinm	r11,r11,0,15,13				; Clear the syscall flag
	lwz		r1,PP_ACTIVE_THREAD(r10)	; Get the active thread
	rlwinm.	r8,r8,0,MSR_PR_BIT,MSR_PR_BIT	; Are we going to the user?
	mfsprg	r8,1						; Get the current activation
	stw		r11,SAVflags(r3)			; Save back the flags (with reset stack cleared)

	stw		r4,ACT_MACT_PCB(r8)			; Point to the previous savearea (or 0 if none)

	beq-	chkfac						; We are not leaving the kernel yet...

	lwz		r5,THREAD_KERNEL_STACK(r1)	; Get the base pointer to the stack
	addi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE	; Reset to empty
	stw		r5,ACT_MACT_KSP(r8)			; Save the empty stack pointer
	b		chkfac						; Go end it all...

;
;			Here is where we go when we detect that the kernel stack is all messed up.
;			We just try to dump some info and get into the debugger.
;

kernelStackBad:

	lwz		r3,PP_DEBSTACK_TOP_SS(r25)	; Pick up debug stack top
	subi	r3,r3,KERNEL_STACK_SIZE-FM_SIZE	; Adjust to start of stack
	sub		r3,r1,r3					; Get displacement into debug stack
	cmplwi	cr2,r3,KERNEL_STACK_SIZE-FM_SIZE	; Check if we are on debug stack
	blt+	cr2,kernelStackNotBad		; Yeah, that is ok too...

	lis		r0,hi16(Choke)				; Choke code
	ori		r0,r0,lo16(Choke)			; and the rest
	li		r3,failStack				; Bad stack code
	sc									; System ABEND

/*
 * shandler(type)
 *
 * ENTRY:	VM switched ON
 *		Interrupts OFF
 *		R3 contains exception code
 *		R4 points to the saved context (virtual address)
 *		Everything is saved in savearea
 */

/*
 * If pcb.ksp == 0 then the kernel stack is already busy,
 *		this is an error - jump to the debugger entry
 *
 * otherwise depending upon the type of
 *		syscall, look it up in the kernel table
 *		or pass it to the server.
 *
 * on return, we do the reverse, the state is popped from the pcb
 * and pcb.ksp is set to the top of stack.
 */

/*
 * NOTE:
 *	mach system calls are negative
 *	BSD system calls are low positive
 *	PPC-only system calls are in the range 0x6xxx
 *	PPC-only "fast" traps are in the range 0x7xxx
 */

	.align	5
	.globl	EXT(shandler)
LEXT(shandler)							; System call handler

	mfsprg	r25,0						; Get the per proc area
	lwz		r0,saver0(r4)				; Get the original syscall number
	lwz		r17,PP_ISTACKPTR(r25)		; Get interrupt stack pointer
	rlwinm	r15,r0,0,0,19				; Clear the bottom of call number for fast check
	mr.		r17,r17						; Are we on interrupt stack?
	lwz		r7,savesrr1(r4)				; Get the SRR1 value
	beq-	EXT(ihandler)				; On interrupt stack, not allowed...
	lwz		r9,savevrsave(r4)			; Get the VRsave register
	rlwinm.	r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT	; Was vector on?
	lwz		r16,PP_ACTIVE_THREAD(r25)	; Get the thread pointer
	mfsprg	r13,1						; Pick up the active thread

	beq+	svecoff						; Vector off, do not save vrsave...
	stw		r9,liveVRS(r25)				; Set the live value

;
;			Check if SCs are being redirected for the BlueBox or to VMM
;

svecoff:
	lwz		r6,ACT_MACT_SPF(r13)		; Pick up activation special flags
	mtcrf	0x41,r6						; Check special flags
	crmove	cr6_eq,runningVMbit			; Remember if we are in VMM
	bne		cr6,sVMchecked				; Not running VM
	lwz		r18,spcFlags(r25)			; Load per_proc special flags
	rlwinm.	r18,r18,0,FamVMmodebit,FamVMmodebit	; Is FamVMmodebit set?
	beq		sVMchecked					; Not in FAM
	cmpwi	r0,0x6004					; Is it vmm_dispatch syscall:
	bne		sVMchecked
	lwz		r26,saver3(r4)				; Get the original syscall number
	cmpwi	cr6,r26,kvmmExitToHost		; vmm_exit_to_host request

sVMchecked:
	bf+		bbNoMachSCbit,noassist		; Take branch if SCs are not redirected
	lwz		r26,ACT_MACT_BEDA(r13)		; Pick up the pointer to the blue box exception area
	b		EXT(atomic_switch_syscall)	; Go to the assist...

noassist:
	cmplwi	r15,0x7000					; Do we have a fast path trap?
	lwz		r14,ACT_MACT_PCB(r13)		; Now point to the PCB
	beql+	fastpath					; We think it is a fastpath...

	lwz		r1,ACT_MACT_KSP(r13)		; Get the kernel stack pointer

#if DEBUG
	mr.		r1,r1						; Are we already on the kernel stack?
	li		r3,T_SYSTEM_CALL			; Yup, pretend we had an interrupt...
	beq-	EXT(ihandler)				; Bad boy, bad boy... What cha gonna do when they come for you?
#endif /* DEBUG */

	stw		r4,ACT_MACT_PCB(r13)		; Point to our savearea
	li		r0,0						; Clear this out
	stw		r14,SAVprev(r4)				; Queue the new save area in the front
	stw		r13,SAVact(r4)				; Point the savearea at its activation

#if VERIFYSAVE
	bl		versave						; (TEST/DEBUG)
#endif

	mr		r30,r4						; Save pointer to the new context savearea
	lwz		r15,saver1(r4)				; Grab interrupt time stack
	stw		r0,ACT_MACT_KSP(r13)		; Mark stack as busy with 0 val
	stw		r15,FM_BACKPTR(r1)			; Link stack frame backwards

#if	DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
	lwz		r8,savesrr0(r30)			; Get the point of interruption
	stw		r8,FM_LR_SAVE(r1)			; Save old instr ptr as LR value
	stwu	r1,	-FM_SIZE(r1)			; and make new frame
#endif /* DEBUG */

	mfmsr	r11							; Get the MSR
	lwz		r15,SAVflags(r30)			; Get the savearea flags
	ori		r11,r11,lo16(MASK(MSR_EE))	; Turn on interruption enabled bit
	lwz		r0,saver0(r30)				; Get R0 back
	oris	r15,r15,SAVsyscall >> 16	; Mark that this is a syscall
	rlwinm	r10,r0,0,0,19				; Keep only the top part
	stwu	r1,-(FM_SIZE+ARG_SIZE)(r1)	; Make a stack frame
	cmplwi	r10,0x6000					; Is it the special ppc-only guy?
	stw		r15,SAVflags(r30)			; Save syscall marker
	beq-	cr6,exitFromVM				; It is time to exit from alternate context...

	beq-	ppcscall					; Call the ppc-only system call handler...

	mtmsr	r11							; Enable interruptions

	lwz		r0,saver0(r30)				; Get the system call selector
	mr.		r0,r0						; What kind is it?
	blt-	.L_kernel_syscall			; System call number if negative, this is a mach call...

	cmpwi	cr0,r0,0x7FFA				; Special blue box call?
	beq-	.L_notify_interrupt_syscall	; Yeah, call it...
	lwz		r8,ACT_TASK(r13)			; Get our task
	lis		r10,hi16(EXT(c_syscalls_unix))	; Get top half of counter address
	lwz		r7,TASK_SYSCALLS_UNIX(r8)	; Get the current count
	ori		r10,r10,lo16(EXT(c_syscalls_unix))	; Get low half of counter address
	addi	r7,r7,1						; Bump it
	lwz		r9,0(r10)					; Get counter
	stw		r7,TASK_SYSCALLS_UNIX(r8)	; Save it
	mr		r3,r30						; Get PCB/savearea
	mr		r4,r13						; current activation
	addi	r9,r9,1						; Add 1
	stw		r9,0(r10)					; Save it back
	bl		EXT(unix_syscall)			; Check out unix...

.L_call_server_syscall_exception:
	li		r3,EXC_SYSCALL				; doexception(EXC_SYSCALL, num, 1)

.L_call_server_exception:
	mr		r4,r0						; Set syscall selector
	li		r5,1
	b		EXT(doexception)			; Go away, never to return...

.L_notify_interrupt_syscall:
	lwz		r3,saver3(r30)				; Get the new PC address to pass in
	bl		EXT(syscall_notify_interrupt)
	b		.L_syscall_return

;
;			Handle PPC-only system call interface
;			These are called with interruptions disabled
;			and the savearea/pcb as the first parameter.
;			It is up to the callee to enable interruptions if
;			they should be.  We are in a state here where
;			both interrupts and preemption is ok, but because we could
;			be calling diagnostic code we will not enable.
;
;			Also, the callee is responsible for finding any parameters
;			in the savearea/pcb. It also must set saver3 with any return
;			code before returning.
;
;			There are 3 possible return codes:
;				0  the call is disabled or something, we treat this like it was bogus
;				+  the call finished ok, check for AST
;				-  the call finished ok, do not check for AST
;
;			Note: the last option is intended for special diagnostics calls that
;			want the thread to return and execute before checking for preemption.
;
;			NOTE: Both R16 (thread) and R30 (savearea) need to be preserved over this call!!!!
;

	.align	5

ppcscall:
	rlwinm	r11,r0,2,18,29				; Make an index into the table
	lis		r10,hi16(EXT(PPCcalls))		; Get PPC-only system call table
	cmplwi	r11,PPCcallmax				; See if we are too big
	ori		r10,r10,lo16(EXT(PPCcalls))	; Merge in low half
	bgt-	.L_call_server_syscall_exception	; Bogus call...
	lwzx	r11,r10,r11					; Get function address

;
;			Note: make sure we do not change the savearea in R30 to
;			a different register without checking.  Some of the PPCcalls
;			depend upon it being there.
;

	mr		r3,r30						; Pass the savearea
	mr		r4,r13						; Pass the activation
	mr.		r11,r11						; See if there is a function here
	mtlr	r11							; Set the function address
	beq-	.L_call_server_syscall_exception	; Disabled call...
	blrl								; Call it

	.globl	EXT(ppcscret)

LEXT(ppcscret)
	mr.		r3,r3						; See what we should do
	mr		r31,r16						; Restore the current thread pointer
	bgt+	.L_thread_syscall_ret_check_ast	; Take normal AST checking return....
	mfsprg	r10,0						; Get the per_proc
	blt+	.L_thread_syscall_return	; Return, but no ASTs....
	lwz		r0,saver0(r30)				; Restore the system call number
	b		.L_call_server_syscall_exception	; Go to common exit...

/* Once here, we know that the syscall was -ve
 * we should still have r1=ksp,
 * r16		= pointer to current thread,
 * r13		= pointer to top activation,
 * r0		= syscall number
 * r30		= pointer to saved state (in pcb)
 */

	.align	5

.L_kernel_syscall:
;
; Call a function that can print out our syscall info
; Note that we don't care about any volatiles yet
;
	mr		r4,r30
	bl		EXT(syscall_trace)
	lwz		r0,saver0(r30)				; Get the system call selector
	neg		r31,r0						; Make system call number positive and put in r31
	lis		r29,hi16(EXT(mach_trap_count))	; High part of valid trap number
	ori		r29,r29,lo16(EXT(mach_trap_count))	; Low part of valid trap number
	lis		r28,hi16(EXT(mach_trap_table))	; High part of trap table
	lwz		r29,0(r29)					; Get the first invalid system call number
	ori		r28,r28,lo16(EXT(mach_trap_table))	; Low part of trap table

	cmplw	r31,r29						; See if we have a valid system call number
	slwi	r31,r31,MACH_TRAP_OFFSET_POW2	; Get offset into table

	bge-	.L_call_server_syscall_exception	; System call number is bogus

	add		r31,r31,r28					; Point to the system call entry
	lis		r28,hi16(EXT(kern_invalid))	; Get the high invalid routine address
	lwz		r0,MACH_TRAP_FUNCTION(r31)	; Grab the system call routine address
	ori		r28,r28,lo16(EXT(kern_invalid))	; Get the low part of the invalid routine address
	lwz		r29,MACH_TRAP_ARGC(r31)		; Get the number of arguments in the call
	cmplw	r0,r28						; Is this an invalid entry?
	beq-	.L_call_server_syscall_exception	; Yes, it is invalid...

/* get arg count. If argc > 8 then not all args were in regs,
 * so we must perform copyin.
 */
	cmpwi	cr0,r29,8					; Do we have more than 8 arguments?
	ble+	.L_syscall_got_args			; Nope, no copy in needed...

/* argc > 8  - perform a copyin */
/* if the syscall came from kernel space, we can just copy */

	lwz		r0,savesrr1(r30)			; Pick up exception time MSR
	andi.	r0,r0,MASK(MSR_PR)			; Check the priv bit
	bne+	.L_syscall_arg_copyin		; We are not privileged...

/* we came from a privileged task, just do a copy */
/* get user's stack pointer */

	lwz		r28,saver1(r30)				; Get the stack pointer

	subi	r29,r29,8					; Get the number of arguments to copy

	addi	r28,r28,COPYIN_ARG0_OFFSET-4	; Point to source - 4
	addi	r27,r1,FM_ARG0-4			; Point to sink - 4

.L_syscall_copy_word_loop:
	addic.	r29,r29,-1					; Count down the number of arguments left
	lwz		r0,4(r28)					; Pick up the argument from the stack
	addi	r28,r28,4					; Point to the next source
	stw		r0,4(r27)					; Store the argument
	addi	r27,r27,4					; Point to the next sink
	bne+	.L_syscall_copy_word_loop	; Move all arguments...
	b		.L_syscall_got_args			; Go call it now...

/* we came from a user task, pay the price of a real copyin */
/* set recovery point */

	.align	5

.L_syscall_arg_copyin:
	lwz		r8,ACT_VMMAP(r13)			; Get the vm_map for this activation
	lis		r28,hi16(.L_syscall_copyin_recover)
	lwz		r8,VMMAP_PMAP(r8)			; Get the pmap
	ori		r28,r28,lo16(.L_syscall_copyin_recover)
	addi	r8,r8,PMAP_SEGS				; Point to the pmap SR slots
	stw		r28,THREAD_RECOVER(r16)		; R16 still holds thread ptr

/* We can manipulate the COPYIN segment register quite easily
 * here, but we've also got to make sure we don't go over a
 * segment boundary - hence some mess.
 * Registers from 12-29 are free for our use.
 */

	lwz		r28,saver1(r30)				; Get the stack pointer
	subi	r29,r29,8					; Get the number of arguments to copy
	addi	r28,r28,COPYIN_ARG0_OFFSET	; Set source in user land

/* set up SR_COPYIN to allow us to copy, we may need to loop
 * around if we change segments. We know that this previously
 * pointed to user space, so the sid doesn't need setting.
 */

	rlwinm	r7,r28,6,26,29				; Get index to the segment slot

.L_syscall_copyin_seg_loop:
	lwzx	r10,r8,r7					; Get the source SR value
	rlwinm	r26,r28,0,4,31				; Clear the segment number from source address
	mtsr	SR_COPYIN,r10				; Set the copyin SR
	isync

	oris	r26,r26,(SR_COPYIN_NUM << (28-16))	; Insert the copyin segment number into source address
	addi	r27,r1,FM_ARG0-4			; Point to area - 4 where we will store the arguments

.L_syscall_copyin_word_loop:
	lwz		r0,0(r26)					; MAY CAUSE PAGE FAULT!
	subi	r29,r29,1					; Decrement count
	addi	r26,r26,4					; Bump input
	stw		r0,4(r27)					; Save the copied in word
	mr.		r29,r29						; Are they all moved?
	addi	r27,r27,4					; Bump output
	beq+	.L_syscall_copyin_done		; Escape if we are done...

	rlwinm.	r0,r26,0,4,29				; Did we just step into a new segment?
	addi	r28,r28,4					; Bump up user state address also
	bne+	.L_syscall_copyin_word_loop	; We are still on the same segment...

	addi	r7,r7,4						; Bump to next slot
	b		.L_syscall_copyin_seg_loop	; On new segment! remap

/* Don't bother restoring SR_COPYIN, we can leave it trashed */
/* clear thread recovery as we're done touching user data */

	.align	5

.L_syscall_copyin_done:
	li		r0,0
	stw		r0,THREAD_RECOVER(r16)		; R16 still holds thread ptr

.L_syscall_got_args:
	lwz		r0,MACH_TRAP_FUNCTION(r31)	; Get function address
	lwz		r8,ACT_TASK(r13)			; Get our task
	lis		r10,hi16(EXT(c_syscalls_mach))	; Get top half of counter address
	lwz		r7,TASK_SYSCALLS_MACH(r8)	; Get the current count
	lwz		r3,saver3(r30)				; Restore r3
	addi	r7,r7,1						; Bump it
	ori		r10,r10,lo16(EXT(c_syscalls_mach))	; Get low half of counter address
	stw		r7,TASK_SYSCALLS_MACH(r8)	; Save it
	lwz		r4,saver4(r30)				; Restore r4
	lwz		r9,0(r10)					; Get counter
	mtctr	r0							; Set function address
	lwz		r5,saver5(r30)				; Restore r5
	lwz		r6,saver6(r30)				; Restore r6
	addi	r9,r9,1						; Add 1
	lwz		r7,saver7(r30)				; Restore r7
	lwz		r8,saver8(r30)				; Restore r8
	stw		r9,0(r10)					; Save it back
	lwz		r9,saver9(r30)				; Restore r9
	lwz		r10,saver10(r30)			; Restore r10

;
;			Note that all arguments from the system call are passed into the
function ; bctrl ; Perform the actual syscall /* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */ /* r3 contains value that we're going to return to the user */ /* * Ok, return from C function, R3 = return value * * get the active thread's PCB pointer and thus pointer to user state * saved state is still in R30 and the active thread is in R16 . */ /* Store return value into saved state structure, since * we need to pick up the value from here later - the * syscall may perform a thread_set_syscall_return * followed by a thread_exception_return, ending up * at thread_syscall_return below, with SS_R3 having * been set up already */ /* When we are here, r16 should point to the current thread, * r30 should point to the current pcb */ /* save off return value, we must load it * back anyway for thread_exception_return */ .L_syscall_return: mr r31,r16 ; Move the current thread pointer stw r3,saver3(r30) ; Stash the return code mr r4,r30 ; Pass in the savearea bl EXT(syscall_trace_end) ; Trace the exit of the system call .L_thread_syscall_ret_check_ast: mfmsr r12 ; Get the current MSR rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off rlwinm r12,r12,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions enable bit mtmsr r12 ; Turn interruptions off mfsprg r10,0 ; Get the per_processor block /* Check to see if there's an outstanding AST */ lwz r4,PP_NEED_AST(r10) ; Get the pointer to the ast requests lwz r4,0(r4) ; Get the flags cmpi cr0,r4, 0 ; Any pending asts? beq+ cr0,.L_syscall_no_ast ; Nope... /* Yes there is, call ast_taken * pretending that the user thread took an AST exception here, * ast_taken will save all state and bring us back here */ #if DEBUG /* debug assert - make sure that we're not returning to kernel */ lwz r3,savesrr1(r30) andi. 
r3,r3,MASK(MSR_PR) bne+ scrnotkern ; returning to user level, check lis r0,hi16(Choke) ; Choke code ori r0,r0,lo16(Choke) ; and the rest li r3,failContext ; Bad state code sc ; System ABEND scrnotkern: #endif /* DEBUG */ li r3,AST_ALL ; Set ast flags li r4,1 ; Set interrupt allowed bl EXT(ast_taken) ; Process the pending ast b .L_thread_syscall_ret_check_ast ; Go see if there was another... /* thread_exception_return returns to here, almost all * registers intact. It expects a full context restore * of what it hasn't restored itself (ie. what we use). * * In particular for us, * we still have r31 points to the current thread, * r30 points to the current pcb */ .align 5 .L_syscall_no_ast: .L_thread_syscall_return: mr r3,r30 ; Get savearea to the correct register for common exit mfsprg r8,1 ; Now find the current activation lwz r11,SAVflags(r30) ; Get the flags lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack rlwinm r11,r11,0,15,13 ; Clear the syscall flag lwz r4,SAVprev(r30) ; Get the previous save area stw r11,SAVflags(r30) ; Stick back the flags addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty stw r4,ACT_MACT_PCB(r8) ; Save previous save area stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer b chkfac ; Go end it all... .align 5 .L_syscall_copyin_recover: /* This is the catcher for any data faults in the copyin * of arguments from the user's stack. * r30 still holds a pointer to the PCB * * call syscall_error(EXC_BAD_ACCESS, EXC_PPC_VM_PROT_READ, sp, ssp), * * we already had a frame so we can do this */ li r3,EXC_BAD_ACCESS ; Set bad access code li r4,EXC_PPC_VM_PROT_READ ; Set protection exception lwz r5,saver1(r30) ; Point to the stack mr r6,r30 ; Pass savearea bl EXT(syscall_error) ; Generate error... b .L_syscall_return ; Continue out... /* * thread_exception_return() * * Return to user mode directly from within a system call. 
*/

	.align	5
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)				; NOTE: THIS IS GOING AWAY IN A FEW DAYS....

	.globl	EXT(thread_exception_return)
LEXT(thread_exception_return)				; Directly return to user mode

;			Loop: disable interrupts, then check for (and service) pending ASTs
;			until none remain, so we never launch user state with one outstanding.

.L_thread_exc_ret_check_ast:
	mfmsr	r3					; Get the MSR
	rlwinm	r3,r3,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r3,r3,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r3,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Clear EE
	mtmsr	r3					; Disable interrupts

/* Check to see if there's an outstanding AST */
/* We don't bother establishing a call frame even though CHECK_AST
   can invoke ast_taken(), because it can just borrow our caller's
   frame, given that we're not going to return.
*/

	mfsprg	r10,0					; Get the per_processor block
	lwz	r4,PP_NEED_AST(r10)			; Get the pointer to the AST request word
	lwz	r4,0(r4)				; Get the request flags
	cmpi	cr0,r4, 0				; Any pending ASTs?
	beq	cr0,.L_exc_ret_no_ast			; Nope...

/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
 * ast_taken will save all state and bring us back here
 */

	li	r3,AST_ALL				; Set AST flags
	li	r4,1					; Set interrupt allowed
	bl	EXT(ast_taken)				; Process the pending AST
	b	.L_thread_exc_ret_check_ast		; check for a second AST (rare)

/* arriving here, interrupts should be disabled */
/* Get the active thread's PCB pointer to restore regs */

.L_exc_ret_no_ast:
	mfsprg	r30,1					; Get the current activation
	lwz	r31,ACT_THREAD(r30)			; Get the current thread
	lwz	r30,ACT_MACT_PCB(r30)			; Get the top savearea (PCB) of the activation
	mr.	r30,r30					; Is there any context yet?
	beq-	makeDummyCtx				; No, hack one up...
#if DEBUG
/*
 * debug assert - make sure that we're not returning to kernel
 * get the active thread's PCB pointer and thus pointer to user state
 */
	lwz	r3,savesrr1(r30)			; Get the MSR image we will return with
	andi.	r3,r3,MASK(MSR_PR)			; Is the problem-state (user) bit set?
	bne+	ret_user2				; We are ok...
	lis	r0,hi16(Choke)				; Choke code
	ori	r0,r0,lo16(Choke)			; and the rest
	li	r3,failContext				; Bad state code
	sc						; System ABEND
ret_user2:
#endif	/* DEBUG */

/* If the system call flag isn't set, then we came from a trap,
 * so warp into the return_from_trap (thread_return) routine,
 * which takes PCB pointer in R3, not in r30!
 */
	lwz	r0,SAVflags(r30)			; Grab the savearea flags
	mr	r3,r30					; Copy pcb pointer into r3 in case we need it
	andis.	r0,r0,SAVsyscall>>16			; Are we returning from a syscall?
	beq-	cr0,thread_return			; Nope, must be a thread return...
	b	.L_thread_syscall_return		; Join up with the system call return...

;
;			This is where we handle someone who did a thread_create followed
;			by a thread_resume with no intervening thread_set_state.  Just make an
;			empty context, initialize it to trash and let em execute at 0...
;

	.align	5

makeDummyCtx:
	bl	EXT(save_get)				; Get a save_area
	li	r4,SAVgeneral				; Get the general context type
	li	r0,0					; Get a 0
	stb	r4,SAVflags+2(r3)			; Set type
	addi	r2,r3,savevscr				; Point past what we are clearing
	mr	r4,r3					; Save the start

cleardummy:
	stw	r0,0(r4)				; Clear stuff
	addi	r4,r4,4					; Next word
	cmplw	r4,r2					; Still some more?
	blt+	cleardummy				; Yeah...

	lis	r2,hi16(MSR_EXPORT_MASK_SET)		; Set the high part of the user MSR
	ori	r2,r2,lo16(MSR_EXPORT_MASK_SET)		; And the low part
	stw	r2,savesrr1(r3)				; Set the default user MSR

	b	thread_return				; Go let em try to execute, hah!

/*
 *			ihandler(type)
 *
 *			ENTRY:	VM switched ON
 *				Interrupts  OFF
 *				R3 contains exception code
 *				R4 points to the saved context (virtual address)
 *				Everything is saved in savearea
 *
 */

	.align	5
	.globl	EXT(ihandler)
LEXT(ihandler)						; Interrupt handler

/*
 * get the value of istackptr, if it's zero then we're already on the
 * interrupt stack.
 */

	lwz	r10,savesrr1(r4)			; Get SRR1
	lwz	r7,savevrsave(r4)			; Get the VRSAVE register
	mfsprg	r25,0					; Get the per_proc block
	li	r14,0					; Zero this for now
	rlwinm.	r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT	; Was vector on?
	lwz	r1,PP_ISTACKPTR(r25)			; Get the interrupt stack
	li	r13,0					; Zero this for now
	lwz	r16,PP_ACTIVE_THREAD(r25)		; Get the thread pointer

	beq+	ivecoff					; Vector off, do not save vrsave...

	stw	r7,liveVRS(r25)				; Set the live value

ivecoff:
	li	r0,0					; Get a constant 0
	cmplwi	cr1,r16,0				; Are we still booting?

ifpoff:
	mr.	r1,r1					; Is it active?
	beq-	cr1,ihboot1				; We are still coming up...
	lwz	r13,THREAD_TOP_ACT(r16)			; Pick up the active thread
	lwz	r14,ACT_MACT_PCB(r13)			; Now point to the PCB

ihboot1:
	lwz	r9,saver1(r4)				; Pick up the rupt time stack
	stw	r14,SAVprev(r4)				; Queue the new save area in the front
	stw	r13,SAVact(r4)				; Point the savearea at its activation
	beq-	cr1,ihboot4				; We are still coming up...
	stw	r4,ACT_MACT_PCB(r13)			; Point to our savearea

ihboot4:
	bne	.L_istackfree				; Nope...

/* We're already on the interrupt stack, get back the old
 * stack pointer and make room for a frame
 */

	lwz	r10,PP_INTSTACK_TOP_SS(r25)		; Get the top of the interrupt stack
	addi	r5,r9,INTSTACK_SIZE-FM_SIZE		; Shift stack for bounds check
	subi	r1,r9,FM_REDZONE			; Back up beyond the red zone
	sub	r5,r5,r10				; Get displacement into stack
	cmplwi	r5,INTSTACK_SIZE-FM_SIZE		; Is the stack actually invalid?
	blt+	ihsetback				; The stack is ok...

	lwz	r5,PP_DEBSTACK_TOP_SS(r25)		; Pick up debug stack top
	subi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE		; Adjust to start of stack
	sub	r5,r1,r5				; Get displacement into debug stack
	cmplwi	cr2,r5,KERNEL_STACK_SIZE-FM_SIZE	; Check if we are on debug stack
	blt+	ihsetback				; Yeah, that is ok too...

	lis	r0,hi16(Choke)				; Choke code
	ori	r0,r0,lo16(Choke)			; and the rest
	li	r3,failStack				; Bad stack code
	sc						; System ABEND

	.align	5

.L_istackfree:
	lwz	r10,SAVflags(r4)			; Pick up the savearea flags
	stw	r0,PP_ISTACKPTR(r25)			; Mark the stack in use
	oris	r10,r10,HIGH_ADDR(SAVrststk)		; Indicate we reset stack when we return from this one
	stw	r10,SAVflags(r4)			; Stick it back

/*
 * To summarize, when we reach here, the state has been saved and
 * the stack is marked as busy. We now generate a small
 * stack frame with backpointers to follow the calling
 * conventions. We set up the backpointers to the trapped
 * routine allowing us to backtrace.
 */

ihsetback:
	subi	r1,r1,FM_SIZE				; Make a new frame
	stw	r9,FM_BACKPTR(r1)			; Point back to previous stackptr

#if VERIFYSAVE
	beq-	cr1,ihbootnover				; (TEST/DEBUG)
	bl	versave					; (TEST/DEBUG)
ihbootnover:						; (TEST/DEBUG)
#endif

#if	DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
	lwz	r5,savesrr0(r4)				; Get interrupt address
	stw	r5,FM_LR_SAVE(r1)			; save old instr ptr as LR value
	stwu	r1,-FM_SIZE(r1)				; Make another new frame for C routine
#endif /* DEBUG */

	lwz	r5,savedsisr(r4)			; Get the DSISR
	lwz	r6,savedar(r4)				; Get the DAR

	bl	EXT(interrupt)


/* interrupt() returns a pointer to the saved state in r3
 *
 * Ok, back from C. Disable interrupts while we restore things
 */
	.globl	EXT(ihandler_ret)

LEXT(ihandler_ret)					; Marks our return point from debugger entry

	mfmsr	r0					; Get our MSR
	rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Flip off the interrupt enabled bit
	mtmsr	r0					; Make sure interrupts are disabled
	mfsprg	r10,0					; Get the per_proc block

	lwz	r7,SAVflags(r3)				; Pick up the flags
	lwz	r8,PP_ACTIVE_THREAD(r10)		; and the active thread
	lwz	r9,SAVprev(r3)				; Get previous save area
	cmplwi	cr1,r8,0				; Are we still initializing?
	lwz	r12,savesrr1(r3)			; Get the MSR we will load on return
	beq-	cr1,ihboot2				; Skip if we are still in init...
	lwz	r8,THREAD_TOP_ACT(r8)			; Pick up the active thread

ihboot2:
	andis.	r11,r7,hi16(SAVrststk)			; Is this the first on the stack?
	beq-	cr1,ihboot3				; Skip if we are still in init...
	stw	r9,ACT_MACT_PCB(r8)			; Point to previous context savearea

ihboot3:
	mr	r4,r3					; Move the savearea pointer
	beq	.L_no_int_ast2				; Get going if not the top-o-stack...


/* We're the last frame on the stack. Restore istackptr to empty state.
 *
 * Check for ASTs if one of the below is true:
 *    returning to user mode
 *    returning to a kloaded server
 */
	lwz	r9,PP_INTSTACK_TOP_SS(r10)		; Get the empty stack value
	andc	r7,r7,r11				; Remove the stack reset bit in case we pass this one
	stw	r9,PP_ISTACKPTR(r10)			; Save that saved state ptr
	lwz	r3,PP_PREEMPT_CNT(r10)			; Get preemption level
	stw	r7,SAVflags(r4)				; Save the flags
	cmplwi	r3, 0					; Check for preemption
	bne	.L_no_int_ast				; Do not preempt if level is not zero
	andi.	r6,r12,MASK(MSR_PR)			; privilege mode
	lwz	r11,PP_NEED_AST(r10)			; Get the AST request address
	lwz	r11,0(r11)				; Get the request
	beq-	.L_kernel_int_ast			; In kernel space, AST_URGENT check
	li	r3,T_AST				; Assume the worst
	mr.	r11,r11					; Are there any pending?
	beq	.L_no_int_ast				; Nope...
	b	.L_call_thandler

.L_kernel_int_ast:
	andi.	r11,r11,AST_URGENT			; Do we have AST_URGENT?
	li	r3,T_PREEMPT				; Assume the worst
	beq	.L_no_int_ast				; Nope...

/*
 * There is a pending AST. Massage things to make it look like
 * we took a trap and jump into the trap handler.  To do this
 * we essentially pretend to return from the interrupt but
 * at the last minute jump into the trap handler with an AST
 * trap instead of performing an rfi.
 */

.L_call_thandler:
	stw	r3,saveexception(r4)			; Set the exception code to T_AST/T_PREEMPT
	b	EXT(thandler)				; We need to preempt so treat like a trap...

.L_no_int_ast:
	mr	r3,r4					; Get into the right register for common code

.L_no_int_ast2:
	rlwinm	r7,r7,0,15,13				; Clear the syscall flag
	li	r4,0					; Assume for a moment that we are in init
	stw	r7,SAVflags(r3)				; Set the flags with cleared syscall flag
	beq-	cr1,chkfac				; Jump away if we are in init...

	lwz	r4,ACT_MACT_PCB(r8)			; Get the new level marker

;
;			This section is common to all exception exits.  It throws away vector
;			and floating point saveareas as the exception level of a thread is
;
;			It also enables the facility if its context is live
;			Requires:
;			R3  = Savearea to be released (virtual)
;			R4  = New top of savearea stack (could be 0)
;			R8  = pointer to activation
;			R10 = per_proc block
;
;			Note that barring unforeseen crashes, there is no escape from this point
;			on. We WILL call exception_exit and launch this context. No worries
;			about preemption or interruptions here.
;
;			Note that we will set up R26 with whatever context we will be launching,
;			so it will indicate the current, or the deferred if it is set and we
;			are going to user state.  CR2_eq will be set to indicate deferred.
;

chkfac:		mr	r31,r10				; Move per_proc address
		mr	r30,r4				; Preserve new level
		lwz	r29,savesrr1(r3)		; Get the current MSR
		mr.	r28,r8				; Are we still in boot?
		mr	r27,r3				; Save the old level
		beq-	chkenax				; Yeah, skip it all...

		rlwinm.	r0,r29,0,MSR_PR_BIT,MSR_PR_BIT	; Are we going into user state?

#if 0
		beq+	lllll				; (TEST/DEBUG)
		BREAKPOINT_TRAP				; (TEST/DEBUG)
lllll:
#endif

		lwz	r20,curctx(r28)			; Get our current context
		lwz	r26,deferctx(r28)		; Get any deferred context switch
		rlwinm	r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Turn off floating point for now
		lwz	r21,FPUlevel(r20)		; Get the facility level
		cmplwi	cr2,r26,0			; Are we going into a deferred context later?
		rlwinm	r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Turn off vector for now
		crnor	cr2_eq,cr0_eq,cr2_eq		; Set cr2_eq if going to user state and there is deferred
		cmplw	r27,r21				; Are we returning from the active level?
		lhz	r19,PP_CPU_NUMBER(r31)		; Get our CPU number
		bne+	fpuchkena			; Nope...

;
;		First clean up any live context we are returning from
;

		lwz	r22,FPUcpu(r20)			; Get CPU this context was last dispatched on
		stw	r19,FPUcpu(r20)			; Claim context for us
		eieio					; Make sure this gets out before owner clear
		lis	r23,hi16(EXT(per_proc_info))	; Set base per_proc
		mulli	r22,r22,ppSize			; Find offset to the owner per_proc
		ori	r23,r23,lo16(EXT(per_proc_info))	; Set base per_proc
		li	r24,FPUowner			; Displacement to FPU owner
		add	r22,r23,r22			; Point to the owner per_proc
		li	r0,0				; We need this in a bit

;		lwarx/stwcx. loop: atomically clear the owner only if it still points
;		at this context (another CPU may be racing to claim it).

fpuinvothr:	lwarx	r23,r24,r22			; Get the owner
		cmplw	r23,r20				; Does he still have this context?
		bne	fpuinvoths			; Nope...
		stwcx.	r0,r24,r22			; Try to invalidate it
		bne-	fpuinvothr			; Try again if there was a collision...

fpuinvoths:	isync

;
;		Now if there is a savearea associated with the popped context, release it.
;		Either way, pop the level to the top stacked context.
;

		lwz	r22,FPUsave(r20)		; Get pointer to the first savearea
		li	r21,0				; Assume we popped all the way out
		mr.	r22,r22				; Is there anything there?
		beq+	fpusetlvl			; No, see if we need to enable...

		lwz	r21,SAVlevel(r22)		; Get the level of that savearea
		cmplw	r21,r27				; Is this the saved copy of the live stuff?
		bne	fpusetlvl			; No, leave as is...

		lwz	r24,SAVprev(r22)		; Pick up the previous area
		li	r21,0				; Assume we popped all the way out
		mr.	r24,r24				; Any more context stacked?
		beq-	fpuonlyone			; Nope...
		lwz	r21,SAVlevel(r24)		; Get the level associated with save

fpuonlyone:	stw	r24,FPUsave(r20)		; Dequeue this savearea
		rlwinm	r3,r22,0,0,19			; Find main savearea header
		lwz	r3,SACvrswap(r3)		; Get the virtual to real conversion
		la	r9,quickfret(r31)		; Point to the quickfret chain header
		xor	r3,r22,r3			; Convert to physical

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)		; (TEST/DEBUG)
		li	r2,0x3301			; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)	; (TEST/DEBUG)
		sc					; (TEST/DEBUG)
#endif

;		Push the released savearea onto the quickfret chain atomically.

fpufpucdq:	lwarx	r0,0,r9				; Pick up the old chain head
		stw	r0,SAVprev(r22)			; Move it to the current guy
		stwcx.	r3,0,r9				; Save it
		bne-	fpufpucdq			; Someone changed the list...

fpusetlvl:	stw	r21,FPUlevel(r20)		; Save the level

;
;		Here we check if we are at the right level
;		We need to check the level we are entering, not the one we are exiting.
;		Therefore, we will use the defer level if it is non-zero and we are
;		going into user state.
;

fpuchkena:	bt-	cr2_eq,fpuhasdfrd		; Skip if deferred, R26 already set up...
		mr	r26,r20				; Use the non-deferred value

fpuhasdfrd:	lwz	r21,FPUowner(r31)		; Get the ID of the live context
		lwz	r23,FPUlevel(r26)		; Get the level ID
		cmplw	cr3,r26,r21			; Do we have the live context?
		lwz	r24,FPUcpu(r26)			; Get the CPU that the context was last dispatched on
		bne-	cr3,chkvec			; No, can not possibly enable...
		cmplw	r30,r23				; Are we about to launch the live level?
		cmplw	cr1,r19,r24			; Was facility used on this processor last?
		bne-	chkvec				; No, not live...
		bne-	cr1,chkvec			; No, wrong cpu, have to enable later....

		lwz	r24,FPUsave(r26)		; Get the first savearea
		mr.	r24,r24				; Any savearea?
		beq+	fpuena				; Nope...
		lwz	r25,SAVlevel(r24)		; Get the level of savearea
		lwz	r0,SAVprev(r24)			; Get the previous
		cmplw	r30,r25				; Is savearea for the level we are launching?
		bne+	fpuena				; No, just go enable...

		stw	r0,FPUsave(r26)			; Pop the chain
		rlwinm	r3,r24,0,0,19			; Find main savearea header
		lwz	r3,SACvrswap(r3)		; Get the virtual to real conversion
		la	r9,quickfret(r31)		; Point to the quickfret chain header
		xor	r3,r24,r3			; Convert to physical

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)		; (TEST/DEBUG)
		li	r2,0x3302			; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)	; (TEST/DEBUG)
		sc					; (TEST/DEBUG)
#endif

fpuhascdq:	lwarx	r0,0,r9				; Pick up the old chain head
		stw	r0,SAVprev(r24)			; Move it to the current guy
		stwcx.	r3,0,r9				; Save it
		bne-	fpuhascdq			; Someone changed the list...

fpuena:		ori	r29,r29,lo16(MASK(MSR_FP))	; Enable facility

chkvec:

#if 0
		rlwinm.	r21,r29,0,MSR_PR_BIT,MSR_PR_BIT	; (TEST/DEBUG)
		beq+	ppppp				; (TEST/DEBUG)
		lwz	r21,FPUlevel(r26)		; (TEST/DEBUG)
		mr.	r21,r21				; (TEST/DEBUG)
		bne-	qqqqq				; (TEST/DEBUG)
		lwz	r21,FPUsave(r26)		; (TEST/DEBUG)
		mr.	r21,r21				; (TEST/DEBUG)
		beq+	ppppp				; (TEST/DEBUG)
		lwz	r22,SAVlevel(r21)		; (TEST/DEBUG)
		mr.	r22,r22				; (TEST/DEBUG)
		beq+	ppppp				; (TEST/DEBUG)
qqqqq:
		BREAKPOINT_TRAP				; (TEST/DEBUG)
ppppp:							; (TEST/DEBUG)
#endif

;		Now the same dance for the vector (VMX) facility.

		lwz	r21,VMXlevel(r20)		; Get the facility level

		cmplw	r27,r21				; Are we returning from the active level?
		bne+	vmxchkena			; Nope...

;
;		First clean up any live context we are returning from
;

		lwz	r22,VMXcpu(r20)			; Get CPU this context was last dispatched on
		stw	r19,VMXcpu(r20)			; Claim context for us
		eieio					; Make sure this gets out before owner clear
		lis	r23,hi16(EXT(per_proc_info))	; Set base per_proc
		mulli	r22,r22,ppSize			; Find offset to the owner per_proc
		ori	r23,r23,lo16(EXT(per_proc_info))	; Set base per_proc
		li	r24,VMXowner			; Displacement to VMX owner
		add	r22,r23,r22			; Point to the owner per_proc
		li	r0,0				; We need this in a bit

vmxinvothr:	lwarx	r23,r24,r22			; Get the owner
		cmplw	r23,r20				; Does he still have this context?
		bne	vmxinvoths			; Nope...
		stwcx.	r0,r24,r22			; Try to invalidate it
		bne-	vmxinvothr			; Try again if there was a collision...

vmxinvoths:	isync

;
;		Now if there is a savearea associated with the popped context, release it.
;		Either way, pop the level to the top stacked context.
;

		lwz	r22,VMXsave(r20)		; Get pointer to the first savearea
		li	r21,0				; Assume we popped all the way out
		mr.	r22,r22				; Is there anything there?
		beq+	vmxsetlvl			; No, see if we need to enable...

		lwz	r21,SAVlevel(r22)		; Get the level of that savearea
		cmplw	r21,r27				; Is this the saved copy of the live stuff?
		bne	vmxsetlvl			; No, leave as is...

		lwz	r24,SAVprev(r22)		; Pick up the previous area
		li	r21,0				; Assume we popped all the way out
		mr.	r24,r24				; Any more context?
		beq-	vmxonlyone			; Nope...
		lwz	r21,SAVlevel(r24)		; Get the level associated with save

vmxonlyone:	stw	r24,VMXsave(r20)		; Dequeue this savearea
		rlwinm	r3,r22,0,0,19			; Find main savearea header
		lwz	r3,SACvrswap(r3)		; Get the virtual to real conversion
		la	r9,quickfret(r31)		; Point to the quickfret chain header
		xor	r3,r22,r3			; Convert to physical

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)		; (TEST/DEBUG)
		li	r2,0x3401			; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)	; (TEST/DEBUG)
		sc					; (TEST/DEBUG)
#endif

vmxhscdq:	lwarx	r0,0,r9				; Pick up the old chain head
		stw	r0,SAVprev(r22)			; Move it to the current guy
		stwcx.	r3,0,r9				; Save it
		bne-	vmxhscdq			; Someone changed the list...

vmxsetlvl:	stw	r21,VMXlevel(r20)		; Save the level

;
;		Here we check if we are at the right level
;

vmxchkena:	lwz	r21,VMXowner(r31)		; Get the ID of the live context
		lwz	r23,VMXlevel(r26)		; Get the level ID
		cmplw	r26,r21				; Do we have the live context?
		lwz	r24,VMXcpu(r26)			; Get the CPU that the context was last dispatched on
		bne-	setena				; No, can not possibly enable...
		cmplw	r30,r23				; Are we about to launch the live level?
		cmplw	cr1,r19,r24			; Was facility used on this processor last?
		bne-	setena				; No, not live...
		bne-	cr1,setena			; No, wrong cpu, have to enable later....

		lwz	r24,VMXsave(r26)		; Get the first savearea
		mr.	r24,r24				; Any savearea?
		beq+	vmxena				; Nope...
		lwz	r25,SAVlevel(r24)		; Get the level of savearea
		lwz	r0,SAVprev(r24)			; Get the previous
		cmplw	r30,r25				; Is savearea for the level we are launching?
		bne+	vmxena				; No, just go enable...

		stw	r0,VMXsave(r26)			; Pop the chain
		rlwinm	r3,r24,0,0,19			; Find main savearea header
		lwz	r3,SACvrswap(r3)		; Get the virtual to real conversion
		la	r9,quickfret(r31)		; Point to the quickfret chain header
		xor	r3,r24,r3			; Convert to physical

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)		; (TEST/DEBUG)
		li	r2,0x3402			; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)	; (TEST/DEBUG)
		sc					; (TEST/DEBUG)
#endif

vmxckcdq:	lwarx	r0,0,r9				; Pick up the old chain head
		stw	r0,SAVprev(r24)			; Move it to the current guy
		stwcx.	r3,0,r9				; Save it
		bne-	vmxckcdq			; Someone changed the list...

vmxena:		oris	r29,r29,hi16(MASK(MSR_VEC))	; Enable facility

setena:		rlwinm.	r0,r29,0,MSR_PR_BIT,MSR_PR_BIT	; Are we about to launch user state?
		rlwinm	r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit	; Set flag if we enabled vector
		stw	r29,savesrr1(r27)		; Turn facility on or off
		crmove	cr7_eq,cr0_eq			; Remember if we are going to user state
		lwz	r19,deferctx(r28)		; Get any deferred facility context switch
		rlwimi.	r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit	; Set flag if we enabled floats
		beq	setenaa				; Neither float nor vector turned on....

		lwz	r5,ACT_MACT_SPF(r28)		; Get activation copy
		lwz	r6,spcFlags(r31)		; Get per_proc copy
		or	r5,r5,r20			; Set vector/float changed bits in activation
		or	r6,r6,r20			; Set vector/float changed bits in per_proc
		stw	r5,ACT_MACT_SPF(r28)		; Set activation copy
		stw	r6,spcFlags(r31)		; Set per_proc copy

setenaa:	mfdec	r24				; Get decrementer
		bf+	cr2_eq,nodefer			; No deferred to switch to...

		li	r20,0				; Clear this
		stw	r26,curctx(r28)			; Make the facility context current
		stw	r20,deferctx(r28)		; Clear deferred context

nodefer:	lwz	r22,qactTimer(r28)		; Get high order quick activation timer
		mr.	r24,r24				; See if it has popped already...
		lwz	r23,qactTimer+4(r28)		; Get low order qact timer
		ble-	chkenax				; We have popped or are just about to...

;		Read the 64-bit time base consistently (re-read upper half to catch a
;		carry between the two reads), then see whether the qact timer expires
;		before the current decrementer would.

segtb:		mftbu	r20				; Get the upper time base
		mftb	r21				; Get the low
		mftbu	r19				; Get upper again
		or.	r0,r22,r23			; Any time set?
		cmplw	cr1,r20,r19			; Did they change?
		beq+	chkenax				; No time set....
		bne-	cr1,segtb			; Timebase ticked, get them again...

		subfc	r6,r21,r23			; Subtract current from qact time
		li	r0,0				; Make a 0
		subfe	r5,r20,r22			; Finish subtract
		subfze	r0,r0				; Get a 0 if qact was bigger than current, -1 otherwise
		andc.	r12,r5,r0			; Set 0 if qact has passed
		andc	r13,r6,r0			; Set 0 if qact has passed
		bne	chkenax				; If high order is non-zero, this is too big for a decrementer
		cmplw	r13,r24				; Is this earlier than the decrementer? (logical compare takes care of high bit on)
		bge+	chkenax				; No, do not reset decrementer...

		mtdec	r13				; Set our value

chkenax:

#if DEBUG
		lwz	r20,SAVact(r27)			; (TEST/DEBUG) Make sure our restore
		lwz	r21,PP_ACTIVE_THREAD(r31)	; (TEST/DEBUG) with the current act.
		cmpwi	r21,0				; (TEST/DEBUG)
		beq-	yeswereok			; (TEST/DEBUG)
		lwz	r21,THREAD_TOP_ACT(r21)		; (TEST/DEBUG)
		cmplw	r21,r20				; (TEST/DEBUG)
		beq+	yeswereok			; (TEST/DEBUG)
		lis	r0,hi16(Choke)			; (TEST/DEBUG) Choke code
		ori	r0,r0,lo16(Choke)		; (TEST/DEBUG) and the rest
		mr	r21,r27				; (TEST/DEBUG) Save the savearea address
		li	r3,failContext			; (TEST/DEBUG) Bad state code
		sc					; (TEST/DEBUG) System ABEND
yeswereok:
#endif

		rlwinm	r5,r27,0,0,19			; Round savearea down to page bndry
		lwz	r5,SACvrswap(r5)		; Get the conversion from virtual to real
		xor	r3,r27,r5			; Flip to physical address
		b	EXT(exception_exit)		; We are all done now...

/*
 *		Here's where we handle the fastpath stuff
 *		We'll do what we can here because registers are already
 *		loaded and it will be less confusing than moving them around.
 *		If we need to though, we'll branch off somewhere's else.
 *
 *		Registers when we get here:
 *
 *			r0  = syscall number
 *			r4  = savearea/pcb
 *			r13 = activation
 *			r14 = previous savearea (if any)
 *			r16 = thread
 *			r25 = per_proc
 */

		.align	5

fastpath:	cmplwi	cr3,r0,0x7FF1			; Is it CthreadSetSelfNumber?
		bnelr-	cr3				; Not a fast path...
/*
 *		void cthread_set_self(cproc_t p)
 *
 *		sets thread state "user_value"
 *
 *		This op is invoked as follows:
 *			li	r0, CthreadSetSelfNumber	// load the fast-trap number
 *			sc					// invoke fast-trap
 *			blr
 *
 */

CthreadSetSelfNumber:

	lwz	r5,saver3(r4)			/* Retrieve the self number */
	stw	r5,CTHREAD_SELF(r13)		/* Remember it */
	stw	r5,UAW(r25)			/* Prime the per_proc_info with it */

	.globl	EXT(fastexit)
EXT(fastexit):
	rlwinm	r9,r4,0,0,19			/* Round down to the base savearea block */
	lwz	r9,SACvrswap(r9)		/* Get the conversion from virtual to real */
	xor	r3,r4,r9			/* Switch savearea to physical addressing */
	b	EXT(exception_exit)		/* Go back to the caller... */

/*
 *		Here's where we check for a hit on the Blue Box Assist
 *		Most registers are non-volatile, so be careful here. If we don't
 *		recognize the trap instruction we go back for regular processing.
 *		Otherwise we transfer to the assist code.
 */

	.align	5

checkassist:
	lwz	r0,saveexception(r4)		; Get the exception code
	lwz	r23,savesrr1(r4)		; Get the interrupted MSR
	lwz	r26,ACT_MACT_BEDA(r13)		; Get Blue Box Descriptor Area
	mtcrf	0x18,r23			; Check what SRR1 says
	lwz	r24,ACT_MACT_BTS(r13)		; Get the table start
	cmplwi	r0,T_AST			; Check for T_AST trap
	lwz	r27,savesrr0(r4)		; Get trapped address
	crnand	cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT	; We need both trap and user state
	sub	r24,r27,r24			; See how far into it we are
	cror	cr0_eq,cr0_eq,cr1_eq		; Need to bail if AST or not trap or not user state
	cmplwi	cr1,r24,BB_MAX_TRAP		; Do we fit in the list?
	cror	cr0_eq,cr0_eq,cr1_gt		; Also leave if trap not in range
	btlr-	cr0_eq				; No assist if AST or not trap or not user state or trap not in range
	b	EXT(atomic_switch_trap)		; Go to the assist...

;
;		Virtual Machine Monitor
;		Here is where we exit from the emulated context
;		Note that most registers get trashed here
;		R3 and R30 are preserved across the call and hold the activation
;		and savearea respectively.
;

		.align	5

exitFromVM:	mr	r30,r4				; Get the savearea
		mr	r3,r13				; Get the activation

		b	EXT(vmm_exit)			; Do it to it

		.align	5
		.globl	EXT(retFromVM)

LEXT(retFromVM)
		mfsprg	r10,0				; Restore the per_proc info
		mr	r8,r3				; Get the activation
		lwz	r4,SAVprev(r30)			; Pick up the previous savearea
		mr	r3,r30				; Put savearea in proper register for common code
		lwz	r11,SAVflags(r30)		; Get the flags of the current savearea
		rlwinm	r11,r11,0,15,13			; Clear the syscall flag
		lwz	r1,ACT_THREAD(r8)		; and the active thread
		stw	r11,SAVflags(r3)		; Save back the flags (with reset stack cleared)

		stw	r4,ACT_MACT_PCB(r8)		; Point to the previous savearea (or 0 if none)

		lwz	r5,THREAD_KERNEL_STACK(r1)	; Get the base pointer to the stack
		addi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE	; Reset to empty
		stw	r5,ACT_MACT_KSP(r8)		; Save the empty stack pointer

		b	chkfac				; Go end it all...

;
;		chandler (note: not a candle maker or tallow merchant)
;
;		Here is the system choke handler.  This is where the system goes
;		to die.
;
;		We get here as a result of a T_CHOKE exception which is generated
;		by the Choke firmware call or by lowmem_vectors when it detects a
;		fatal error.  Examples of where this may be used are when we detect
;		problems in low-level mapping chains, trashed savearea free chains,
;		or stack guardpage violations.
;
;		Note that we can not set a back chain in the stack when we come
;		here because we are probably here because the chain was corrupt.
;

		.align	5
		.globl	EXT(chandler)
LEXT(chandler)						/* Choke handler */

		lis	r25,hi16(EXT(trcWork))		; (TEST/DEBUG)
		li	r31,0				; (TEST/DEBUG)
		ori	r25,r25,lo16(EXT(trcWork))	; (TEST/DEBUG)
		stw	r31,traceMask(r25)		; (TEST/DEBUG)

		mfsprg	r25,0				; Get the per_proc

		lwz	r1,PP_DEBSTACKPTR(r25)		; Get debug stack pointer
		cmpwi	r1,-1				; Are we already choking?
		bne	chokefirst			; Nope...

chokespin:	addi	r31,r31,1			; Spin and hope for an analyzer connection...
		addi	r31,r31,1			; Spin and hope for an analyzer connection...
		addi	r31,r31,1			; Spin and hope for an analyzer connection...
		addi	r31,r31,1			; Spin and hope for an analyzer connection...
		addi	r31,r31,1			; Spin and hope for an analyzer connection...
		addi	r31,r31,1			; Spin and hope for an analyzer connection...
		b	chokespin			; Spin and hope for an analyzer connection...

chokefirst:	li	r0,-1				; Set choke value
		mr.	r1,r1				; See if we are on debug stack yet
		lwz	r10,saver1(r4)			;
		stw	r0,PP_DEBSTACKPTR(r25)		; Show we are choking
		bne	chokestart			; We are not on the debug stack yet...

		lwz	r2,PP_DEBSTACK_TOP_SS(r25)	; Get debug stack top
		sub	r11,r2,r10			; Get stack depth
		cmplwi	r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED	; Check if stack pointer is ok
		bgt	chokespin			; Bad stack pointer or too little left, just die...

		subi	r1,r10,FM_REDZONE		; Make a red zone

chokestart:	li	r0,0				; Get a zero
		stw	r0,FM_BACKPTR(r1)		; We now have terminated the back chain

		bl	EXT(SysChoked)			; Call the "C" phase of this
		b	chokespin			; Should not be here so just go spin...

#if VERIFYSAVE
;
;		Savearea chain verification
;

versave:
#if 1
;
;		Make sure that all savearea chains have the right type on them
;

		lis	r28,hi16(EXT(default_pset))	; (TEST/DEBUG)
		lis	r27,hi16(EXT(DebugWork))	; (TEST/DEBUG)
		ori	r28,r28,lo16(EXT(default_pset))	; (TEST/DEBUG)
		ori	r27,r27,lo16(EXT(DebugWork))	; (TEST/DEBUG)
		li	r20,0				; (TEST/DEBUG)
		lwz	r26,0(r27)			; (TEST/DEBUG)
		lwz	r27,psthreadcnt(r28)		; (TEST/DEBUG)
		mr.	r26,r26				; (TEST/DEBUG) Have we locked the test out?
		lwz	r28,psthreads(r28)		; (TEST/DEBUG)
		mflr	r31				; (TEST/DEBUG) Save return
		bnelr-					; (TEST/DEBUG) Test already triggered, skip...
		b	fckgo				; (TEST/DEBUG) Join up...

fcknext:	mr.	r27,r27				; (TEST/DEBUG) Any more threads?
		bne+	fckxxx				; (TEST/DEBUG) Yes...

		mtlr	r31				; (TEST/DEBUG) Restore return
		blr					; (TEST/DEBUG) Leave...
fckxxx: lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Get next thread fckgo: subi r27,r27,1 ; (TEST/DEBUG) Decrement thread count lwz r24,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) Get activation for the thread lwz r20,ACT_MACT_PCB(r24) ; (TEST/DEBUG) Get the normal context li r21,SAVgeneral ; (TEST/DEBUG) Make sure this is all general context bl versavetype ; (TEST/DEBUG) Check the chain lwz r20,facctx+FPUsave(r24) ; (TEST/DEBUG) Get regular floating point li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point bl versavetype ; (TEST/DEBUG) Check the chain lwz r20,facctx+VMXsave(r24) ; (TEST/DEBUG) Get regular vector point li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector bl versavetype ; (TEST/DEBUG) Check the chain lwz r29,vmmControl(r24) ; (TEST/DEBUG) Get the virtual machine control blocks mr. r29,r29 ; (TEST/DEBUG) Are there any? beq+ fcknext ; (TEST/DEBUG) Nope, next thread... li r22,kVmmMaxContextsPerThread ; (TEST/DEBUG) Get the number of control blocks subi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get running start fcknvmm: subi r22,r22,1 ; (TEST/DEBUG) Do all of them mr. r22,r22 ; (TEST/DEBUG) Are we all done? addi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get the next entry blt- fcknext ; (TEST/DEBUG) Yes, check next thread... lwz r23,vmmFlags(r29) ; (TEST/DEBUG) Get entry flags rlwinm. r23,r23,0,0,0 ; (TEST/DEBUG) Is this in use? beq+ fcknvmm ; (TEST/DEBUG) Not in use... lwz r20,vmmFacCtx+FPUsave(r29) ; (TEST/DEBUG) Get regular floating point li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point bl versavetype ; (TEST/DEBUG) Check the chain lwz r20,vmmFacCtx+VMXsave(r29) ; (TEST/DEBUG) Get regular vector point li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector bl versavetype ; (TEST/DEBUG) Check the chain b fcknvmm ; (TEST/DEBUG) Get then vmm block... versavetype: mr. r20,r20 ; (TEST/DEBUG) Chain done? beqlr- ; (TEST/DEBUG) Yes... 
lwz r23,SAVflags(r20) ; (TEST/DEBUG) Get the flags rlwinm r23,r23,24,24,31 ; (TEST/DEBUG) Position it cmplw r23,r21 ; (TEST/DEBUG) Are we the correct type? beq+ versvok ; (TEST/DEBUG) This one is ok... lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger versvok: lwz r20,SAVprev(r20) ; (TEST/DEBUG) Get the previous one b versavetype ; (TEST/DEBUG) Go check its type... #endif #if 0 ; ; Make sure there are no circular links in the float chain ; And that FP is marked busy in it. ; And the only the top is marked invalid. ; And that the owning PCB is correct. ; lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) li r20,0 ; (TEST/DEBUG) lwz r26,0(r27) ; (TEST/DEBUG) lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) mr. r26,r26 ; (TEST/DEBUG) lwz r28,psthreads(r28) ; (TEST/DEBUG) bnelr- ; (TEST/DEBUG) fcknxtth: mr. r27,r27 ; (TEST/DEBUG) beqlr- ; (TEST/DEBUG) lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) fckact: mr. r26,r26 ; (TEST/DEBUG) bne+ fckact2 ; (TEST/DEBUG) lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line subi r27,r27,1 ; (TEST/DEBUG) b fcknxtth ; (TEST/DEBUG) fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain li r29,1 ; (TEST/DEBUG) li r22,0 ; (TEST/DEBUG) fckact3: mr. r20,r20 ; (TEST/DEBUG) Are there any? beq+ fckact5 ; (TEST/DEBUG) No... addi r22,r22,1 ; (TEST/DEBUG) Count chain depth lwz r21,SAVflags(r20) ; (TEST/DEBUG) Get the flags rlwinm. r21,r21,0,1,1 ; (TEST/DEBUG) FP busy? bne+ fckact3a ; (TEST/DEBUG) Yeah... lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r27,0(r27) ; (TEST/DEBUG) BREAKPOINT_TRAP ; (TEST/DEBUG) Die fckact3a: cmplwi r22,1 ; (TEST/DEBUG) At first SA? beq+ fckact3b ; (TEST/DEBUG) Yeah, invalid is ok... 
lwz r21,SAVlvlfp(r20) ; (TEST/DEBUG) Get level cmplwi r21,1 ; (TEST/DEBUG) Is it invalid? bne+ fckact3b ; (TEST/DEBUG) Nope, it is ok... lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r27,0(r27) ; (TEST/DEBUG) BREAKPOINT_TRAP ; (TEST/DEBUG) Die fckact3b: lwz r21,SAVact(r20) ; (TEST/DEBUG) Get the owner cmplw r21,r26 ; (TEST/DEBUG) Correct activation? beq+ fckact3c ; (TEST/DEBUG) Yup... lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r27,0(r27) ; (TEST/DEBUG) BREAKPOINT_TRAP ; (TEST/DEBUG) Die fckact3c: ; (TEST/DEBUG) lbz r21,SAVflags+3(r20) ; (TEST/DEBUG) Pick up the test byte mr. r21,r21 ; (TEST/DEBUG) marked? beq+ fckact4 ; (TEST/DEBUG) No, good... lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r27,0(r27) ; (TEST/DEBUG) BREAKPOINT_TRAP ; (TEST/DEBUG) fckact4: stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Set the test byte lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list b fckact3 ; (TEST/DEBUG) Try it... fckact5: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain li r29,0 ; (TEST/DEBUG) fckact6: mr. r20,r20 ; (TEST/DEBUG) Are there any? beq+ fcknact ; (TEST/DEBUG) No... stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Clear the test byte lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list b fckact6 ; (TEST/DEBUG) Try it... fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation b fckact ; (TEST/DEBUG) #endif #if 0 ; ; Make sure in use count matches found savearea. This is ; not always accurate. There is a variable "fuzz" factor in count. lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) li r20,0 ; (TEST/DEBUG) lwz r26,0(r27) ; (TEST/DEBUG) lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) mr. r26,r26 ; (TEST/DEBUG) lwz r28,psthreads(r28) ; (TEST/DEBUG) bnelr- ; (TEST/DEBUG) cknxtth: mr. 
r27,r27 ; (TEST/DEBUG) beq- cktotal ; (TEST/DEBUG) lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) ckact: mr. r26,r26 ; (TEST/DEBUG) bne+ ckact2 ; (TEST/DEBUG) lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line subi r27,r27,1 ; (TEST/DEBUG) b cknxtth ; (TEST/DEBUG) ckact2: lwz r29,ACT_MACT_PCB(r26) ; (TEST/DEBUG) cknorm: mr. r29,r29 ; (TEST/DEBUG) beq- cknormd ; (TEST/DEBUG) addi r20,r20,1 ; (TEST/DEBUG) Count normal savearea lwz r29,SAVprev(r29) ; (TEST/DEBUG) b cknorm ; (TEST/DEBUG) cknormd: lwz r29,ACT_MACT_FPU(r26) ; (TEST/DEBUG) ckfpu: mr. r29,r29 ; (TEST/DEBUG) beq- ckfpud ; (TEST/DEBUG) lwz r21,SAVflags(r29) ; (TEST/DEBUG) rlwinm. r21,r21,0,0,0 ; (TEST/DEBUG) See if already counted bne- cknfpu ; (TEST/DEBUG) addi r20,r20,1 ; (TEST/DEBUG) Count fpu savearea cknfpu: lwz r29,SAVprefp(r29) ; (TEST/DEBUG) b ckfpu ; (TEST/DEBUG) ckfpud: lwz r29,ACT_MACT_VMX(r26) ; (TEST/DEBUG) ckvmx: mr. r29,r29 ; (TEST/DEBUG) beq- ckvmxd ; (TEST/DEBUG) lwz r21,SAVflags(r29) ; (TEST/DEBUG) rlwinm. r21,r21,0,0,1 ; (TEST/DEBUG) See if already counted bne- cknvmx ; (TEST/DEBUG) addi r20,r20,1 ; (TEST/DEBUG) Count vector savearea cknvmx: lwz r29,SAVprevec(r29) ; (TEST/DEBUG) b ckvmx ; (TEST/DEBUG) ckvmxd: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation b ckact ; (TEST/DEBUG) cktotal: lis r28,hi16(EXT(saveanchor)) ; (TEST/DEBUG) lis r27,hi16(EXT(real_ncpus)) ; (TEST/DEBUG) ori r28,r28,lo16(EXT(saveanchor)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(real_ncpus)) ; (TEST/DEBUG) lwz r21,SVinuse(r28) ; (TEST/DEBUG) lwz r27,0(r27) ; (TEST/DEBUG) Get the number of CPUs sub. r29,r21,r20 ; (TEST/DEBUG) Get number accounted for blt- badsave ; (TEST/DEBUG) Have too many in use... 
sub r26,r29,r27 ; (TEST/DEBUG) Should be 1 unaccounted for for each processor cmpwi r26,10 ; (TEST/DEBUG) Allow a 10 area slop factor bltlr+ ; (TEST/DEBUG) badsave: lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) stw r27,0(r27) ; (TEST/DEBUG) BREAKPOINT_TRAP ; (TEST/DEBUG) #endif #endif |