Compare commits
1048 Commits
Author | SHA1 | Date |
---|---|---|
dump_stack() | a79ea1905a | |
dump_stack() | 331876127a | |
dump_stack() | ee1262e983 | |
dump_stack() | e51a528838 | |
dump_stack() | fc193afe92 | |
dump_stack() | 1c8e1d068b | |
dump_stack() | c909c2a352 | |
dump_stack() | e633fd2e79 | |
dump_stack() | 2b4db95166 | |
dump_stack() | 6a9bfb503f | |
dump_stack() | 29f4821320 | |
dump_stack() | bb8344958e | |
dump_stack() | 2c66dbc736 | |
dump_stack() | cc43cbcc2d | |
dump_stack() | e203229f00 | |
dump_stack() | 7b7c01ac8a | |
dump_stack() | 247f0f90ed | |
dump_stack() | b1dc739cfa | |
dump_stack() | 9727c7863c | |
dump_stack() | 335eeb5ed5 | |
dump_stack() | 8812cb4293 | |
dump_stack() | a9a42ba33a | |
dump_stack() | c17676d0f9 | |
dump_stack() | 8251927821 | |
dump_stack() | 4a1422e010 | |
dump_stack() | 35df5850f5 | |
dump_stack() | 451566d27f | |
dump_stack() | 61b995f330 | |
dump_stack() | b1be394d6b | |
dump_stack() | cc5e70373f | |
dump_stack() | 86213c171a | |
dump_stack() | 8a5971379a | |
dump_stack() | ce0a4d92fc | |
dump_stack() | 059ca6dc93 | |
dump_stack() | d317769a64 | |
dump_stack() | d733cde2ae | |
dump_stack() | 8a4ce9909b | |
dump_stack() | 293dbda2a7 | |
dump_stack() | 0c6d5bd371 | |
dump_stack() | bd2f274887 | |
dump_stack() | 0ba3651c4a | |
dump_stack() | 6bda2344c9 | |
dump_stack() | 30e0e5f554 | |
dump_stack() | fcd559124d | |
dump_stack() | 4e01c719a0 | |
dump_stack() | 3c6a2eab32 | |
dump_stack() | a8066428f8 | |
dump_stack() | 94d0cf1ae4 | |
dump_stack() | 987be594a4 | |
dump_stack() | 8aa5391a25 | |
dump_stack() | e63059043f | |
dump_stack() | 438f7f7386 | |
dump_stack() | edfaf68b11 | |
dump_stack() | ee232dc54a | |
dump_stack() | ed99ffd2e1 | |
dump_stack() | 375844e2cd | |
dump_stack() | bdeb395dd9 | |
dump_stack() | 592fdc8b83 | |
dump_stack() | ed34797dc0 | |
dump_stack() | 4e5a499db7 | |
dump_stack() | 4202a7bc26 | |
dump_stack() | 1356e0dc34 | |
dump_stack() | 0314b5ca93 | |
dump_stack() | 820208d079 | |
dump_stack() | 48e5e8cd04 | |
dump_stack() | 9b69738163 | |
dump_stack() | 0a9b16a0f6 | |
dump_stack() | b4bf0314f0 | |
dump_stack() | d43cd36858 | |
dump_stack() | 67ffa2347a | |
dump_stack() | 6036211172 | |
dump_stack() | 4e92950929 | |
dump_stack() | 1b3e23d188 | |
dump_stack() | 6f53a3f386 | |
dump_stack() | 4a8f119b5d | |
dump_stack() | 56faf1a351 | |
dump_stack() | 41c4241d75 | |
dump_stack() | 1cb5c40c77 | |
dump_stack() | b86c0508f9 | |
dump_stack() | 7e1b2a24f3 | |
dump_stack() | f8e5d29722 | |
dump_stack() | 47a3d00f1b | |
dump_stack() | 3a1fc86251 | |
dump_stack() | f44c275c9d | |
dump_stack() | 6ffaa3dad4 | |
dump_stack() | 8516ff9e91 | |
dump_stack() | 983f135097 | |
dump_stack() | 2a55d611d1 | |
dump_stack() | 77201baa63 | |
dump_stack() | 338e184424 | |
dump_stack() | cfc6c1928f | |
dump_stack() | 7497b67b8b | |
dump_stack() | 6d725d3581 | |
dump_stack() | 312e708116 | |
dump_stack() | 0c16dc02dc | |
dump_stack() | b1f11af512 | |
dump_stack() | a6944050cc | |
dump_stack() | c12b0a8829 | |
dump_stack() | b53b3f2632 | |
dump_stack() | 79037d61ec | |
dump_stack() | 482378abaf | |
dump_stack() | b0b19d87ca | |
dump_stack() | 5396375b47 | |
dump_stack() | 6d6ee135cd | |
dump_stack() | c7bc206ad8 | |
dump_stack() | c54616594c | |
dump_stack() | 4441e84063 | |
dump_stack() | 7d28549db7 | |
dump_stack() | c1ee3e1ac0 | |
dump_stack() | 49388981aa | |
dump_stack() | a72d9b77de | |
dump_stack() | 75d740b22b | |
dump_stack() | 8f39b502a4 | |
dump_stack() | 916acc9895 | |
dump_stack() | 2f9b5d615b | |
dump_stack() | eaba233ca3 | |
dump_stack() | bec424b493 | |
dump_stack() | 6b4298c55d | |
dump_stack() | 56dfabdfa3 | |
dump_stack() | 1e3b7a867d | |
dump_stack() | 3b76b4c0cd | |
dump_stack() | 43d7643ba7 | |
dump_stack() | 347fcbae60 | |
dump_stack() | e141f46892 | |
dump_stack() | 9271d69bc6 | |
dump_stack() | 7942bd22fa | |
dump_stack() | 17356ac0e4 | |
dump_stack() | ca57ea2dac | |
dump_stack() | 004d4223f9 | |
dump_stack() | 443d23bd67 | |
dump_stack() | a3170ada69 | |
dump_stack() | 31f4d0e92d | |
dump_stack() | a748778b72 | |
dump_stack() | 487b9c520d | |
dump_stack() | 4db10c66dc | |
dump_stack() | 5ec6873c57 | |
dump_stack() | 8e57f7f5ef | |
dump_stack() | c0914820c0 | |
dump_stack() | 9df0880e3e | |
dump_stack() | 88bfa867fd | |
dump_stack() | 013fb42350 | |
dump_stack() | dce7546dd2 | |
dump_stack() | b6bc9b36c5 | |
dump_stack() | 4fca6b07e1 | |
dump_stack() | e618d6b001 | |
dump_stack() | 3d70591717 | |
dump_stack() | 5813721dc9 | |
dump_stack() | f827a72bee | |
dump_stack() | 5c1bd9a27d | |
dump_stack() | 99fea27497 | |
dump_stack() | fe2b05d5fc | |
dump_stack() | 1410fe4660 | |
dump_stack() | 8c49680675 | |
dump_stack() | 852680e944 | |
dump_stack() | 673b273593 | |
dump_stack() | 650cf65fa4 | |
dump_stack() | 56e9898d75 | |
dump_stack() | 022ced0eba | |
dump_stack() | 603f1c3654 | |
dump_stack() | 92f95d8658 | |
dump_stack() | 1b87946130 | |
dump_stack() | d9bfa63ed1 | |
dump_stack() | a98fca403c | |
dump_stack() | 1354c029b1 | |
dump_stack() | a57478e38b | |
dump_stack() | 60de4af81e | |
dump_stack() | 3292e5c874 | |
dump_stack() | db9516b358 | |
dump_stack() | b17433ab42 | |
dump_stack() | d7cf88e34f | |
dump_stack() | 1a9fdf0917 | |
dump_stack() | 0dceacd2df | |
dump_stack() | aceaf96448 | |
dump_stack() | b24008ad3f | |
dump_stack() | a930b8d9b8 | |
dump_stack() | e18ddf8a11 | |
dump_stack() | 2c462da3de | |
dump_stack() | 5e67783bb8 | |
dump_stack() | 9a5a42202e | |
dump_stack() | 4fa8c2c02b | |
dump_stack() | 301e2fde1a | |
dump_stack() | a646e9cf33 | |
dump_stack() | b631767d98 | |
dump_stack() | 6db5ffc8c2 | |
dump_stack() | 10c5fb7ac4 | |
dump_stack() | ad3a76320e | |
dump_stack() | 65b49996e6 | |
dump_stack() | 7806a774e4 | |
dump_stack() | 605871d17a | |
dump_stack() | 49760c065e | |
dump_stack() | 5749f7d96e | |
dump_stack() | 94285cd94d | |
dump_stack() | bf9a43c1b8 | |
dump_stack() | 7d0ee9a1dc | |
dump_stack() | b8058bffb0 | |
dump_stack() | 04d6f0dbd3 | |
dump_stack() | 11bf6eda38 | |
dump_stack() | 3d8faafcce | |
dump_stack() | 2c31dd25f9 | |
dump_stack() | f36c412250 | |
dump_stack() | abcf2c1013 | |
dump_stack() | ee90bfaa72 | |
dump_stack() | 3271710653 | |
dump_stack() | 3dd88bac0e | |
dump_stack() | 2c2124bdb0 | |
dump_stack() | ceaacade0b | |
dump_stack() | 7b9935dc13 | |
dump_stack() | 4171954350 | |
dump_stack() | 707cf6f268 | |
dump_stack() | a4b20299cd | |
dump_stack() | 93f66b08f4 | |
dump_stack() | c1fceb6ce6 | |
dump_stack() | e0295664af | |
dump_stack() | a9cd7ba18b | |
dump_stack() | 3740a07619 | |
dump_stack() | fe96366eba | |
dump_stack() | 48ba7b7c7b | |
dump_stack() | ae00b57471 | |
dump_stack() | 408e330b27 | |
dump_stack() | ac4fcaaa91 | |
dump_stack() | bff4422098 | |
dump_stack() | 4a5376eb43 | |
dump_stack() | 02bca8e0ae | |
dump_stack() | 26a65924df | |
dump_stack() | 77e118be64 | |
dump_stack() | 66d45e69d9 | |
dump_stack() | c35def964e | |
dump_stack() | e2d66db16f | |
dump_stack() | daaef89050 | |
dump_stack() | c1ec4add81 | |
dump_stack() | 0edb0ac0af | |
dump_stack() | c6e06d8e3e | |
dump_stack() | e302c447f5 | |
dump_stack() | 330519f617 | |
dump_stack() | 7ca989fd8d | |
dump_stack() | f2ce20e53b | |
dump_stack() | 6f40fa554e | |
dump_stack() | a1999115db | |
dump_stack() | ff7bed76f2 | |
dump_stack() | 14320faca8 | |
dump_stack() | fa5d0adb39 | |
dump_stack() | 2fe3103603 | |
dump_stack() | 2eb91ffac9 | |
dump_stack() | 519b8d190a | |
dump_stack() | d507b86373 | |
dump_stack() | e1dd7c18be | |
dump_stack() | c076db3505 | |
dump_stack() | 632e4f5ffc | |
dump_stack() | b02da8adeb | |
dump_stack() | 31b0945a15 | |
dump_stack() | ba03d4a049 | |
dump_stack() | b88ab7cca3 | |
dump_stack() | b8817a4930 | |
dump_stack() | e767299222 | |
dump_stack() | f0c82f9289 | |
dump_stack() | 9c237b52db | |
dump_stack() | 120fcdc56b | |
dump_stack() | c3774714fd | |
dump_stack() | 73f5df2425 | |
dump_stack() | d551cc8fc4 | |
dump_stack() | 6385ce92e3 | |
dump_stack() | 6939d64226 | |
dump_stack() | 071608805e | |
dump_stack() | 80e57cb60c | |
dump_stack() | bcf8de336f | |
dump_stack() | 8d2d56bea3 | |
dump_stack() | 17256317c9 | |
dump_stack() | 26faa53f8b | |
dump_stack() | 5ccca6617f | |
dump_stack() | 0589ae25e4 | |
dump_stack() | d6670ee8d9 | |
dump_stack() | 6a338fc6ad | |
dump_stack() | 407c1a7975 | |
dump_stack() | 99c9346995 | |
dump_stack() | 90f7e62888 | |
dump_stack() | 412199966e | |
dump_stack() | ef35743579 | |
dump_stack() | 71c2b2001c | |
dump_stack() | 4eed03ec2a | |
dump_stack() | 3cd901b1be | |
dump_stack() | 73b1edd1cb | |
dump_stack() | a607ce62d1 | |
dump_stack() | e1ac462642 | |
dump_stack() | 304bb74ecf | |
dump_stack() | 8486a0337d | |
dump_stack() | 2a6e775b69 | |
dump_stack() | f2e43f891a | |
dump_stack() | 5707559c28 | |
dump_stack() | 51a67db71a | |
dump_stack() | 6df94d7e15 | |
dump_stack() | d45d5731a9 | |
dump_stack() | 950cee6df0 | |
dump_stack() | 7e3f02f3a9 | |
dump_stack() | 360afdb05e | |
dump_stack() | 0cb9128810 | |
dump_stack() | c3f6e90137 | |
dump_stack() | be5f114694 | |
dump_stack() | f1429d3e1d | |
dump_stack() | fb6ef30aaa | |
dump_stack() | eb54ec4a24 | |
dump_stack() | 5d95422624 | |
dump_stack() | e95e8d299f | |
dump_stack() | 3de5f5e12d | |
dump_stack() | a68ceacb43 | |
dump_stack() | 72f52d3200 | |
dump_stack() | 706d442948 | |
dump_stack() | f7b9f538b4 | |
dump_stack() | d70be6a306 | |
dump_stack() | 15a6f38631 | |
dump_stack() | ac2166b050 | |
dump_stack() | f630fa6f49 | |
dump_stack() | 6e92010dc0 | |
dump_stack() | 008ce1cdbf | |
dump_stack() | 5270f2438c | |
dump_stack() | 204413af9e | |
dump_stack() | c43f16733e | |
dump_stack() | 74898924da | |
dump_stack() | c6acbef7f5 | |
dump_stack() | d27847c533 | |
dump_stack() | eec740b208 | |
dump_stack() | bd2dfe3e4e | |
dump_stack() | 6ab8f2fea1 | |
dump_stack() | e7614ef3a7 | |
dump_stack() | 18426775b9 | |
dump_stack() | e87add8e44 | |
dump_stack() | b8d0319097 | |
dump_stack() | 968c4d7363 | |
dump_stack() | 246e0efac1 | |
dump_stack() | 4cc0166a92 | |
dump_stack() | 87e9790f79 | |
dump_stack() | 2af2692a66 | |
dump_stack() | ef1ebf6f23 | |
dump_stack() | 8a7439d7a9 | |
dump_stack() | 8d93517be7 | |
dump_stack() | da637c2923 | |
dump_stack() | 6f18f6c779 | |
dump_stack() | 66026ebf5a | |
dump_stack() | f5b1283690 | |
dump_stack() | f906e3187f | |
dump_stack() | ba3e6072d4 | |
dump_stack() | 0338483e72 | |
dump_stack() | f2d0035c0e | |
dump_stack() | 7e87567070 | |
dump_stack() | 2be8b14fc7 | |
dump_stack() | a043b998ff | |
dump_stack() | c527544107 | |
dump_stack() | ef4eeea6a2 | |
dump_stack() | d4fe5d8f15 | |
dump_stack() | e3652db73b | |
dump_stack() | f571635848 | |
dump_stack() | 2cc06ed092 | |
dump_stack() | adc450c201 | |
dump_stack() | 5de9c50579 | |
dump_stack() | a49d705846 | |
dump_stack() | 67630e080b | |
dump_stack() | 697eb18552 | |
dump_stack() | a855a6e70c | |
dump_stack() | d8aafe081f | |
dump_stack() | 4e956d10ad | |
dump_stack() | a6d4fe362c | |
dump_stack() | 6ff9fcc2c0 | |
dump_stack() | fca20d2d63 | |
dump_stack() | 4fe7a0906e | |
dump_stack() | 22ddada2f7 | |
dump_stack() | 08dcfd52a3 | |
dump_stack() | 3dd9071057 | |
dump_stack() | decdf0625b | |
dump_stack() | 2ee26c989d | |
dump_stack() | 73eb3bf70d | |
dump_stack() | 65688dcd9d | |
dump_stack() | d447b91908 | |
dump_stack() | 926631e19d | |
dump_stack() | 5ecacf00bd | |
dump_stack() | 4d950d7302 | |
dump_stack() | e1ae427757 | |
dump_stack() | 4fd2fd31d2 | |
dump_stack() | 046c553ed5 | |
dump_stack() | a7e5827ff9 | |
dump_stack() | 7f6fe18d0a | |
dump_stack() | 53183245ce | |
dump_stack() | 8f1a2afc53 | |
dump_stack() | 8949b53ccc | |
dump_stack() | 4ea7fbfbf9 | |
dump_stack() | 9b33140cc8 | |
dump_stack() | c13b595ab1 | |
dump_stack() | 7c2957dafb | |
dump_stack() | 3e64c99b1c | |
dump_stack() | b0c795153a | |
dump_stack() | 43bb539db8 | |
dump_stack() | 3959a23efa | |
dump_stack() | 262362659a | |
dump_stack() | bbdc9712c5 | |
dump_stack() | 53878bcb23 | |
dump_stack() | 24c0a05ab0 | |
dump_stack() | 40b1b223d4 | |
dump_stack() | 97ee8f09a4 | |
dump_stack() | 346e24db6b | |
dump_stack() | d118ab03c3 | |
dump_stack() | e1ac75d0fa | |
dump_stack() | 34b5693ae8 | |
dump_stack() | 562abec7f4 | |
dump_stack() | 883c8ee6cb | |
dump_stack() | 689bf1098a | |
dump_stack() | eda23b45b9 | |
dump_stack() | 6c1f9f8606 | |
dump_stack() | 5e11c1939d | |
dump_stack() | 52c452debe | |
dump_stack() | a05b579086 | |
dump_stack() | d089ad4931 | |
dump_stack() | 0f799b0d5a | |
dump_stack() | 21882ff461 | |
dump_stack() | 82ba7bd7af | |
dump_stack() | c0603404a8 | |
dump_stack() | e0b63aee1a | |
dump_stack() | b2383ba442 | |
dump_stack() | 1b2d636410 | |
dump_stack() | de5ebd6455 | |
dump_stack() | 42be5161d8 | |
dump_stack() | 1a2929a1aa | |
dump_stack() | 5778f39ac4 | |
dump_stack() | 032bba6ee5 | |
dump_stack() | 23a28f33d4 | |
dump_stack() | 8bb211cf01 | |
dump_stack() | c75f10e692 | |
dump_stack() | b4a75dc66e | |
dump_stack() | f85ad89130 | |
dump_stack() | 501dcb23ae | |
dump_stack() | 19081aea5d | |
dump_stack() | a090328b1c | |
dump_stack() | 2452b090b0 | |
dump_stack() | b09b51840c | |
dump_stack() | a13a78e292 | |
dump_stack() | e10b50a41a | |
dump_stack() | 87ed8da5b8 | |
dump_stack() | e9ced28b29 | |
dump_stack() | f8f3424e1e | |
dump_stack() | 0fd9d80940 | |
dump_stack() | fa23cdfc54 | |
dump_stack() | 6bb0da5082 | |
dump_stack() | 6b8d97be39 | |
dump_stack() | 7502221cfd | |
dump_stack() | 181115d914 | |
dump_stack() | f91534aa6a | |
dump_stack() | 0ee813124d | |
dump_stack() | f7f8a27dfa | |
dump_stack() | bb676fa491 | |
dump_stack() | 78626c10af | |
dump_stack() | 93a1b74e34 | |
dump_stack() | 73139e1b91 | |
dump_stack() | e231121082 | |
dump_stack() | 9e8a381de0 | |
dump_stack() | 17295cad89 | |
dump_stack() | 29010b2a1b | |
dump_stack() | 0bf2acb043 | |
dump_stack() | d0693e64c4 | |
dump_stack() | 70fec57d2f | |
dump_stack() | 2cc84ac962 | |
dump_stack() | 143e54984d | |
dump_stack() | c6d0ee0102 | |
dump_stack() | 39f4cd4cfd | |
dump_stack() | 065aca24b0 | |
dump_stack() | baf282ec2c | |
dump_stack() | 9d1bbcc288 | |
dump_stack() | 804b6b56ba | |
dump_stack() | 5975898225 | |
dump_stack() | dc8d667930 | |
dump_stack() | 05f210494a | |
dump_stack() | fb5411503c | |
dump_stack() | 1818d38b03 | |
dump_stack() | c8d171da98 | |
dump_stack() | 4e77cf82d3 | |
dump_stack() | 60a1d19042 | |
dump_stack() | 7cf1bbd194 | |
dump_stack() | 5ada1ef41a | |
dump_stack() | 997d6a67ba | |
dump_stack() | 1d22902eb0 | |
dump_stack() | 59febd75b0 | |
dump_stack() | b1b7a9e675 | |
dump_stack() | 95695a4070 | |
dump_stack() | 28acc51417 | |
dump_stack() | ebc597ff0b | |
dump_stack() | 2c2435a7a5 | |
dump_stack() | 2977b6f7fd | |
dump_stack() | da5797766b | |
dump_stack() | 9b987bcc82 | |
dump_stack() | 599ce03ca4 | |
dump_stack() | d13eab6947 | |
dump_stack() | 9fd4b541da | |
dump_stack() | 43aa116682 | |
dump_stack() | b5d4bdd5f4 | |
dump_stack() | fa579e5170 | |
dump_stack() | b310b29670 | |
dump_stack() | 490d063e5b | |
dump_stack() | 33ee48692c | |
dump_stack() | ed5d0ab1d1 | |
dump_stack() | ea6775fa45 | |
dump_stack() | 4e16dec7c1 | |
dump_stack() | 0a1a5890ed | |
dump_stack() | 593c152798 | |
dump_stack() | e8554e7c4a | |
dump_stack() | 39d7adc72f | |
dump_stack() | 4688c6aefd | |
dump_stack() | 4f2b7e1962 | |
dump_stack() | dc025ff32f | |
dump_stack() | 11c69f24ce | |
dump_stack() | da57e4e6b5 | |
dump_stack() | f46a2ec4b0 | |
dump_stack() | 166c125391 | |
dump_stack() | bb511898bb | |
dump_stack() | e919def8d0 | |
dump_stack() | 8f06448240 | |
dump_stack() | 7d88f09bb0 | |
dump_stack() | 9c4313c3a4 | |
dump_stack() | e87a6be1bc | |
dump_stack() | eb51469283 | |
dump_stack() | 3ae8707dcb | |
dump_stack() | 9c830bf22a | |
dump_stack() | a78429729b | |
dump_stack() | c965cf3ade | |
dump_stack() | 5b50fd2b13 | |
dump_stack() | c860b77332 | |
dump_stack() | 4e4c62c376 | |
dump_stack() | a48cbe4bb2 | |
dump_stack() | 2179f84874 | |
dump_stack() | 33d3d28e5d | |
dump_stack() | 8873566dcb | |
dump_stack() | fb12fc2f65 | |
dump_stack() | da28fef007 | |
dump_stack() | 44d474d6e5 | |
dump_stack() | 3e87a1b651 | |
dump_stack() | 310999744e | |
dump_stack() | 61ac856afb | |
dump_stack() | 995f24fdc4 | |
dump_stack() | 8b807a7e77 | |
dump_stack() | 467f31d141 | |
dump_stack() | 9752c7ae7d | |
dump_stack() | e3c2bb134f | |
dump_stack() | 94f77cd742 | |
dump_stack() | 028fa48f71 | |
dump_stack() | 2910ce17c7 | |
dump_stack() | 183b4698dd | |
dump_stack() | efbdc9db36 | |
dump_stack() | 3d2b8b7d2a | |
dump_stack() | 9190b850b7 | |
dump_stack() | d41846ede2 | |
dump_stack() | 52c6581675 | |
dump_stack() | 83c1ca303c | |
dump_stack() | ad0a3706cb | |
dump_stack() | 2e6ce1e8f9 | |
dump_stack() | a77d56c26b | |
dump_stack() | 53324e663a | |
dump_stack() | d4fbec39a6 | |
dump_stack() | cca637cf9d | |
dump_stack() | bef382920e | |
dump_stack() | 20cbd2f72e | |
dump_stack() | 0594b0ea60 | |
dump_stack() | f6cb2d8d12 | |
dump_stack() | 27a22ba023 | |
dump_stack() | da9a243ee4 | |
dump_stack() | e123bf258c | |
dump_stack() | 9c563ca68b | |
dump_stack() | 2b0d7b0460 | |
dump_stack() | 68ad89973c | |
dump_stack() | 96a6dd7fb9 | |
dump_stack() | 6a0846e129 | |
dump_stack() | 5b396e7b5e | |
dump_stack() | 7e545586bc | |
dump_stack() | da6843f9b7 | |
dump_stack() | 26c261b6f3 | |
dump_stack() | 77be74797b | |
dump_stack() | d04a9de932 | |
dump_stack() | ac74e450c3 | |
dump_stack() | 41c84c70f6 | |
dump_stack() | 3ba71a7787 | |
dump_stack() | aaca60cafc | |
dump_stack() | 3a29b3b869 | |
dump_stack() | 07bd886383 | |
dump_stack() | c3042c97f0 | |
dump_stack() | 1b68fc571f | |
dump_stack() | b1d034a7fe | |
dump_stack() | 35d34fdbe0 | |
dump_stack() | cc470d2105 | |
dump_stack() | 5f0749155a | |
dump_stack() | e2e0dad1ad | |
dump_stack() | d5ea97c532 | |
dump_stack() | 31370b7bad | |
dump_stack() | cbdef2936d | |
dump_stack() | bc9eea879d | |
dump_stack() | b0dae2fc69 | |
dump_stack() | c22cea2834 | |
dump_stack() | 828511f0eb | |
dump_stack() | 0735f1d581 | |
dump_stack() | f816b43609 | |
dump_stack() | 52d3d339df | |
dump_stack() | 308b916b0f | |
dump_stack() | 5ecf36ebc3 | |
dump_stack() | 1351819f17 | |
dump_stack() | 0323d3d941 | |
dump_stack() | fa9ee43817 | |
dump_stack() | 25fad476b4 | |
dump_stack() | f484dd99de | |
dump_stack() | 5aed593d81 | |
dump_stack() | e8a7c043d6 | |
dump_stack() | dbd3449074 | |
dump_stack() | e274fe55f0 | |
dump_stack() | 66bd74d59d | |
dump_stack() | 98a0a561f1 | |
dump_stack() | f8880b2486 | |
dump_stack() | f5f87867ac | |
dump_stack() | 70ac88a07f | |
dump_stack() | 5d13843835 | |
dump_stack() | 0f36b3b55b | |
dump_stack() | aa32c7a8ae | |
dump_stack() | f7fcfe8292 | |
dump_stack() | d5b733a0a0 | |
dump_stack() | cd68dc1ddc | |
dump_stack() | 2460b8230a | |
dump_stack() | 2f4c859dd8 | |
dump_stack() | 169acca9df | |
dump_stack() | 48be2df1b0 | |
dump_stack() | 0e85866822 | |
dump_stack() | e291352925 | |
dump_stack() | c14c5989a4 | |
dump_stack() | 3677adece9 | |
dump_stack() | ca95155ce0 | |
dump_stack() | 15d8ab8137 | |
dump_stack() | 1a1afce4f2 | |
dump_stack() | 17a70fdb2d | |
dump_stack() | 8ec4f13364 | |
dump_stack() | c1c5afc0e0 | |
dump_stack() | 2e5c386c42 | |
dump_stack() | b558269ac3 | |
dump_stack() | d4f826d44b | |
dump_stack() | 7b94053376 | |
dump_stack() | 0e08d87a64 | |
dump_stack() | 734240500b | |
dump_stack() | 8c1024b36c | |
dump_stack() | ababd027f9 | |
dump_stack() | 0826948568 | |
dump_stack() | e8a446ec76 | |
dump_stack() | de3f361e51 | |
dump_stack() | 8279517ecf | |
dump_stack() | 4f80122039 | |
dump_stack() | 8922b3e548 | |
dump_stack() | 321fe0567a | |
dump_stack() | 530b98e513 | |
dump_stack() | 249f11d059 | |
dump_stack() | b1f5a36a32 | |
dump_stack() | 8fa62e9a6e | |
dump_stack() | e04154b235 | |
dump_stack() | 096cad8701 | |
dump_stack() | 70d464f0e2 | |
dump_stack() | d65d683dfc | |
dump_stack() | bde115f5df | |
dump_stack() | d972bae547 | |
dump_stack() | b3d4a0dbc2 | |
dump_stack() | 4a3d739b85 | |
dump_stack() | bb319a9ff6 | |
dump_stack() | 21daac4fbc | |
dump_stack() | 841fd7f585 | |
dump_stack() | b812048408 | |
dump_stack() | a5edc4837f | |
dump_stack() | 9e55ebd44e | |
dump_stack() | e35e030c54 | |
dump_stack() | a4f2a31819 | |
dump_stack() | c3cf25e523 | |
dump_stack() | 056e38698e | |
dump_stack() | 32b692f752 | |
dump_stack() | 3f8c7fd86b | |
dump_stack() | f9c2849658 | |
dump_stack() | caba73cd7e | |
dump_stack() | 5bb79302dd | |
dump_stack() | 4570e9adbe | |
dump_stack() | 8029ad2185 | |
dump_stack() | 2f8446864a | |
dump_stack() | dd602df291 | |
dump_stack() | c9d71601f2 | |
dump_stack() | 9863c93c02 | |
dump_stack() | 27a3cc498c | |
dump_stack() | b75289a9d1 | |
dump_stack() | fd973c367f | |
dump_stack() | 4bc4ca738b | |
dump_stack() | cd7cf0f2b6 | |
dump_stack() | 87a5c389df | |
dump_stack() | be3f519573 | |
dump_stack() | a5bfe334cb | |
dump_stack() | c0dd0ae07b | |
dump_stack() | a4c83c1637 | |
dump_stack() | 897ac0699d | |
dump_stack() | 5b444a3193 | |
dump_stack() | 8aed31e41b | |
dump_stack() | f57b3408be | |
dump_stack() | 483e56163e | |
dump_stack() | ac5f83349c | |
dump_stack() | 5931c08de1 | |
dump_stack() | 0d3a075d76 | |
dump_stack() | bbd6f79443 | |
dump_stack() | 5ce73d2fc5 | |
dump_stack() | f65d4ad879 | |
dump_stack() | 7dddf71d93 | |
dump_stack() | f75c70db94 | |
dump_stack() | 603e91af6f | |
dump_stack() | 42dc8ac98c | |
dump_stack() | b7404aa453 | |
dump_stack() | bf455d9788 | |
dump_stack() | a0ed1eb1f5 | |
dump_stack() | 3220b9a5ae | |
dump_stack() | 87ef1e42b5 | |
dump_stack() | 17a4b746cc | |
dump_stack() | 7314cc72db | |
dump_stack() | c353618c17 | |
dump_stack() | fe3092371c | |
dump_stack() | ab7a70cc0a | |
dump_stack() | 0907129529 | |
dump_stack() | a874ac9fc7 | |
dump_stack() | 23e933824b | |
dump_stack() | 80d7f9fb52 | |
dump_stack() | fad8502639 | |
dump_stack() | 5b468a4ec1 | |
dump_stack() | 4a22df770b | |
dump_stack() | 88a3ff3869 | |
dump_stack() | c5645f1985 | |
dump_stack() | bf421f80c8 | |
dump_stack() | 055ea6b83d | |
dump_stack() | 96c267d093 | |
dump_stack() | 301eb2a60b | |
dump_stack() | fcfbf4f36d | |
dump_stack() | b98abe4a83 | |
dump_stack() | 72d51c0e1c | |
dump_stack() | 2d345c584b | |
dump_stack() | 97fb543fef | |
dump_stack() | 3fd2fd5966 | |
dump_stack() | 29af467bee | |
dump_stack() | 604d21e4a2 | |
dump_stack() | e44124c063 | |
dump_stack() | fc0c76f114 | |
dump_stack() | f399390c2c | |
dump_stack() | 8d3986ce8e | |
dump_stack() | 3aba883b81 | |
dump_stack() | 3329dc4c24 | |
dump_stack() | 34f3692d01 | |
dump_stack() | 1e66c156fa | |
dump_stack() | 2b54d13b9e | |
dump_stack() | 44494b65a6 | |
dump_stack() | a36d5ddb12 | |
dump_stack() | 488d2380e1 | |
dump_stack() | 292e3dc211 | |
dump_stack() | ec1732c8ec | |
dump_stack() | bcdfb23112 | |
dump_stack() | d70150b496 | |
dump_stack() | 105809ddec | |
dump_stack() | 5ece0e0f15 | |
dump_stack() | 2150162e8e | |
dump_stack() | 7b16a439d8 | |
dump_stack() | 7e050d9e99 | |
dump_stack() | 2c7341f0d8 | |
dump_stack() | b98dc87d54 | |
dump_stack() | 0f1bdc795d | |
dump_stack() | 3e9410bf09 | |
dump_stack() | 0b198f71ca | |
dump_stack() | d6c678b0cd | |
dump_stack() | e2fcc20f36 | |
dump_stack() | 60bc7238a8 | |
dump_stack() | 04106e7537 | |
dump_stack() | 21d8bec382 | |
dump_stack() | c82bd6a554 | |
dump_stack() | 08beba2bab | |
dump_stack() | 305c6972ca | |
dump_stack() | 78069c6240 | |
dump_stack() | 992a0f871c | |
dump_stack() | 3f16599109 | |
dump_stack() | c2c3837f44 | |
dump_stack() | f1f67e38ee | |
dump_stack() | ae20a6d11d | |
dump_stack() | 8bffea0aea | |
dump_stack() | feb1ab7d37 | |
dump_stack() | 12d5d43d7a | |
dump_stack() | 585a608083 | |
dump_stack() | f10c4165a1 | |
dump_stack() | 51e4cfec30 | |
dump_stack() | d5d9cce517 | |
dump_stack() | 0e153b2763 | |
dump_stack() | 71f5530fed | |
dump_stack() | 870fe202b7 | |
dump_stack() | b0587a4ade | |
dump_stack() | 4fdcc5d098 | |
dump_stack() | 09feffb6a8 | |
dump_stack() | 2d6db97b43 | |
dump_stack() | cc1261b0b0 | |
dump_stack() | 24b6749504 | |
dump_stack() | f97cb3f10a | |
dump_stack() | b246ecf956 | |
dump_stack() | c9618be454 | |
dump_stack() | f6b6b823a9 | |
dump_stack() | 3f79c8e461 | |
dump_stack() | 3d6961dfd7 | |
dump_stack() | 9910921e30 | |
dump_stack() | d59049e531 | |
dump_stack() | 668bc1e391 | |
dump_stack() | 3ec919abc1 | |
dump_stack() | 0529b30558 | |
dump_stack() | 063df192b4 | |
dump_stack() | 1a952e0212 | |
dump_stack() | 8b5ce9923b | |
dump_stack() | b1493b79a3 | |
dump_stack() | fb5b2a2bbb | |
dump_stack() | a9db750ea5 | |
dump_stack() | 55032f07af | |
dump_stack() | bb7c2f94d5 | |
dump_stack() | 422f05d25b | |
dump_stack() | 3c8e80cace | |
dump_stack() | a0ee660e50 | |
dump_stack() | 82436cbd83 | |
dump_stack() | ce8f8d3a38 | |
dump_stack() | 330da3b930 | |
dump_stack() | ce7794ce84 | |
dump_stack() | abd8e69186 | |
dump_stack() | 2f52f6db6d | |
dump_stack() | 935266c850 | |
dump_stack() | a7b619fc40 | |
dump_stack() | 0e185ab36b | |
dump_stack() | b8bb11943a | |
dump_stack() | 2bc55e2011 | |
dump_stack() | 6e1216201e | |
dump_stack() | 92706c68fb | |
dump_stack() | 49ee65de76 | |
dump_stack() | 8fca9dbd2e | |
dump_stack() | 1deb201e25 | |
dump_stack() | cc26ff8626 | |
Denis Efremov | 05ae073fe6 | |
dump_stack() | 603a2c98bd | |
dump_stack() | cfee4c565c | |
dump_stack() | 02663fad64 | |
Bradley Landherr | e43993c6e5 | |
dump_stack() | 90829e2409 | |
dump_stack() | 514e2c9c91 | |
dump_stack() | 5b0bf7de01 | |
dump_stack() | 992c41c84b | |
dump_stack() | 22a8e32e2c | |
dump_stack() | 2f5f1db0db | |
dump_stack() | 551ec7f7ef | |
dump_stack() | 8a53b6081c | |
dump_stack() | 27d8291bb2 | |
dump_stack() | db5d31d563 | |
dump_stack() | d27fbf6671 | |
dump_stack() | cf79a9f94f | |
dump_stack() | bfc6f11a7e | |
dump_stack() | bfae451749 | |
dump_stack() | 9b8d4a056e | |
dump_stack() | 81234fc3a6 | |
dump_stack() | 81db5a6d6a | |
alyakimenko | 5bb7e08188 | |
alyakimenko | dce1ce6c17 | |
dump_stack() | 1c2ea77920 | |
dump_stack() | f92b4e6640 | |
dump_stack() | db72ff0aea | |
dump_stack() | a6b81a3a24 | |
dump_stack() | f93f4e7072 | |
dump_stack() | 70168afa4a | |
dump_stack() | 26a724096e | |
dump_stack() | 0a332c670a | |
dump_stack() | 196f17277c | |
dump_stack() | 7f418b30ac | |
dump_stack() | 2494c94f6e | |
dump_stack() | 27ffff2d05 | |
dump_stack() | eafe9e57a8 | |
dump_stack() | 7e5126c042 | |
dump_stack() | 81219be062 | |
dump_stack() | 434aeb768b | |
dump_stack() | bd27e890d1 | |
dump_stack() | 873b35a18d | |
dump_stack() | fc2ee93b57 | |
dump_stack() | e03dff8409 | |
dump_stack() | f4a8b75244 | |
dump_stack() | c1a3cb6ce5 | |
dump_stack() | d58226c22c | |
dump_stack() | 9e1d71d1b2 | |
dump_stack() | 9c70af4f6f | |
dump_stack() | 7b8cf96b4a | |
dump_stack() | 7b6e3a9ad6 | |
dump_stack() | b117739c49 | |
dump_stack() | b28c47e64d | |
dump_stack() | 4b14187dad | |
dump_stack() | 950b1e5e83 | |
dump_stack() | bf90a10692 | |
dump_stack() | 3e7c564a5a | |
dump_stack() | dc73413114 | |
dump_stack() | 104e70f861 | |
dump_stack() | 365c9d0e95 | |
dump_stack() | 5bad772125 | |
dump_stack() | f3b0c07af2 | |
dump_stack() | f3d67cc3c2 | |
dump_stack() | 12b5bd2a99 | |
dump_stack() | b05c44ab9d | |
dump_stack() | 19535fc75c | |
dump_stack() | 5e6a9dec93 | |
dump_stack() | 0f89a868bd | |
dump_stack() | 14b8010fee | |
dump_stack() | 7fd8614e3c | |
dump_stack() | 3d958c1e10 | |
dump_stack() | e4bed2a4c3 | |
dump_stack() | a9d4d64e30 | |
dump_stack() | a8b423cddf | |
dump_stack() | df5d226772 | |
dump_stack() | 72bb8df46b | |
dump_stack() | 1ffd68601c | |
dump_stack() | 86ad71f230 | |
dump_stack() | a08861cc19 | |
dump_stack() | 24b2123582 | |
dump_stack() | 01d6c89d60 | |
dump_stack() | 08ed3461ad | |
dump_stack() | d425f455bb | |
dump_stack() | 857f398f6b | |
dump_stack() | 282d99f511 | |
dump_stack() | 338300eeec | |
dump_stack() | e106eaa3e0 | |
dump_stack() | 89305b7011 | |
dump_stack() | 54a3704bc2 | |
dump_stack() | f3d932e100 | |
dump_stack() | 0daf31e3aa | |
dump_stack() | c0aeb01ff7 | |
dump_stack() | ddf2fc0d0b | |
dump_stack() | 735256ff13 | |
dump_stack() | ea5f06334c | |
dump_stack() | f847f0a773 | |
dump_stack() | e5856c1931 | |
dump_stack() | faf9f9fd8f | |
dump_stack() | 6ee5530554 | |
dump_stack() | 844f5a5580 | |
dump_stack() | 1fdf92cc6b | |
dump_stack() | aa08b7a8b2 | |
dump_stack() | 73b39b5c0d | |
dump_stack() | b654fb29b9 | |
dump_stack() | 927fcddebf | |
dump_stack() | 986a6f55e0 | |
dump_stack() | e0c0d3a072 | |
dump_stack() | 5ad41bc1c8 | |
dump_stack() | b3b1ddcb7d | |
dump_stack() | 3789da0579 | |
dump_stack() | c1c5fd4f16 | |
dump_stack() | 2c341076a0 | |
dump_stack() | 4a55957edb | |
dump_stack() | fb750a93e4 | |
dump_stack() | 75f9436482 | |
dump_stack() | 7a689d942a | |
dump_stack() | a4ac4ff798 | |
dump_stack() | 56032241a0 | |
dump_stack() | 09087d066f | |
dump_stack() | eb9ed90571 | |
dump_stack() | f7c884e4f8 | |
dump_stack() | c2481272e2 | |
dump_stack() | 085690697d | |
dump_stack() | 574d5d45c3 | |
dump_stack() | ddec4adf57 | |
dump_stack() | b7d785f0c8 | |
dump_stack() | 53a80743ba | |
dump_stack() | 3064dc3a27 | |
dump_stack() | fc50808893 | |
dump_stack() | a0a9333385 | |
dump_stack() | f2b32d1e27 | |
dump_stack() | d035e4f8ad | |
dump_stack() | 15a8c6b1e4 | |
dump_stack() | 89c3175de4 | |
dump_stack() | 35dfe2a361 | |
dump_stack() | 8430eea47f | |
dump_stack() | ecf55a0cdf | |
dump_stack() | b7624f0d28 | |
dump_stack() | 5ed23ee2b0 | |
dump_stack() | 9175305cb9 | |
dump_stack() | 94be33f869 | |
dump_stack() | 6cebd85535 | |
dump_stack() | e63bfa24e9 | |
dump_stack() | 23be05969d | |
dump_stack() | 51fa085170 | |
dump_stack() | caee1b5756 | |
dump_stack() | 5dbbb33297 | |
dump_stack() | 238592e546 | |
dump_stack() | 6156947406 | |
dump_stack() | 133b7a9b03 | |
The Codacy Badger | a83acbae8b | |
dump_stack() | 5864109080 | |
dump_stack() | 75f5636d31 | |
dump_stack() | 56cdad74b3 | |
dump_stack() | c680099801 | |
dump_stack() | 94706ea8e7 | |
dump_stack() | 24de060a13 | |
dump_stack() | 27090f674a | |
dump_stack() | 80b3ae6912 | |
chiveson | 7d6806846d | |
dump_stack() | c12daaa1d6 | |
dump_stack() | e0c91f1b59 | |
dump_stack() | cf75f4424d | |
dump_stack() | c3af494fa8 | |
dump_stack() | 92484bf1d7 | |
dump_stack() | 983201bb7a | |
dump_stack() | b5965e8374 | |
dump_stack() | 144a8547bc | |
dump_stack() | fb9b03029b | |
dump_stack() | 2e6b983a84 | |
dump_stack() | ddf9c90ead | |
dump_stack() | 42bebad9ca | |
dump_stack() | 556ead8594 | |
dump_stack() | 630f6c7fe1 | |
dump_stack() | 094f209791 | |
dump_stack() | d42474892c | |
dump_stack() | 18a92703ba | |
dump_stack() | e0f0133d42 | |
dump_stack() | 1dace23475 | |
dump_stack() | 0b6ae6a23c | |
dump_stack() | 597de7f8c4 | |
dump_stack() | 051d080a67 | |
dump_stack() | 1f35eb165d | |
dump_stack() | db27959c8b | |
dump_stack() | 880af47cc5 | |
dump_stack() | 49b567cd4b | |
dump_stack() | 2a3c3ed18e | |
dump_stack() | 4dd34fec1d | |
dump_stack() | 076a5babb9 | |
dump_stack() | bc4129e01c | |
dump_stack() | 6b0301ec45 | |
dump_stack() | 825d69b770 | |
dump_stack() | 3fdb2736c8 | |
dump_stack() | 257ff0cb7f | |
dump_stack() | f8b3f5fbaf | |
dump_stack() | 5682dd99c1 | |
dump_stack() | cf0e5efe18 | |
dump_stack() | b459f91a22 | |
dump_stack() | bda5a5764a | |
dump_stack() | b0d2c99246 | |
dump_stack() | 6188043cef | |
dump_stack() | cb93a7df40 | |
dump_stack() | 3695e50f35 | |
dump_stack() | 287ce68c6e | |
dump_stack() | 7199854f44 | |
dump_stack() | ec17f97881 | |
dump_stack() | b56d718f35 | |
dump_stack() | ce4a5c740d | |
dump_stack() | f6eb95abc0 | |
dump_stack() | 6e77fc230d | |
dump_stack() | 9fc1eff305 | |
dump_stack() | 278c95f55e | |
dump_stack() | e25d5de854 | |
dump_stack() | 291715cbf8 | |
dump_stack() | dcc156272c | |
dump_stack() | 1488d4b081 | |
dump_stack() | b2f50efa2a | |
dump_stack() | b36956c7a4 | |
dump_stack() | 8225f0044d |
|
@ -0,0 +1,117 @@
|
|||
name: Debian Cache
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
push:
|
||||
paths:
|
||||
- '.github/workflows/debian-cache.yml'
|
||||
- 'distro/debian/snapshot/**'
|
||||
- 'distro/debian/cache.go'
|
||||
- 'distro/debian/kernel.go'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
|
||||
jobs:
|
||||
debian-kernel-metadata-cache:
|
||||
name: Metadata
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
- name: Cache
|
||||
run: ./out-of-tree --log-level=trace distro debian cache --refetch=0 --limit=128 --update-release
|
||||
|
||||
- name: Install s3cmd
|
||||
run: sudo apt install s3cmd
|
||||
|
||||
- name: Archive cache
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: debian-cache
|
||||
path: ~/.out-of-tree/debian.cache
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: debian-metadata-cache-logs
|
||||
path: ~/.out-of-tree/logs
|
||||
|
||||
- name: Upload cache
|
||||
run: s3cmd put --acl-public ~/.out-of-tree/debian.cache s3://out-of-tree/1.0.0/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}
|
||||
|
||||
debian-kernel-packages-mirror:
|
||||
name: Packages
|
||||
needs: debian-kernel-metadata-cache
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
- name: Install s3cmd
|
||||
run: sudo apt install s3cmd
|
||||
|
||||
- name: Mirror deb packages
|
||||
shell: python
|
||||
run: |
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
import datetime
|
||||
|
||||
from subprocess import getstatusoutput
|
||||
|
||||
def get_kernels() -> bool:
|
||||
status, output = getstatusoutput(
|
||||
"./out-of-tree distro debian fetch --max=16 --limit=1"
|
||||
)
|
||||
logging.info(output)
|
||||
return status == 0
|
||||
|
||||
def upload(f: str) -> bool:
|
||||
status, output = getstatusoutput(
|
||||
"s3cmd "
|
||||
"--host=fra1.digitaloceanspaces.com "
|
||||
"--host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' "
|
||||
"--access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} "
|
||||
"--secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} "
|
||||
f"put --acl-public {f} "
|
||||
"s3://out-of-tree/1.0.0/packages/debian/"
|
||||
)
|
||||
logging.info(output)
|
||||
return status == 0
|
||||
|
||||
logging.basicConfig(level=logging.NOTSET)
|
||||
|
||||
uploaded = []
|
||||
|
||||
timeout = time.time() + datetime.timedelta(hours=2).seconds
|
||||
|
||||
while get_kernels() and time.time() < timeout:
|
||||
for f in os.listdir():
|
||||
if not f.endswith('.deb'):
|
||||
continue
|
||||
|
||||
if f in uploaded:
|
||||
continue
|
||||
|
||||
logging.info(f)
|
||||
|
||||
ok = upload(f)
|
||||
if ok:
|
||||
uploaded += [f]
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: debian-packages-cache-logs
|
||||
path: ~/.out-of-tree/logs
|
|
@ -0,0 +1,172 @@
|
|||
name: E2E
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths-ignore:
|
||||
- ".github/workflows/images-*"
|
||||
- ".github/workflows/ubuntu.yml"
|
||||
- ".github/workflows/macos.yml"
|
||||
- ".github/workflows/debian-cache.yml"
|
||||
- "docs/**"
|
||||
- ".readthedocs.yaml"
|
||||
- "README.md"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test-end-to-end:
|
||||
name: Module
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [
|
||||
{ distro: Ubuntu, release: 12.04 },
|
||||
{ distro: Ubuntu, release: 14.04 },
|
||||
{ distro: Ubuntu, release: 16.04 },
|
||||
{ distro: Ubuntu, release: 18.04 },
|
||||
{ distro: Ubuntu, release: 20.04 },
|
||||
{ distro: Ubuntu, release: 22.04 },
|
||||
{ distro: CentOS, release: 6 },
|
||||
{ distro: CentOS, release: 7 },
|
||||
{ distro: CentOS, release: 8 },
|
||||
{ distro: OracleLinux, release: 6 },
|
||||
{ distro: OracleLinux, release: 7 },
|
||||
{ distro: OracleLinux, release: 8 },
|
||||
{ distro: OracleLinux, release: 9 },
|
||||
{ distro: Debian, release: 7 },
|
||||
{ distro: Debian, release: 8 },
|
||||
{ distro: Debian, release: 9 },
|
||||
{ distro: Debian, release: 10 },
|
||||
{ distro: Debian, release: 11 },
|
||||
{ distro: Debian, release: 12 },
|
||||
{ distro: OpenSUSE, release: "12.1" },
|
||||
{ distro: OpenSUSE, release: "12.2" },
|
||||
{ distro: OpenSUSE, release: "12.3" },
|
||||
{ distro: OpenSUSE, release: "13.1" },
|
||||
{ distro: OpenSUSE, release: "13.2" },
|
||||
{ distro: OpenSUSE, release: "42.1" },
|
||||
{ distro: OpenSUSE, release: "42.2" },
|
||||
{ distro: OpenSUSE, release: "42.3" },
|
||||
{ distro: OpenSUSE, release: "15.0" },
|
||||
{ distro: OpenSUSE, release: "15.1" },
|
||||
{ distro: OpenSUSE, release: "15.2" },
|
||||
{ distro: OpenSUSE, release: "15.3" },
|
||||
{ distro: OpenSUSE, release: "15.4" },
|
||||
{ distro: OpenSUSE, release: "15.5" }
|
||||
]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
- uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
|
||||
|
||||
- uses: webfactory/ssh-agent@v0.9.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: Create droplet
|
||||
run: >-
|
||||
doctl compute droplet create
|
||||
--size s-4vcpu-8gb-intel
|
||||
--tag-name=github-actions
|
||||
--image almalinux-9-x64
|
||||
--ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c'
|
||||
--wait
|
||||
--region fra1
|
||||
ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA
|
||||
|
||||
- name: End-to-End Testing [${{ matrix.os.distro }} ${{ matrix.os.release }}]
|
||||
shell: bash
|
||||
run: |
|
||||
sleep 1m
|
||||
|
||||
IP=$(doctl compute droplet list \
|
||||
--tag-name=github-actions \
|
||||
--format "Name,Public IPv4" \
|
||||
| grep -v ID \
|
||||
| grep ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA \
|
||||
| awk '{print $2}')
|
||||
|
||||
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
|
||||
do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
ssh root@$IP "cloud-init status --wait"
|
||||
|
||||
ssh root@$IP "dnf install -y podman qemu-kvm-core epel-release"
|
||||
ssh root@$IP "dnf install -y s3cmd"
|
||||
|
||||
ssh root@$IP "ln -s /usr/libexec/qemu-kvm /usr/bin/qemu-system-x86_64"
|
||||
|
||||
scp ./out-of-tree root@$IP:/usr/local/bin/
|
||||
|
||||
echo 'name = "test"' > examples/kernel-module/.out-of-tree.toml
|
||||
echo 'type = "module"' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'standard_modules = true' >> examples/kernel-module/.out-of-tree.toml
|
||||
|
||||
echo '[[targets]]' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'kernel = { regex = ".*" }' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo '[qemu]' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'timeout = "10m"' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'after_start_timeout = "10s"' >> examples/kernel-module/.out-of-tree.toml
|
||||
|
||||
echo 'modprobe uio || modprobe 9p || modprobe xfs' >> examples/kernel-module/test.sh
|
||||
|
||||
scp -r examples/kernel-module root@$IP:test
|
||||
|
||||
echo '[Unit]' >> test.service
|
||||
echo 'Description=e2e' >> test.service
|
||||
echo '[Service]' >> test.service
|
||||
echo 'RemainAfterExit=yes' >> test.service
|
||||
echo 'StandardError=append:/var/log/test.log' >> test.service
|
||||
echo 'StandardOutput=append:/var/log/test.log' >> test.service
|
||||
echo 'Type=oneshot' >> test.service
|
||||
echo 'WorkingDirectory=/root/test' >> test.service
|
||||
echo 'TimeoutStopSec=1' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-container-cache autogen --threads=8 --max=128 --shuffle' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree pew --qemu-timeout=10m --threads=4 --include-internal-errors' >> test.service
|
||||
|
||||
scp test.service root@$IP:/etc/systemd/system/test.service
|
||||
|
||||
ssh root@$IP systemctl daemon-reload
|
||||
|
||||
ssh root@$IP setenforce 0
|
||||
|
||||
ssh root@$IP systemctl start test --no-block
|
||||
|
||||
while ! ssh root@$IP systemctl show test -p SubState --value | grep -E '(failed|exited)'
|
||||
do
|
||||
sleep 30s
|
||||
done
|
||||
|
||||
ssh root@$IP "cat /var/log/test.log"
|
||||
|
||||
scp -r root@$IP:.out-of-tree/logs .
|
||||
|
||||
ssh root@$IP systemctl is-active test || exit 1
|
||||
|
||||
ssh root@$IP "/usr/local/bin/out-of-tree container save"
|
||||
ssh root@$IP "s3cmd put --acl-public *.tar.gz s3://out-of-tree/1.0.0/containers/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}"
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-end-to-end-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
|
||||
path: logs
|
||||
|
||||
- name: Delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA
|
|
@ -0,0 +1,86 @@
|
|||
name: Debian
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'tools/qemu-debian-img/**'
|
||||
- '.github/workflows/images-debian.yml'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
images:
|
||||
name: Qemu Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
|
||||
|
||||
- uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
shell: bash
|
||||
run: |
|
||||
sleep 1m
|
||||
|
||||
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-debian-$GITHUB_SHA | awk '{print $2}')
|
||||
|
||||
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
|
||||
do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
sleep 5m
|
||||
ssh root@$IP pkill apt-get || true
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
|
||||
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
|
||||
|
||||
ssh root@$IP "echo -e '[Unit]\nDescription=Debian image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-debian.log\nStandardOutput=append:/var/log/images-debian.log\nType=oneshot' >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
|
||||
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
|
||||
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-debian.service"
|
||||
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-debian-img/generate-images.sh' >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
ssh root@$IP systemctl daemon-reload
|
||||
|
||||
ssh root@$IP systemctl start images-debian --no-block
|
||||
|
||||
while ! ssh root@$IP systemctl show images-debian -p SubState --value | grep -E '(failed|exited)'
|
||||
do
|
||||
sleep 3m
|
||||
done
|
||||
|
||||
scp root@$IP:/var/log/images-debian.log .
|
||||
|
||||
ssh root@$IP systemctl is-active images-debian
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: images-debian-log
|
||||
path: images-debian.log
|
||||
|
||||
- name: delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-images-debian-$GITHUB_SHA
|
|
@ -0,0 +1,79 @@
|
|||
name: Oracle Linux
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'tools/qemu-oraclelinux-img/**'
|
||||
- '.github/workflows/images-oraclelinux.yml'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
images-oraclelinux:
|
||||
name: Qemu Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
|
||||
|
||||
- uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
shell: bash
|
||||
run: |
|
||||
sleep 1m
|
||||
|
||||
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-oraclelinux-$GITHUB_SHA | awk '{print $2}')
|
||||
|
||||
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
|
||||
do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
sleep 5m
|
||||
ssh root@$IP pkill apt-get || true
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
|
||||
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
|
||||
|
||||
ssh root@$IP "echo -e '[Unit]\nDescription=Oracle Linux image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-oraclelinux.log\nStandardOutput=append:/var/log/images-oraclelinux.log\nType=oneshot' >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
|
||||
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
|
||||
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
|
||||
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-oraclelinux-img/generate-images.sh' >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
ssh root@$IP systemctl daemon-reload
|
||||
|
||||
ssh root@$IP systemctl start images-oraclelinux --no-block
|
||||
|
||||
while ! ssh root@$IP systemctl show images-oraclelinux -p SubState --value | grep -E '(failed|exited)'
|
||||
do
|
||||
sleep 3m
|
||||
done
|
||||
|
||||
scp root@$IP:/var/log/images-oraclelinux.log .
|
||||
|
||||
ssh root@$IP systemctl is-active images-oraclelinux
|
||||
|
||||
- name: delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-images-oraclelinux-$GITHUB_SHA
|
|
@ -0,0 +1,26 @@
|
|||
name: macOS
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths-ignore:
|
||||
- ".github/workflows/images-*"
|
||||
- ".github/workflows/debian-cache.yml"
|
||||
- ".github/workflows/e2e.yml"
|
||||
- "docs/**"
|
||||
- "README.md"
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: macOS-12
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Build
|
||||
run: go build
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -eu
|
||||
|
||||
id
|
||||
|
||||
df -h
|
||||
|
||||
sudo systemd-run --wait rm -rf \
|
||||
/usr/share/az* \
|
||||
/usr/share/dotnet \
|
||||
/usr/share/gradle* \
|
||||
/usr/share/miniconda \
|
||||
/usr/share/swift \
|
||||
/var/lib/gems \
|
||||
/var/lib/mysql \
|
||||
/var/lib/snapd \
|
||||
/opt/hostedtoolcache/CodeQL \
|
||||
/opt/hostedtoolcache/Java_Temurin-Hotspot_jdk
|
||||
|
||||
sudo fstrim /
|
||||
|
||||
df -h
|
|
@ -0,0 +1,212 @@
|
|||
name: Ubuntu
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths-ignore:
|
||||
- ".github/workflows/images-*"
|
||||
- ".github/workflows/e2e.yml"
|
||||
- ".github/workflows/macos.yml"
|
||||
- ".github/workflows/debian-cache.yml"
|
||||
- "docs/**"
|
||||
- ".readthedocs.yaml"
|
||||
- "README.md"
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v5
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
test-unit:
|
||||
name: Unit Testing
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v5
|
||||
|
||||
- name: Install dependencies for tests
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install qemu-system-x86
|
||||
|
||||
- name: Bootstrap
|
||||
run: ./tools/qemu-ubuntu-img/bootstrap.sh
|
||||
|
||||
- name: Unit Testing
|
||||
run: |
|
||||
mkdir ~/.out-of-tree
|
||||
go test -parallel 1 -v ./...
|
||||
|
||||
test-end-to-end-examples:
|
||||
needs: [build]
|
||||
name: Examples
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
example: [
|
||||
{ dir: "kernel-module", params: "" },
|
||||
{ dir: "kernel-exploit", params: "--threshold=0" },
|
||||
{ dir: "script", params: "" },
|
||||
{ dir: "preload", params: "" }
|
||||
]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v5
|
||||
|
||||
- name: Setup
|
||||
run: .github/workflows/scripts/setup.sh
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
- name: Install dependencies for tests
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y qemu-system-x86
|
||||
echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_22.04/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list
|
||||
curl -fsSL https://download.opensuse.org/repositories/devel:kubic:libcontainers:unstable/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y podman
|
||||
|
||||
- name: End-to-End Testing [${{ matrix.example.dir }}]
|
||||
run: |
|
||||
cd examples/${{ matrix.example.dir }}
|
||||
../../out-of-tree --log-level=debug kernel autogen --max=1
|
||||
../../out-of-tree --log-level=debug pew --qemu-timeout=10m ${{ matrix.example.params }}
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-end-to-end-examples-${{ matrix.example.dir }}-logs
|
||||
path: ~/.out-of-tree/logs
|
||||
|
||||
test-end-to-end:
|
||||
needs: [build]
|
||||
name: E2E
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
#type: [ Script, Module ]
|
||||
type: [ Module ]
|
||||
os: [
|
||||
{ distro: Ubuntu, release: 12.04 },
|
||||
{ distro: Ubuntu, release: 14.04 },
|
||||
{ distro: Ubuntu, release: 16.04 },
|
||||
{ distro: Ubuntu, release: 18.04 },
|
||||
{ distro: Ubuntu, release: 20.04 },
|
||||
{ distro: Ubuntu, release: 22.04 },
|
||||
{ distro: CentOS, release: 6 },
|
||||
{ distro: CentOS, release: 7 },
|
||||
{ distro: CentOS, release: 8 },
|
||||
{ distro: OracleLinux, release: 6 },
|
||||
{ distro: OracleLinux, release: 7 },
|
||||
{ distro: OracleLinux, release: 8 },
|
||||
{ distro: OracleLinux, release: 9 },
|
||||
{ distro: Debian, release: 7 },
|
||||
{ distro: Debian, release: 8 },
|
||||
{ distro: Debian, release: 9 },
|
||||
{ distro: Debian, release: 10 },
|
||||
{ distro: Debian, release: 11 },
|
||||
{ distro: Debian, release: 12 },
|
||||
{ distro: OpenSUSE, release: "12.1" },
|
||||
{ distro: OpenSUSE, release: "12.2" },
|
||||
{ distro: OpenSUSE, release: "12.3" },
|
||||
{ distro: OpenSUSE, release: "13.1" },
|
||||
{ distro: OpenSUSE, release: "13.2" },
|
||||
{ distro: OpenSUSE, release: "42.1" },
|
||||
{ distro: OpenSUSE, release: "42.2" },
|
||||
{ distro: OpenSUSE, release: "42.3" },
|
||||
# { distro: OpenSUSE, release: "15.0" },
|
||||
{ distro: OpenSUSE, release: "15.1" },
|
||||
{ distro: OpenSUSE, release: "15.2" },
|
||||
{ distro: OpenSUSE, release: "15.3" },
|
||||
{ distro: OpenSUSE, release: "15.4" },
|
||||
{ distro: OpenSUSE, release: "15.5" }
|
||||
]
|
||||
|
||||
steps:
|
||||
- name: Backup docker files
|
||||
run: |
|
||||
echo "backup moby/buildkit image"
|
||||
sudo docker image save -o ${GITHUB_WORKSPACE}/images.tar moby/buildkit
|
||||
echo "prune docker"
|
||||
sudo docker system prune -a -f
|
||||
echo "back up /var/lib/docker folder structure and other files"
|
||||
sudo rsync -aPq /var/lib/docker/ ${GITHUB_WORKSPACE}/docker
|
||||
|
||||
- name: Maximize build space
|
||||
uses: easimon/maximize-build-space@master
|
||||
with:
|
||||
overprovision-lvm: 'true'
|
||||
remove-dotnet: 'true'
|
||||
# instead of using default value to mount to build path,
|
||||
# /var/lib/docker/ is really the place we need more spaces.
|
||||
build-mount-path: '/var/lib/docker/'
|
||||
|
||||
- name: Restore docker files
|
||||
run: |
|
||||
sudo rsync -aPq ${GITHUB_WORKSPACE}/docker/ /var/lib/docker
|
||||
sudo rm -rf ${GITHUB_WORKSPACE}/docker
|
||||
sudo ls ${GITHUB_WORKSPACE} -l
|
||||
sudo docker image load -i ${GITHUB_WORKSPACE}/images.tar
|
||||
sudo rm ${GITHUB_WORKSPACE}/images.tar
|
||||
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v5
|
||||
|
||||
- name: Setup
|
||||
run: .github/workflows/scripts/setup.sh
|
||||
|
||||
- name: Build
|
||||
run: go build
|
||||
|
||||
- name: Install dependencies for tests
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install qemu-system-x86
|
||||
|
||||
- name: End-to-End Testing ${{ matrix.type }} [${{ matrix.os.distro }} ${{ matrix.os.release }}]
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir test
|
||||
cd test
|
||||
|
||||
echo 'name = "test"' >> .out-of-tree.toml
|
||||
echo 'type = "${{ matrix.type }}"' >> .out-of-tree.toml
|
||||
echo 'script = "script.sh"' >> .out-of-tree.toml
|
||||
echo '[[targets]]' >> .out-of-tree.toml
|
||||
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> .out-of-tree.toml
|
||||
echo 'kernel = { regex = ".*" }' >> .out-of-tree.toml
|
||||
|
||||
echo -e '#!/bin/sh\necho ok' >> script.sh
|
||||
|
||||
cp ../examples/kernel-module/{module.c,Makefile,test.sh} .
|
||||
|
||||
../out-of-tree --log-level=debug kernel list-remote --distro=${{ matrix.os.distro }} --ver=${{ matrix.os.release }}
|
||||
../out-of-tree --log-level=debug kernel autogen --max=1 --shuffle
|
||||
../out-of-tree --log-level=debug pew --qemu-timeout=20m --include-internal-errors
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-end-to-end-${{ matrix.type }}-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
|
||||
path: ~/.out-of-tree/logs
|
|
@ -10,3 +10,7 @@
|
|||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
out-of-tree
|
||||
*.cache
|
||||
result
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
version: 2
|
||||
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: latest
|
||||
|
||||
sphinx:
|
||||
configuration: docs/conf.py
|
||||
|
||||
formats:
|
||||
- pdf
|
|
@ -0,0 +1,286 @@
|
|||
# Changelog
|
||||
|
||||
[ISO 8601](https://xkcd.com/1179/).
|
||||
|
||||
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [2.1.0]
|
||||
|
||||
### Added
|
||||
|
||||
- Graceful shutdown on ^C while kernels generation.
|
||||
|
||||
- Flag to set the container runtime command.
|
||||
|
||||
- out-of-tree image --dry-run for printing full qemu command.
|
||||
|
||||
### Changed
|
||||
|
||||
- No exit at the end of the retries, will continue with the other
|
||||
kernels.
|
||||
|
||||
- All temporary files moved to ~/.out-of-tree/tmp/.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Discrepancies between /lib/modules and /boot should no longer lead
|
||||
to fatal errors.
|
||||
|
||||
- Podman support on macOS.
|
||||
|
||||
## [2.0.0]
|
||||
|
||||
### Breaking
|
||||
|
||||
- Layers with kernels in containers have been abandoned in favor of
|
||||
installation to mounted volumes.
|
||||
|
||||
- Command line interface has been changed to alecthomas/kong.
|
||||
|
||||
### Added
|
||||
|
||||
- Command `kernel install` to install specific kernel.
|
||||
|
||||
- Command `containers` to manage containers.
|
||||
|
||||
- Command `image edit` to edit qemu image.
|
||||
|
||||
- Flag `--force` to force reinstallation of the kernel.
|
||||
|
||||
- Flag `--artifact-config` to specify the path to .out-of-tree.toml.
|
||||
|
||||
- Flag `--no-headers` flag to install kernel and initrd only.
|
||||
|
||||
- Flag `--shuffle` to randomize the order of kernels for
|
||||
installation/testing.
|
||||
|
||||
- Support make targets in artifact config.
|
||||
|
||||
- Support patches in artifact config.
|
||||
|
||||
- Support for copying standard modules to qemu.
|
||||
|
||||
- Script artifact type for various automation and information gathering.
|
||||
|
||||
- Add TestFiles to artifact config, transfers additional test files to VM.
|
||||
|
||||
- Improved logging, with logfile at ~/.out-of-tree/logs/out-of-tree.log
|
||||
|
||||
- Kernel installation will retry (10 times by default) in case of
|
||||
network problems.
|
||||
|
||||
- Stdout trace (with --log-level=trace, and always to logfile) for
|
||||
qemu and container execution.
|
||||
|
||||
- Compatibility with Podman.
|
||||
|
||||
- Support for Ubuntu 22.04.
|
||||
|
||||
## [1.4.0]
|
||||
|
||||
### Added
|
||||
|
||||
- Parameter `--docker-timeout` may also be set in the artifact
|
||||
configuration file.
|
||||
|
||||
- Preload modules before inserting module or run exploit. Modules can
|
||||
be specified by git repository path in the `repo` parameter of
|
||||
section `[[preload]]`. Also, there is a `path` parameter for local
|
||||
projects. Note that `repo` is using a cache that uses last commit
|
||||
hash to check is project needs to be rebuilt, so it's not suitable
|
||||
for local development (except if you will commit each time before
|
||||
run out-of-tree).
|
||||
|
||||
- Flag `--disable-preload` to ignore `[[preload]]` section of
|
||||
configuration file.
|
||||
|
||||
- Now `out-of-tree log dump` will show the last log if no ID
|
||||
specified.
|
||||
|
||||
## [1.3.0] 2020-05-30
|
||||
|
||||
### Added
|
||||
|
||||
- Support for Ubuntu 20.04 and CentOS 8.
|
||||
|
||||
## [1.2.1] 2019-12-25
|
||||
|
||||
### Fixed
|
||||
|
||||
- macOS support.
|
||||
|
||||
## [1.2.0] 2019-11-15
|
||||
|
||||
### Added
|
||||
|
||||
- Flag for Verbose output. Right now only qemu status messages is
|
||||
implemented.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Kpti settings was not affected for regular runs.
|
||||
|
||||
## [1.1.2] 2019-09-05
|
||||
|
||||
### Added
|
||||
|
||||
- Added policykit-1 to rootfs for Ubuntu.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Avoided slow mirrors with use of mirror://mirrors.ubuntu.com for
|
||||
Ubuntu 16.04 and newer.
|
||||
|
||||
## [1.1.1] 2019-08-31
|
||||
|
||||
### Fixed
|
||||
|
||||
- macOS support.
|
||||
|
||||
## [1.1.0] 2019-08-30
|
||||
|
||||
### Added
|
||||
|
||||
- Global configuration file (~/.out-of-tree/out-of-tree.toml) allow to
|
||||
set up default values for settings.
|
||||
|
||||
- rootfs generator for Ubuntu 14.04.
|
||||
|
||||
- Parameter for setting up docker registry server.
|
||||
|
||||
- Support for (distro-specific) custom docker commands that will be
|
||||
executed before the base template.
|
||||
|
||||
- Parameter for setting up a reliability threshold for exit code.
|
||||
|
||||
- Parameter for setting up global timeout, after which no new tasks
|
||||
will be started.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Spelling in output.
|
||||
|
||||
- Now kernel generation will not fail if there are no directory
|
||||
/lib/modules inside the container.
|
||||
|
||||
## [1.0.0] 2019-08-20
|
||||
|
||||
### Added
|
||||
|
||||
- New parameter `--max=X` is added for `autogen` (generate kernels
|
||||
base on `.out-of-tree.toml` definitions) and `pew` (automated
|
||||
runs) and allows to specify a maximum number of runs per each
|
||||
supported kernel in module/exploit definition.
|
||||
|
||||
- New command `genall` -- generate all kernels for specified
|
||||
distro/version.
|
||||
|
||||
- All logs stores in sqlite3 database. Implemented specific commands
|
||||
for making simple queries and export data to markdown and json.
|
||||
|
||||
- Implemented success rate calculation for previous runs.
|
||||
|
||||
- Save of build results supported by parameter `--dist` for `pew`.
|
||||
|
||||
- Support for generating kernels info from host system.
|
||||
|
||||
- Support for build on host.
|
||||
|
||||
- Support for custom kernels.
|
||||
|
||||
- Now debugging environment is automatically looking for debug
|
||||
kernel on the host system.
|
||||
|
||||
- Added ability to enable/disable kaslr/smep/smap/kpti for debugging
|
||||
by command line flags.
|
||||
|
||||
- New parameter `--threads=N` is added for `pew` and allows to
|
||||
specify maximum number of threads that will be used for parallel
|
||||
build/run/test.
|
||||
|
||||
- Tagging for runs. Tags write to log and can be used for
|
||||
statistics.
|
||||
|
||||
- Added non-regex way to set kernel version in .out-of-tree.toml (see
|
||||
examples).
|
||||
|
||||
- New command `pack` that perform tests in subdirectories.
|
||||
|
||||
- Added ability to disable kaslr/smep/smap/kpti for in artifact
|
||||
definition.
|
||||
|
||||
- Added ability to change amount of memory/CPUs and set qemu timeout
|
||||
in artifact definition (`.out-of-tree.toml`).
|
||||
|
||||
- Now images downloading while `kernel autogen`, bootstrap is not
|
||||
required anymore.
|
||||
|
||||
- Support CentOS kernels.
|
||||
|
||||
### Changed
|
||||
|
||||
- Now if there's no base image found — out-of-tree will try to use
|
||||
an image from closest previous version, e.g. image from Ubuntu
|
||||
18.04 for Ubuntu 18.10.
|
||||
|
||||
- Kernel modules tests will not be failed if there are no tests
|
||||
exists.
|
||||
|
||||
- Now *out-of-tree* will return negative error code if at least one
|
||||
of the stage was failed.
|
||||
|
||||
- Project is switch to use Go modules.
|
||||
|
||||
- Now test.sh is used by default if copying is not implemented in
|
||||
Makefile.
|
||||
|
||||
- dmesg is not cleaned before the start of module/exploit anymore.
|
||||
|
||||
- qemu/kvm will use all host cpu features.
|
||||
|
||||
### Removed
|
||||
|
||||
- *Kernel factory* is removed completely in favor of incremental
|
||||
Dockerfiles.
|
||||
|
||||
- `bootstrap` is not doing anything anymore. It'll be removed in next
|
||||
release.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Command `timeout` is not required anymore.
|
||||
|
||||
- Errors is more meaningful.
|
||||
|
||||
- Temporary files is moved to `~/.out-of-tree/tmp/` to avoid docker
|
||||
mounting issues on some systems.
|
||||
|
||||
## [0.2.0] - 2018-12-01
|
||||
|
||||
The main purpose of the release is to simplify installation.
|
||||
|
||||
### Changes
|
||||
|
||||
- All configuration moved to `~/.out-of-tree`.
|
||||
|
||||
- Now prebuilt images can be downloaded with bootstrap.
|
||||
|
||||
- Ability to generate kernels specific to .out-of-tree.toml in
|
||||
current directory. So now there's no need to wait for several
|
||||
hours for start work on specific kernel with module/exploit.
|
||||
|
||||
- Now there's no need to keep source tree and _out-of-tree_ can be
|
||||
distributed in binary form.
|
||||
|
||||
- New command: **debug**. Creates interactive environment for kernel
|
||||
module/exploit development. Still work-in-progress.
|
||||
|
||||
- No warning anymore if test.sh is not exists.
|
||||
|
||||
## [0.1.0] - 2018-11-20
|
||||
|
||||
Initial release that was never tagged.
|
||||
|
||||
Refer to state after first public release on ZeroNights 2018
|
||||
([video](https://youtu.be/2tL7bbCdIio),
|
||||
[slides](https://2018.zeronights.ru/wp-content/uploads/materials/07-Ways-to-automate-testing-Linux-kernel-exploits.pdf)).
|
87
README.md
87
README.md
|
@ -1,64 +1,59 @@
|
|||
[![Ubuntu](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml)
|
||||
[![E2E](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml)
|
||||
[![Documentation Status](https://readthedocs.org/projects/out-of-tree/badge/?version=latest)](https://out-of-tree.readthedocs.io/en/latest/?badge=latest)
|
||||
|
||||
# [out-of-tree](https://out-of-tree.io)
|
||||
|
||||
out-of-tree kernel {module, exploit} development tool
|
||||
*out-of-tree* is the kernel {module, exploit} development tool.
|
||||
|
||||
*out-of-tree* was created to reduce the complexity of the environment for developing, testing and debugging Linux kernel exploits and out-of-tree kernel modules (hence the name "out-of-tree").
|
||||
|
||||
![Screenshot](https://cloudflare-ipfs.com/ipfs/Qmb88fgdDjbWkxz91sWsgmoZZNfVThnCtj37u3mF2s3T3T)
|
||||
|
||||
## Installation
|
||||
|
||||
$ go get github.com/jollheef/out-of-tree
|
||||
$ out-of-tree bootstrap
|
||||
### GNU/Linux (with [Nix](https://nixos.org/nix/))
|
||||
|
||||
Then you can check it on kernel module example:
|
||||
sudo apt install podman || sudo dnf install podman
|
||||
|
||||
$ cd $GOPATH/github.com/jollheef/out-of-tree/examples/kernel-module
|
||||
$ out-of-tree kernel autogen # generate kernels based on .out-of-tree.toml
|
||||
$ out-of-tree pew
|
||||
curl -L https://nixos.org/nix/install | sh
|
||||
mkdir -p ~/.config/nix
|
||||
echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
|
||||
|
||||
# stable
|
||||
nix profile install nixpkgs#out-of-tree
|
||||
|
||||
# latest
|
||||
nix profile install git+https://code.dumpstack.io/tools/out-of-tree
|
||||
|
||||
### macOS
|
||||
|
||||
Note: case-sensitive FS is required for the ~/.out-of-tree directory.
|
||||
|
||||
$ brew install podman
|
||||
$ podman machine stop || true
|
||||
$ podman machine rm || true
|
||||
$ podman machine init --cpus=4 --memory=4096 -v $HOME:$HOME
|
||||
$ podman machine start
|
||||
$ brew tap out-of-tree/repo
|
||||
$ brew install out-of-tree
|
||||
|
||||
Read [documentation](https://out-of-tree.readthedocs.io) for further info.
|
||||
|
||||
## Examples
|
||||
|
||||
Run by absolute path
|
||||
Generate all Ubuntu 22.04 kernels:
|
||||
|
||||
$ out-of-tree --path /path/to/exploit/directory pew
|
||||
$ out-of-tree kernel genall --distro=Ubuntu --ver=22.04
|
||||
|
||||
Test only with one kernel:
|
||||
Run tests based on .out-of-tree.toml definitions:
|
||||
|
||||
$ out-of-tree pew --kernel='Ubuntu:4.10.0-30-generic'
|
||||
$ out-of-tree pew
|
||||
|
||||
Test with a specific kernel:
|
||||
|
||||
$ out-of-tree pew --kernel='Ubuntu:5.4.0-29-generic'
|
||||
|
||||
Run debug environment:
|
||||
|
||||
$ out-of-tree debug --kernel='Ubuntu:4.10.0-30-generic'
|
||||
|
||||
Test binary module/exploit with implicit defined test ($BINARY_test)
|
||||
|
||||
$ out-of-tree pew --binary /path/to/exploit
|
||||
|
||||
Test binary module/exploit with explicit defined test
|
||||
|
||||
$ out-of-tree pew --binary /path/to/exploit --test /path/to/exploit_test
|
||||
|
||||
Guess work kernels:
|
||||
|
||||
$ out-of-tree pew --guess
|
||||
|
||||
Use custom kernels config
|
||||
|
||||
$ out-of-tree --kernels /path/to/kernels.toml pew
|
||||
|
||||
## Generate all kernels
|
||||
|
||||
Not required if you don't need to use `--guess`.
|
||||
|
||||
$ cd $GOPATH/src/github.com/jollheef/out-of-tree/tools/kernel-factory
|
||||
$ ./bootstrap.sh # more than 6-8 hours for all kernels
|
||||
$ export OUT_OF_TREE_KCFG=$GOPATH/src/github.com/jollheef/out-of-tree/tools/kernel-factory/output/kernels.toml
|
||||
|
||||
## Development
|
||||
|
||||
Read [Qemu API](qemu/README.md).
|
||||
|
||||
### Generate images
|
||||
|
||||
$ cd $GOPATH/src/github.com/jollheef/out-of-tree/tools/qemu-debian-img/
|
||||
$ docker run --privileged -v $(pwd):/shared -e IMAGE=/shared/ubuntu1404.img -e RELEASE=trusty -t gen-ubuntu1804-image
|
||||
$ docker run --privileged -v $(pwd):/shared -e IMAGE=/shared/ubuntu1604.img -e RELEASE=xenial -t gen-ubuntu1804-image
|
||||
$ out-of-tree debug --kernel='Ubuntu:5.4.0-29-generic'
|
||||
|
|
|
@ -0,0 +1,191 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// ErrInvalid marks an error value that was not transmitted over the
// wire (Resp.Decode sets it unconditionally after decoding, so only
// the Error string survives transport).
var ErrInvalid = errors.New("")

// Status is the lifecycle state of a Job.
type Status string

const (
	StatusNew     Status = "new"
	StatusWaiting Status = "waiting"
	StatusRunning Status = "running"
	StatusSuccess Status = "success"
	StatusFailure Status = "failure"
)
|
||||
|
||||
// Command identifies the operation a client requests from the server.
type Command string

const (
	RawMode Command = "rawmode"

	// Job management.
	AddJob    Command = "add_job"
	ListJobs  Command = "list_jobs"
	JobLogs   Command = "job_logs"
	JobStatus Command = "job_status"

	// Repository management.
	AddRepo   Command = "add_repo"
	ListRepos Command = "list_repos"

	Kernels Command = "kernels"
)
|
||||
|
||||
// Job is a single build/test task tracked by the server: an artifact
// from a repo at a specific commit, run against one target kernel.
type Job struct {
	ID int64

	UpdatedAt time.Time

	// Job UUID
	UUID string
	// Group UUID
	Group string

	RepoName string
	Commit   string

	Description string

	Artifact artifact.Artifact
	Target   distro.KernelInfo

	Created  time.Time
	Started  time.Time
	Finished time.Time

	Status Status
}

// GenUUID assigns a fresh random UUID to the job.
func (job *Job) GenUUID() {
	job.UUID = uuid.New().String()
}
|
||||
|
||||
// ListJobsParams is the parameters for ListJobs command.
// Zero-valued fields act as "no filter".
type ListJobsParams struct {
	// Group UUID
	Group string

	// Repo name
	Repo string

	// Commit hash
	Commit string

	// Status of the job
	Status Status

	// UpdatedAfter is a cutoff against Job.UpdatedAt; the unit
	// appears to be a Unix timestamp — TODO confirm against the
	// server-side implementation.
	UpdatedAfter int64
}
|
||||
|
||||
// Repo is a registered source repository.
type Repo struct {
	ID   int64
	Name string
	Path string
}

// JobLog is a single named log stream belonging to a job.
type JobLog struct {
	Name string
	Text string
}
|
||||
|
||||
type Req struct {
|
||||
Command Command
|
||||
|
||||
Type string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func (r *Req) SetData(data any) (err error) {
|
||||
r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))
|
||||
var buf bytes.Buffer
|
||||
err = gob.NewEncoder(&buf).Encode(data)
|
||||
r.Data = buf.Bytes()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Req) GetData(data any) (err error) {
|
||||
if len(r.Data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
t := fmt.Sprintf("%v", reflect.TypeOf(data))
|
||||
if r.Type != t {
|
||||
err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
|
||||
return
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(r.Data)
|
||||
return gob.NewDecoder(buf).Decode(data)
|
||||
}
|
||||
|
||||
func (r *Req) Encode(conn net.Conn) (err error) {
|
||||
return gob.NewEncoder(conn).Encode(r)
|
||||
}
|
||||
|
||||
func (r *Req) Decode(conn net.Conn) (err error) {
|
||||
return gob.NewDecoder(conn).Decode(r)
|
||||
}
|
||||
|
||||
type Resp struct {
|
||||
UUID string
|
||||
|
||||
Error string
|
||||
|
||||
Err error `json:"-"`
|
||||
|
||||
Type string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func NewResp() (resp Resp) {
|
||||
resp.UUID = uuid.New().String()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Resp) SetData(data any) (err error) {
|
||||
r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))
|
||||
var buf bytes.Buffer
|
||||
err = gob.NewEncoder(&buf).Encode(data)
|
||||
r.Data = buf.Bytes()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Resp) GetData(data any) (err error) {
|
||||
if len(r.Data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
t := fmt.Sprintf("%v", reflect.TypeOf(data))
|
||||
if r.Type != t {
|
||||
err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
|
||||
return
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(r.Data)
|
||||
return gob.NewDecoder(buf).Decode(data)
|
||||
}
|
||||
|
||||
func (r *Resp) Encode(conn net.Conn) (err error) {
|
||||
if r.Err != nil && r.Err != ErrInvalid && r.Error == "" {
|
||||
r.Error = fmt.Sprintf("%v", r.Err)
|
||||
}
|
||||
return gob.NewEncoder(conn).Encode(r)
|
||||
}
|
||||
|
||||
func (r *Resp) Decode(conn net.Conn) (err error) {
|
||||
err = gob.NewDecoder(conn).Decode(r)
|
||||
r.Err = ErrInvalid
|
||||
return
|
||||
}
|
|
@ -0,0 +1,443 @@
|
|||
package artifact
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/naoina/toml"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// Kernel selects kernel releases by regular expression.
type Kernel struct {
	// TODO
	// Version string
	// From string
	// To string

	// Regex matches kernel release strings (prev. ReleaseMask).
	Regex string
	// ExcludeRegex excludes otherwise-matching releases.
	ExcludeRegex string
}

// Target defines the kernel
type Target struct {
	Distro distro.Distro

	Kernel Kernel
}
|
||||
|
||||
// DockerName is returns stable name for docker container
|
||||
func (km Target) DockerName() string {
|
||||
distro := strings.ToLower(km.Distro.ID.String())
|
||||
release := strings.Replace(km.Distro.Release, ".", "__", -1)
|
||||
return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
|
||||
}
|
||||
|
||||
// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
	// Script for information gathering or automation
	Script
)

// String returns the lowercase name of the artifact type. Note: it
// panics (index out of range) for values outside the declared constants.
func (at ArtifactType) String() string {
	return [...]string{"module", "exploit", "script"}[at]
}
|
||||
|
||||
// UnmarshalTOML is for support github.com/naoina/toml
|
||||
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
|
||||
stype := strings.Trim(string(data), `"`)
|
||||
stypelower := strings.ToLower(stype)
|
||||
if strings.Contains(stypelower, "module") {
|
||||
*at = KernelModule
|
||||
} else if strings.Contains(stypelower, "exploit") {
|
||||
*at = KernelExploit
|
||||
} else if strings.Contains(stypelower, "script") {
|
||||
*at = Script
|
||||
} else {
|
||||
err = fmt.Errorf("type %s is unsupported", stype)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalTOML implements TOML encoding for github.com/naoina/toml.
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	case Script:
		s = "script"
	default:
		// Note: on this error path data is still set to `""` below.
		err = fmt.Errorf("cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}
|
||||
|
||||
// Duration wraps time.Duration with TOML (un)marshalling support.
type Duration struct {
	time.Duration
}

// UnmarshalTOML parses a quoted duration string such as "1m30s".
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	unquoted := strings.ReplaceAll(string(data), `"`, "")
	d.Duration, err = time.ParseDuration(unquoted)
	return
}

// MarshalTOML renders the duration as a quoted string.
func (d Duration) MarshalTOML() ([]byte, error) {
	return []byte(`"` + d.Duration.String() + `"`), nil
}
|
||||
|
||||
// PreloadModule is a kernel module inserted into the guest before the
// artifact itself is run.
type PreloadModule struct {
	// Repo is a git repository with the module source (build cached).
	Repo string
	// Path is a local source tree (no cache; mutually exclusive with Repo).
	Path string
	// TimeoutAfterLoad is how long to wait after insmod.
	TimeoutAfterLoad Duration
}

// Extra test files to copy over
type FileTransfer struct {
	User   string
	Local  string
	Remote string
}

// Patch is a source modification: a diff file (Path), inline diff
// text (Source), or a shell script to run (Script).
type Patch struct {
	Path   string
	Source string
	Script string
}
|
||||
|
||||
// Artifact is for .out-of-tree.toml: the definition of a module,
// exploit or script and the environment it should be tested in.
type Artifact struct {
	Name string
	Type ArtifactType

	SourcePath string
	// SourceFiles, when non-empty, limits the copy to these files.
	SourceFiles []string

	TestFiles []FileTransfer

	Targets []Target

	// Script is the entry point when Type == Script.
	Script string

	Qemu struct {
		Cpus              int
		Memory            int
		Timeout           Duration
		AfterStartTimeout Duration
	}

	Docker struct {
		Timeout Duration
	}

	// Mitigations disables kernel hardening features in qemu.
	Mitigations struct {
		DisableSmep  bool
		DisableSmap  bool
		DisableKaslr bool
		DisableKpti  bool
	}

	Patches []Patch

	Make struct {
		// Target is appended to the make command line.
		Target string
	}

	// StandardModules requests copying the distro's standard modules
	// into the guest before testing.
	StandardModules bool

	Preload []PreloadModule
}
|
||||
|
||||
// Read is for read .out-of-tree.toml
|
||||
func (Artifact) Read(path string) (ka Artifact, err error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
buf, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = toml.Unmarshal(buf, &ka)
|
||||
|
||||
if len(strings.Fields(ka.Name)) != 1 {
|
||||
err = errors.New("artifact name should not contain spaces")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ka Artifact) checkSupport(ki distro.KernelInfo, target Target) (
|
||||
supported bool, err error) {
|
||||
|
||||
if target.Distro.Release == "" {
|
||||
if ki.Distro.ID != target.Distro.ID {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if !ki.Distro.Equal(target.Distro) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r, err := regexp.Compile(target.Kernel.Regex)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
exr, err := regexp.Compile(target.Kernel.ExcludeRegex)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !r.MatchString(ki.KernelRelease) {
|
||||
return
|
||||
}
|
||||
|
||||
if target.Kernel.ExcludeRegex != "" && exr.MatchString(ki.KernelRelease) {
|
||||
return
|
||||
}
|
||||
|
||||
supported = true
|
||||
return
|
||||
}
|
||||
|
||||
// Supported returns true if given kernel is supported by artifact
|
||||
func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
|
||||
for _, km := range ka.Targets {
|
||||
supported, err = ka.checkSupport(ki, km)
|
||||
if supported {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Process runs the full pipeline for one artifact against one kernel:
// boot qemu, build (or reuse cBinary), copy the test script and any
// extra files, run the test, and report through dump. When endless is
// true the qemu timeout is disabled and the test is re-run forever
// (optionally alongside a stress script).
func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
	endless bool, cBinary,
	cEndlessStress string, cEndlessTimeout time.Duration,
	dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
		result *Result)) {

	slog.Info().Msg("start")
	testStart := time.Now()
	defer func() {
		slog.Debug().Str("test_duration",
			time.Since(testStart).String()).
			Msg("")
	}()

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		slog.Error().Err(err).Msg("qemu init")
		return
	}
	q.Log = slog

	// Apply per-artifact qemu overrides (zero means "keep default").
	if ka.Qemu.Timeout.Duration != 0 {
		q.Timeout = ka.Qemu.Timeout.Duration
	}
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	// Kernel hardening toggles requested by the artifact.
	q.SetKASLR(!ka.Mitigations.DisableKaslr)
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	if ki.CPU.Model != "" {
		q.CPU.Model = ki.CPU.Model
	}

	if len(ki.CPU.Flags) != 0 {
		q.CPU.Flags = ki.CPU.Flags
	}

	// Endless mode must never hit the VM timeout.
	if endless {
		q.Timeout = 0
	}

	qemuStart := time.Now()

	slog.Debug().Msgf("qemu start %v", qemuStart)
	err = q.Start()
	if err != nil {
		slog.Error().Err(err).Msg("qemu start")
		return
	}
	defer q.Stop()

	slog.Debug().Msgf("wait %v", ka.Qemu.AfterStartTimeout)
	time.Sleep(ka.Qemu.AfterStartTimeout.Duration)

	// Background keepalive logging until the VM dies.
	go func() {
		time.Sleep(time.Minute)
		for !q.Died {
			slog.Debug().Msg("still alive")
			time.Sleep(time.Minute)
		}
	}()

	tmp, err := os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		slog.Error().Err(err).Msg("making tmp directory")
		return
	}
	defer os.RemoveAll(tmp)

	result := Result{}
	if !endless {
		// Normal mode reports once, on exit; endless mode dumps
		// explicitly after the first pass (below).
		defer dump(q, ka, ki, &result)
	}

	var cTest string

	// Determine what to build/run: a script is used as-is, otherwise
	// build from source unless a prebuilt binary was supplied.
	if ka.Type == Script {
		result.BuildDir = ka.SourcePath
		result.Build.Ok = true
		ka.Script = filepath.Join(ka.SourcePath, ka.Script)
		cTest = ka.Script
	} else if cBinary == "" {
		// TODO: build should return structure
		start := time.Now()
		result.BuildDir, result.BuildArtifact, result.Build.Output, err =
			Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("build done")
		if err != nil {
			log.Error().Err(err).Msg("build")
			return
		}
		result.Build.Ok = true
	} else {
		result.BuildArtifact = cBinary
		result.Build.Ok = true
	}

	// Pick the test script: implicit "<binary>_test" if present,
	// otherwise fall back to test.sh from the copied source tree.
	if cTest == "" {
		cTest = result.BuildArtifact + "_test"
		if _, err := os.Stat(cTest); err != nil {
			slog.Debug().Msgf("%s does not exist", cTest)
			cTest = tmp + "/source/" + "test.sh"
		} else {
			slog.Debug().Msgf("%s exist", cTest)
		}
	}

	if ka.Qemu.Timeout.Duration == 0 {
		ka.Qemu.Timeout.Duration = time.Minute
	}

	err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		return
	}
	slog.Debug().Str("qemu_startup_duration",
		time.Since(qemuStart).String()).
		Msg("ssh is available")

	remoteTest, err := copyTest(q, cTest, ka)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("copy test script")
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		start := time.Now()
		err = CopyStandardModules(q, ki)
		if err != nil {
			result.InternalError = err
			slog.Error().Err(err).Msg("copy standard modules")
			return
		}
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("copy standard modules")
	}

	err = PreloadModules(q, ka, ki, ka.Docker.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("preload modules")
		return
	}

	start := time.Now()
	copyArtifactAndTest(slog, q, ka, &result, remoteTest)
	slog.Debug().Str("duration", time.Since(start).String()).
		Msgf("test completed (success: %v)", result.Test.Ok)

	if !endless {
		return
	}

	// Endless mode: report the first pass, then keep re-running the
	// test (only if the first pass fully succeeded).
	dump(q, ka, ki, &result)

	if !result.Build.Ok || !result.Run.Ok || !result.Test.Ok {
		return
	}

	slog.Info().Msg("start endless tests")

	if cEndlessStress != "" {
		slog.Debug().Msg("copy and run endless stress script")
		err = q.CopyAndRunAsync("root", cEndlessStress)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg("cannot copy/run stress")
			return
		}
	}

	for {
		output, err := q.Command("root", remoteTest)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg(output)
			return
		}
		slog.Debug().Msg(output)

		slog.Info().Msg("test success")

		slog.Debug().Msgf("wait %v", cEndlessTimeout)
		time.Sleep(cEndlessTimeout)
	}
}
|
|
@ -1,12 +1,10 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package config
|
||||
package artifact
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
|
@ -15,8 +13,16 @@ func TestMarshalUnmarshal(t *testing.T) {
|
|||
Name: "Put name here",
|
||||
Type: KernelModule,
|
||||
}
|
||||
artifactCfg.SupportedKernels = append(artifactCfg.SupportedKernels,
|
||||
KernelMask{Ubuntu, "18.04", ".*"})
|
||||
artifactCfg.Targets = append(artifactCfg.Targets,
|
||||
Target{
|
||||
Distro: distro.Distro{
|
||||
ID: distro.Ubuntu,
|
||||
Release: "18.04",
|
||||
},
|
||||
Kernel: Kernel{
|
||||
Regex: ".*",
|
||||
},
|
||||
})
|
||||
buf, err := toml.Marshal(&artifactCfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
|
@ -0,0 +1,175 @@
|
|||
// Copyright 2020 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package artifact
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
func PreloadModules(q *qemu.System, ka Artifact, ki distro.KernelInfo,
|
||||
dockerTimeout time.Duration) (err error) {
|
||||
|
||||
for _, pm := range ka.Preload {
|
||||
err = preload(q, ki, pm, dockerTimeout)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
|
||||
dockerTimeout time.Duration) (err error) {
|
||||
|
||||
var workPath, cache string
|
||||
if pm.Path != "" {
|
||||
log.Print("Use non-git path for preload module (no cache)")
|
||||
workPath = pm.Path
|
||||
} else if pm.Repo != "" {
|
||||
workPath, cache, err = cloneOrPull(pm.Repo, ki)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = errors.New("no repo/path in preload entry")
|
||||
return
|
||||
}
|
||||
|
||||
err = buildAndInsmod(workPath, q, ki, dockerTimeout, cache)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(pm.TimeoutAfterLoad.Duration)
|
||||
return
|
||||
}
|
||||
|
||||
// buildAndInsmod builds the preload module from workPath (or reuses
// the cached artifact when available) and inserts it into the running
// qemu guest.
func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
	dockerTimeout time.Duration, cache string) (err error) {

	tmp, err := tempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	var af string
	if pathExists(cache) {
		// Cache hit: reuse the previously built module.
		af = cache
	} else {
		af, err = buildPreload(workPath, tmp, ki, dockerTimeout)
		if err != nil {
			return
		}
		if cache != "" {
			// Populate the cache for subsequent runs.
			err = CopyFile(af, cache)
			if err != nil {
				return
			}
		}
	}

	output, err := q.CopyAndInsmod(af)
	if err != nil {
		log.Print(output)
		return
	}
	return
}
|
||||
|
||||
// buildPreload builds the module at workPath for kernel ki and returns
// the path of the produced artifact.
func buildPreload(workPath, tmp string, ki distro.KernelInfo,
	dockerTimeout time.Duration) (af string, err error) {

	// The config is optional for preload modules: on read failure we
	// proceed with the zero-value artifact and only log a warning.
	ka, err := Artifact{}.Read(workPath + "/.out-of-tree.toml")
	if err != nil {
		log.Warn().Err(err).Msg("preload")
	}

	ka.SourcePath = workPath

	// Restrict the build to exactly this kernel.
	km := Target{
		Distro: ki.Distro,
		Kernel: Kernel{Regex: ki.KernelRelease},
	}
	ka.Targets = []Target{km}

	// An artifact-level docker timeout overrides the caller's.
	if ka.Docker.Timeout.Duration != 0 {
		dockerTimeout = ka.Docker.Timeout.Duration
	}

	_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
	return
}
|
||||
|
||||
// pathExists reports whether path can be stat'ed.
func pathExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
|
||||
|
||||
// tempDir creates a fresh temporary directory under the out-of-tree
// dotfiles "tmp" directory and returns its path.
func tempDir() (string, error) {
	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}
|
||||
|
||||
// cloneOrPull ensures a checkout of repo under the dotfiles "preload"
// directory (cloning on first use, pulling afterwards) and computes
// the build-cache file path, keyed by repo URL, kernel path and the
// checkout's HEAD commit.
func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
	err error) {

	base := dotfiles.Dir("preload")
	// Repo URLs are hashed to get filesystem-safe directory names.
	workPath = filepath.Join(base, "/repos/", sha1sum(repo))

	var r *git.Repository
	if pathExists(workPath) {
		r, err = git.PlainOpen(workPath)
		if err != nil {
			return
		}

		var w *git.Worktree
		w, err = r.Worktree()
		if err != nil {
			return
		}

		// Best-effort pull: already-up-to-date is fine; any other
		// pull failure is only logged (and later overwritten when
		// r.Head() is queried below).
		err = w.Pull(&git.PullOptions{})
		if err != nil && err != git.NoErrAlreadyUpToDate {
			log.Print(repo, "pull error:", err)
		}
	} else {
		r, err = git.PlainClone(workPath, false, &git.CloneOptions{URL: repo})
		if err != nil {
			return
		}
	}

	ref, err := r.Head()
	if err != nil {
		return
	}

	cachedir := filepath.Join(base, "/cache/")
	os.MkdirAll(cachedir, 0700)

	filename := sha1sum(repo + ki.KernelPath + ref.Hash().String())
	cache = filepath.Join(cachedir, filename)
	return
}
|
||||
|
||||
// sha1sum returns the lowercase hex SHA-1 digest of data.
func sha1sum(data string) string {
	digest := sha1.Sum([]byte(data))
	return hex.EncodeToString(digest[:])
}
|
|
@ -0,0 +1,411 @@
|
|||
package artifact
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/otiai10/copy"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
func sh(workdir, command string) (output string, err error) {
|
||||
flog := log.With().
|
||||
Str("workdir", workdir).
|
||||
Str("command", command).
|
||||
Logger()
|
||||
|
||||
cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)
|
||||
|
||||
flog.Debug().Msgf("%v", cmd)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cmd.Stderr = cmd.Stdout
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
for scanner.Scan() {
|
||||
m := scanner.Text()
|
||||
output += m + "\n"
|
||||
flog.Trace().Str("stdout", m).Msg("")
|
||||
}
|
||||
}()
|
||||
|
||||
err = cmd.Wait()
|
||||
|
||||
if err != nil {
|
||||
err = fmt.Errorf("%v %v output: %v", cmd, err, output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// applyPatches applies each entry of ka.Patches to the source tree in
// src, in order: a diff (from inline Source or a file at Path) is
// applied with patch(1), then an optional shell Script is written to
// the tree and executed.
func applyPatches(src string, ka Artifact) (err error) {
	for i, patch := range ka.Patches {
		name := fmt.Sprintf("patch_%02d", i)

		path := src + "/" + name + ".diff"
		if patch.Source != "" && patch.Path != "" {
			err = errors.New("path and source are mutually exclusive")
			return
		} else if patch.Source != "" {
			// Materialize the inline diff as a file.
			err = os.WriteFile(path, []byte(patch.Source), 0644)
			if err != nil {
				return
			}
		} else if patch.Path != "" {
			err = copy.Copy(patch.Path, path)
			if err != nil {
				return
			}
		}

		if patch.Source != "" || patch.Path != "" {
			_, err = sh(src, "patch < "+path)
			if err != nil {
				return
			}
		}

		if patch.Script != "" {
			script := src + "/" + name + ".sh"
			err = os.WriteFile(script, []byte(patch.Script), 0755)
			if err != nil {
				return
			}
			_, err = sh(src, script)
			if err != nil {
				return
			}
		}
	}
	return
}
|
||||
|
||||
// Build compiles the artifact for kernel ki: the source is copied into
// tmp, patched, then built via make — inside the kernel's container
// when one exists, directly on the host otherwise. It returns the
// build directory, the produced artifact path and the build output.
func Build(flog zerolog.Logger, tmp string, ka Artifact,
	ki distro.KernelInfo, dockerTimeout time.Duration) (
	outdir, outpath, output string, err error) {

	// The artifact name becomes the make TARGET; spaces are not valid.
	target := strings.Replace(ka.Name, " ", "_", -1)
	if target == "" {
		target = fmt.Sprintf("%d", rand.Int())
	}

	outdir = tmp + "/source"

	if len(ka.SourceFiles) == 0 {
		err = copy.Copy(ka.SourcePath, outdir)
	} else {
		err = CopyFiles(ka.SourcePath, ka.SourceFiles, outdir)
	}
	if err != nil {
		return
	}

	err = applyPatches(outdir, ka)
	if err != nil {
		return
	}

	outpath = outdir + "/" + target
	if ka.Type == KernelModule {
		outpath += ".ko"
	}

	if ki.KernelVersion == "" {
		ki.KernelVersion = ki.KernelRelease
	}

	kernel := "/lib/modules/" + ki.KernelVersion + "/build"
	if ki.KernelSource != "" {
		kernel = ki.KernelSource
	}

	buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
	if ka.Make.Target != "" {
		buildCommand += " " + ka.Make.Target
	}

	if ki.ContainerName != "" {
		// Build inside the kernel-specific container.
		var c container.Container
		container.Timeout = dockerTimeout
		c, err = container.NewFromKernelInfo(ki)
		c.Log = flog
		if err != nil {
			log.Fatal().Err(err).Msg("container creation failure")
		}

		output, err = c.Run(outdir, []string{
			buildCommand + " && chmod -R 777 /work",
		})
	} else {
		// Build directly on the host, with a kill timer standing in
		// for the container timeout.
		cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
			buildCommand)

		log.Debug().Msgf("%v", cmd)

		// NOTE(review): if the timer fires before CombinedOutput has
		// started the process, cmd.Process is nil — TODO confirm/guard.
		timer := time.AfterFunc(dockerTimeout, func() {
			cmd.Process.Kill()
		})
		defer timer.Stop()

		var raw []byte
		raw, err = cmd.CombinedOutput()
		if err != nil {
			e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
				err, buildCommand, string(raw))
			err = errors.New(e)
			return
		}

		output = string(raw)
	}
	return
}
|
||||
|
||||
func runScript(q *qemu.System, script string) (output string, err error) {
|
||||
return q.Command("root", script)
|
||||
}
|
||||
|
||||
// testKernelModule runs the test script for an already-loaded kernel
// module and returns its output.
func testKernelModule(q *qemu.System, ka Artifact,
	test string) (output string, err error) {

	output, err = q.Command("root", test)
	// TODO generic checks for WARNING's and so on
	return
}
|
||||
|
||||
// testKernelExploit runs a privilege-escalation exploit as the
// unprivileged user and verifies success by checking that a
// root-owned flag file was created.
func testKernelExploit(q *qemu.System, ka Artifact,
	test, exploit string) (output string, err error) {

	output, err = q.Command("user", "chmod +x "+exploit)
	if err != nil {
		return
	}

	// Random flag path under /root: the test passes it to the exploit,
	// which is expected to create it with escalated privileges.
	randFilePath := fmt.Sprintf("/root/%d", rand.Int())

	cmd := fmt.Sprintf("%s %s %s", test, exploit, randFilePath)
	output, err = q.Command("user", cmd)
	if err != nil {
		return
	}

	// stat succeeds only if the exploit managed to create the file.
	_, err = q.Command("root", "stat "+randFilePath)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
// Result is the outcome of processing one artifact against one kernel.
type Result struct {
	BuildDir      string
	BuildArtifact string
	// Per-stage output and success flags.
	Build, Run, Test struct {
		Output string
		Ok     bool
	}

	// InternalError is an infrastructure failure (qemu/ssh/copy), as
	// opposed to a failing build or test.
	InternalError       error
	InternalErrorString string
}
|
||||
|
||||
func CopyFiles(path string, files []string, dest string) (err error) {
|
||||
err = os.MkdirAll(dest, os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, sf := range files {
|
||||
if sf[0] == '/' {
|
||||
err = CopyFile(sf, filepath.Join(dest, filepath.Base(sf)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = os.MkdirAll(filepath.Join(dest, filepath.Dir(sf)), os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = CopyFile(filepath.Join(path, sf), filepath.Join(dest, sf))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func CopyFile(sourcePath, destinationPath string) (err error) {
|
||||
sourceFile, err := os.Open(sourcePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer sourceFile.Close()
|
||||
|
||||
destinationFile, err := os.Create(destinationPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(destinationFile, sourceFile); err != nil {
|
||||
destinationFile.Close()
|
||||
return err
|
||||
}
|
||||
return destinationFile.Close()
|
||||
}
|
||||
|
||||
// copyArtifactAndTest uploads the artifact's test files into the
// guest and runs the phase appropriate for its type (insmod+test for
// kernel modules, privilege-escalation check for exploits, a plain
// run for scripts), filling res.Run/res.Test along the way.
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
	res *Result, remoteTest string) (err error) {

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		if f.Local[0] != '/' {
			// relative paths are resolved against the build dir
			if res.BuildDir != "" {
				f.Local = res.BuildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			// upload failure is infrastructure, not the artifact
			res.InternalError = err
			slog.Error().Err(err).Msg("copy test file")
			return
		}
	}

	switch ka.Type {
	case KernelModule:
		res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
		if err != nil {
			slog.Error().Err(err).Msg(res.Run.Output)
			// TODO errors.As
			if strings.Contains(err.Error(), "connection refused") {
				// ssh-level failure, not a module failure
				res.InternalError = err
			}
			return
		}
		res.Run.Ok = true

		res.Test.Output, err = testKernelModule(q, ka, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Test.Ok = true
	case KernelExploit:
		remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
		err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
		if err != nil {
			return
		}

		res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
			remoteExploit)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Run.Ok = true // does not really used
		res.Test.Ok = true
	case Script:
		res.Test.Output, err = runScript(q, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		slog.Info().Msgf("\n%v\n", res.Test.Output)
		res.Run.Ok = true
		res.Test.Ok = true
	default:
		slog.Fatal().Msg("Unsupported artifact type")
	}

	// probe the ssh connection: if the guest died during the test
	// (panic/oops) the test result cannot be trusted
	_, err = q.Command("root", "echo")
	if err != nil {
		slog.Error().Err(err).Msg("after-test ssh reconnect")
		res.Test.Ok = false
		return
	}

	return
}
|
||||
|
||||
// copyTest uploads the test script to a random path in the guest and
// makes it executable.
//
// If the upload fails (no local test script), a trivial fallback is
// generated in-guest so the pipeline can still run: for exploits a
// wrapper that pipes `echo touch $2` into the exploit, otherwise an
// empty shell script. Errors from the fallback generation are
// deliberately ignored (best effort).
func copyTest(q *qemu.System, testPath string, ka Artifact) (
	remoteTest string, err error) {

	remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
	err = q.CopyFile("user", testPath, remoteTest)
	if err != nil {
		if ka.Type == KernelExploit {
			q.Command("user",
				"echo -e '#!/bin/sh\necho touch $2 | $1' "+
					"> "+remoteTest+
					" && chmod +x "+remoteTest)
		} else {
			q.Command("user", "echo '#!/bin/sh' "+
				"> "+remoteTest+" && chmod +x "+remoteTest)
		}
	}

	// this also replaces any upload error above with the chmod result
	_, err = q.Command("root", "chmod +x "+remoteTest)
	return
}
|
||||
|
||||
func CopyStandardModules(q *qemu.System, ki distro.KernelInfo) (err error) {
|
||||
_, err = q.Command("root", "mkdir -p /lib/modules/"+ki.KernelVersion)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
remotePath := "/lib/modules/" + ki.KernelVersion + "/"
|
||||
|
||||
err = q.CopyDirectory("root", ki.ModulesPath+"/kernel", remotePath+"/kernel")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(ki.ModulesPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, de := range files {
|
||||
var fi fs.FileInfo
|
||||
fi, err = de.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(fi.Name(), "modules") {
|
||||
continue
|
||||
}
|
||||
err = q.CopyFile("root", ki.ModulesPath+"/"+fi.Name(), remotePath)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
// imagesURL points at the prebuilt qemu images bundle that the
// bootstrap handler downloads and unpacks into ~/.out-of-tree/images/.
const imagesURL = "https://github.com/jollheef/out-of-tree/releases/download/v0.2/images.tar.gz"
|
76
bootstrap.go
76
bootstrap.go
|
@ -1,76 +0,0 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"os/exec"
	"os/user"
)
|
||||
|
||||
// inspired by Edd Turtle code
|
||||
func downloadFile(filepath string, url string) (err error) {
|
||||
out, err := os.Create(filepath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
_, err = io.Copy(out, resp.Body)
|
||||
return
|
||||
}
|
||||
|
||||
// unpackTar extracts archive into the destination directory using the
// system tar binary.
func unpackTar(archive, destination string) (err error) {
	cmd := exec.Command("tar", "xf", archive)
	cmd.Dir = destination + "/"

	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		// I don't like when some errors printed inside
		// So if you know way to do it better - FIXME please
		log.Println("Unpack images error:", string(rawOutput), err)
	}
	return
}
|
||||
|
||||
func bootstrapHandler() (err error) {
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
imagesPath := usr.HomeDir + "/.out-of-tree/images/"
|
||||
os.MkdirAll(imagesPath, os.ModePerm)
|
||||
|
||||
tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
|
||||
if err != nil {
|
||||
log.Println("Temporary directory creation error:", err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
imagesArchive := tmp + "/images.tar.gz"
|
||||
|
||||
err = downloadFile(imagesArchive, imagesURL)
|
||||
if err != nil {
|
||||
log.Println("Download file error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = unpackTar(imagesArchive, imagesPath)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
var URL = "https://out-of-tree.fra1.digitaloceanspaces.com/1.0.0/"
|
||||
|
||||
func unpackTar(archive, destination string) (err error) {
|
||||
// NOTE: If you're change anything in tar command please check also
|
||||
// BSD tar (or if you're using macOS, do not forget to check GNU Tar)
|
||||
// Also make sure that sparse files are extracting correctly
|
||||
cmd := exec.Command("tar", "-Sxf", archive)
|
||||
cmd.Dir = destination + "/"
|
||||
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
rawOutput, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("%v: %s", err, rawOutput)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DownloadRootFS downloads the prebuilt rootfs image `file` (stored
// remotely as file.tar.gz) from the cache URL, unpacks it into path,
// and removes the downloaded archive.
func DownloadRootFS(path, file string) (err error) {
	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	fileurl, err := url.JoinPath(URL, file+".tar.gz")
	if err != nil {
		return
	}

	log.Debug().Msgf("download qemu image from %s", fileurl)

	resp, err := grab.Get(tmp, fileurl)
	if err != nil {
		// not every image exists in the cache; tell the user how
		// to proceed instead of surfacing a raw download error
		err = fmt.Errorf("cannot download %s. It looks like you need "+
			"to generate it manually and place it "+
			"to ~/.out-of-tree/images/; "+
			"check documentation for additional information",
			fileurl)
		return
	}

	err = unpackTar(resp.Filename, path)
	if err != nil {
		return
	}

	return os.Remove(resp.Filename)
}
|
||||
|
||||
func DownloadDebianCache(cachePath string) (err error) {
|
||||
tmp, err := fs.TempDir()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
file := filepath.Base(cachePath)
|
||||
|
||||
fileurl, err := url.JoinPath(URL, file)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().Msgf("download debian cache from %s", fileurl)
|
||||
|
||||
resp, err := grab.Get(tmp, fileurl)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return os.Rename(resp.Filename, cachePath)
|
||||
}
|
||||
|
||||
func PackageURL(dt distro.ID, orig string) (found bool, fileurl string) {
|
||||
if dt != distro.Debian {
|
||||
return
|
||||
}
|
||||
|
||||
filename := filepath.Base(orig)
|
||||
|
||||
fileurl, err := url.JoinPath(URL, "packages/debian", filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.Head(fileurl)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return
|
||||
}
|
||||
|
||||
found = true
|
||||
return
|
||||
}
|
||||
|
||||
func ContainerURL(name string) (path string) {
|
||||
path, _ = url.JoinPath(URL, "containers", fmt.Sprintf("%s.tar.gz", name))
|
||||
return
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
func TestDownloadRootFS(t *testing.T) {
|
||||
tmp, err := os.MkdirTemp("", "out-of-tree_")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
file := "out_of_tree_ubuntu_12__04.img"
|
||||
|
||||
err = DownloadRootFS(tmp, file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fs.PathExists(filepath.Join(tmp, file)) {
|
||||
t.Fatalf("%s does not exist", file)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDownloadDebianCache(t *testing.T) {
|
||||
tmp, err := fs.TempDir()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
file := "debian.cache"
|
||||
|
||||
cachePath := filepath.Join(tmp, file)
|
||||
|
||||
err = DownloadDebianCache(cachePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fs.PathExists(filepath.Join(tmp, file)) {
|
||||
t.Fatalf("%s does not exist", file)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,262 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// Client is a thin TLS client for the out-of-tree daemon API.
type Client struct {
	RemoteAddr string // daemon host:port
}

// client dials the daemon over mutually-authenticated TLS and returns
// the live connection; the caller is responsible for closing it.
//
// The daemon's cert.pem doubles as the CA certificate, so the same
// daemon/{cert,key}.pem pair serves both as the client certificate
// and as the root of trust. Any setup or dial problem is fatal.
func (c Client) client() *tls.Conn {
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Fatal().Msgf("no {cert,key}.pem at %s",
			dotfiles.Dir("daemon"))
	}

	cert, err := tls.LoadX509KeyPair(
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	cacert, err := os.ReadFile(dotfiles.File("daemon/cert.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	certpool := x509.NewCertPool()
	certpool.AppendCertsFromPEM(cacert)

	tlscfg := &tls.Config{
		RootCAs:      certpool,
		Certificates: []tls.Certificate{cert},
	}

	conn, err := tls.Dial("tcp", c.RemoteAddr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	return conn // conn.Close()
}
|
||||
|
||||
// request performs one command/response round trip with the daemon
// over a fresh connection. Transport and API-level errors are fatal;
// the returned err only reflects resp.Error-free paths.
func (c Client) request(cmd api.Command, data any) (resp api.Resp, err error) {
	req := api.Req{Command: cmd}
	if data != nil {
		req.SetData(data)
	}

	conn := c.client()
	defer conn.Close()

	// NOTE(review): any Encode failure is ignored here; a failed
	// send would surface as a Decode error below — confirm
	req.Encode(conn)

	err = resp.Decode(conn)
	if err != nil {
		log.Fatal().Err(err).Msgf("request %v", req)
	}

	log.Debug().Msgf("resp: %v", resp)

	if resp.Error != "" {
		// fatal, so callers never actually observe this err
		err = errors.New(resp.Error)
		log.Fatal().Err(err).Msg("")
	}

	return
}
|
||||
|
||||
func (c Client) Jobs(params api.ListJobsParams) (jobs []api.Job, err error) {
|
||||
resp, _ := c.request(api.ListJobs, ¶ms)
|
||||
|
||||
err = resp.GetData(&jobs)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) AddJob(job api.Job) (uuid string, err error) {
|
||||
resp, err := c.request(api.AddJob, &job)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.GetData(&uuid)
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) Repos() (repos []api.Repo, err error) {
|
||||
resp, _ := c.request(api.ListRepos, nil)
|
||||
|
||||
log.Debug().Msgf("resp: %v", spew.Sdump(resp))
|
||||
|
||||
err = resp.GetData(&repos)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
tag string
|
||||
}
|
||||
|
||||
func (lw logWriter) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
log.Trace().Str("tag", lw.tag).Msgf("%v", strconv.Quote(string(p)))
|
||||
return
|
||||
}
|
||||
|
||||
// handler bridges one accepted local connection to a fresh daemon
// connection switched into raw mode, copying bytes in both directions
// (traced via logWriter) until either side closes.
func (c Client) handler(cConn net.Conn) {
	defer cConn.Close()

	dConn := c.client()
	defer dConn.Close()

	req := api.Req{Command: api.RawMode}
	req.Encode(dConn)

	// daemon->client copy runs concurrently; the function returns
	// (closing both ends) when the client->daemon copy finishes
	go io.Copy(cConn, io.TeeReader(dConn, logWriter{"recv"}))
	io.Copy(dConn, io.TeeReader(cConn, logWriter{"send"}))
}
|
||||
|
||||
var ErrRepoNotFound = errors.New("repo not found")
|
||||
|
||||
// GetRepo virtual API call
|
||||
func (c Client) GetRepo(name string) (repo api.Repo, err error) {
|
||||
// TODO add API call
|
||||
|
||||
repos, err := c.Repos()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, r := range repos {
|
||||
if r.Name == name {
|
||||
repo = r
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = ErrRepoNotFound
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) GitProxy(addr string, ready *sync.Mutex) {
|
||||
l, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("git proxy listen")
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
log.Debug().Msgf("git proxy listen on %v", addr)
|
||||
|
||||
for {
|
||||
ready.Unlock()
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("accept")
|
||||
}
|
||||
log.Debug().Msgf("git proxy accept %s", conn.RemoteAddr())
|
||||
|
||||
go c.handler(conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) PushRepo(repo api.Repo) (err error) {
|
||||
addr := qemu.GetFreeAddrPort()
|
||||
|
||||
ready := &sync.Mutex{}
|
||||
|
||||
ready.Lock()
|
||||
go c.GitProxy(addr, ready)
|
||||
|
||||
ready.Lock()
|
||||
|
||||
remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
|
||||
log.Debug().Msgf("git proxy remote: %v", remote)
|
||||
|
||||
raw, err := exec.Command("git", "--work-tree", repo.Path, "push", "--force", remote).
|
||||
CombinedOutput()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().Msgf("push repo %v\n%v", repo, string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) AddRepo(repo api.Repo) (err error) {
|
||||
_, err = c.request(api.AddRepo, &repo)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().Msgf("add repo %v", repo)
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) Kernels() (kernels []distro.KernelInfo, err error) {
|
||||
resp, err := c.request(api.Kernels, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.GetData(&kernels)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
log.Info().Msgf("got %d kernels", len(kernels))
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) JobStatus(uuid string) (st api.Status, err error) {
|
||||
resp, err := c.request(api.JobStatus, &uuid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.GetData(&st)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c Client) JobLogs(uuid string) (logs []api.JobLog, err error) {
|
||||
resp, err := c.request(api.JobLogs, &uuid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.GetData(&logs)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
)
|
||||
|
||||
// ContainerCmd groups the container-management subcommands and the
// shared --filter flag.
type ContainerCmd struct {
	Filter string `help:"filter by name"`

	List    ContainerListCmd    `cmd:"" help:"list containers"`
	Save    ContainerSaveCmd    `cmd:"" help:"save containers"`
	Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
}

// Containers returns the names of all local container images,
// narrowed by the --filter substring when set. Listing failures are
// fatal.
func (cmd ContainerCmd) Containers() (names []string) {
	images, err := container.Images()
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	for _, img := range images {
		if cmd.Filter != "" && !strings.Contains(img.Name, cmd.Filter) {
			continue
		}
		names = append(names, img.Name)
	}
	return
}
|
||||
|
||||
type ContainerListCmd struct{}
|
||||
|
||||
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
for _, name := range containerCmd.Containers() {
|
||||
fmt.Println(name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ContainerSaveCmd saves container images as gzipped tarballs.
type ContainerSaveCmd struct {
	OutDir string `help:"directory to save containers" default:"./" type:"existingdir"`
}

// Run exports each matching container image to OutDir as <name>.tar
// and compresses it to <name>.tar.gz with the system gzip.
func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
	for _, name := range containerCmd.Containers() {
		nlog := log.With().Str("name", name).Logger()

		output := filepath.Join(cmd.OutDir, name+".tar")
		nlog.Info().Msgf("saving to %v", output)

		err = container.Save(name, output)
		if err != nil {
			return
		}

		compressed := output + ".gz"
		nlog.Info().Msgf("compressing to %v", compressed)

		// gzip replaces <name>.tar with <name>.tar.gz in place
		var raw []byte
		raw, err = exec.Command("gzip", output).CombinedOutput()
		if err != nil {
			nlog.Error().Err(err).Msg(string(raw))
			return
		}

		nlog.Info().Msg("done")
	}
	return
}
|
||||
|
||||
type ContainerCleanupCmd struct{}
|
||||
|
||||
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
var output []byte
|
||||
for _, name := range containerCmd.Containers() {
|
||||
output, err = exec.Command(container.Runtime, "image", "rm", name).
|
||||
CombinedOutput()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("output", string(output)).Msg("")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
// Copyright 2024 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/client"
|
||||
)
|
||||
|
||||
// daemonCmd holds the flags and subcommands shared by all platforms.
type daemonCmd struct {
	Addr string `default:":63527"` // daemon listen/connect address

	Job  DaemonJobCmd  `cmd:"" aliases:"jobs" help:"manage jobs"`
	Repo DaemonRepoCmd `cmd:"" aliases:"repos" help:"manage repositories"`
}

// DaemonJobCmd groups the job-related subcommands.
type DaemonJobCmd struct {
	List   DaemonJobsListCmd   `cmd:"" help:"list jobs"`
	Status DaemonJobsStatusCmd `cmd:"" help:"show job status"`
	Log    DaemonJobsLogsCmd   `cmd:"" help:"job logs"`
}

// DaemonJobsListCmd holds the filters applied to the job listing.
type DaemonJobsListCmd struct {
	Group  string    `help:"group uuid"`
	Repo   string    `help:"repo name"`
	Commit string    `help:"commit sha"`
	Status string    `help:"job status"`
	After  time.Time `help:"updated after" format:"2006-01-02 15:04:05"`
}
|
||||
|
||||
func (cmd *DaemonJobsListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
|
||||
c := client.Client{RemoteAddr: g.RemoteAddr}
|
||||
|
||||
params := api.ListJobsParams{
|
||||
Group: cmd.Group,
|
||||
Repo: cmd.Repo,
|
||||
Commit: cmd.Commit,
|
||||
Status: api.Status(cmd.Status),
|
||||
}
|
||||
|
||||
if !cmd.After.IsZero() {
|
||||
params.UpdatedAfter = cmd.After.Unix()
|
||||
}
|
||||
|
||||
jobs, err := c.Jobs(params)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(jobs, "", " ")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
fmt.Println(string(b))
|
||||
return
|
||||
}
|
||||
|
||||
type DaemonJobsStatusCmd struct {
|
||||
UUID string `arg:""`
|
||||
}
|
||||
|
||||
func (cmd *DaemonJobsStatusCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
|
||||
c := client.Client{RemoteAddr: g.RemoteAddr}
|
||||
st, err := c.JobStatus(cmd.UUID)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println(st)
|
||||
return
|
||||
}
|
||||
|
||||
type DaemonJobsLogsCmd struct {
|
||||
UUID string `arg:""`
|
||||
}
|
||||
|
||||
func (cmd *DaemonJobsLogsCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
|
||||
c := client.Client{RemoteAddr: g.RemoteAddr}
|
||||
logs, err := c.JobLogs(cmd.UUID)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
for _, l := range logs {
|
||||
log.Info().Msg(l.Name)
|
||||
fmt.Println(l.Text)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type DaemonRepoCmd struct {
|
||||
List DaemonRepoListCmd `cmd:"" help:"list repos"`
|
||||
}
|
||||
|
||||
type DaemonRepoListCmd struct{}
|
||||
|
||||
func (cmd *DaemonRepoListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
|
||||
c := client.Client{RemoteAddr: g.RemoteAddr}
|
||||
repos, err := c.Repos()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(repos, "", " ")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("")
|
||||
}
|
||||
|
||||
fmt.Println(string(b))
|
||||
return
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/daemon"
|
||||
)
|
||||
|
||||
// DaemonCmd extends the cross-platform daemonCmd with the linux-only
// serve subcommand and its tuning flags.
type DaemonCmd struct {
	daemonCmd

	Threads int `help:"number of threads to use"`

	OvercommitMemory float64 `help:"overcommit memory factor"`
	OvercommitCPU    float64 `help:"overcommit CPU factor"`

	Serve DaemonServeCmd `cmd:"" help:"start daemon"`
}
|
||||
|
||||
type DaemonServeCmd struct{}
|
||||
|
||||
func (cmd *DaemonServeCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
|
||||
d, err := daemon.Init(g.Config.Kernels)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
defer d.Kill()
|
||||
|
||||
if dm.Threads > 0 {
|
||||
d.Threads = dm.Threads
|
||||
}
|
||||
|
||||
if dm.OvercommitMemory > 0 {
|
||||
d.Resources.CPU.SetOvercommit(dm.OvercommitMemory)
|
||||
}
|
||||
|
||||
if dm.OvercommitCPU > 0 {
|
||||
d.Resources.CPU.SetOvercommit(dm.OvercommitCPU)
|
||||
}
|
||||
|
||||
go d.Daemon()
|
||||
d.Listen(dm.Addr)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
package cmd
|
||||
|
||||
// DaemonCmd on darwin exposes only the cross-platform client
// subcommands; serving the daemon itself is linux-only.
type DaemonCmd struct {
	daemonCmd
}
|
|
@ -0,0 +1,380 @@
|
|||
// Copyright 2019 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// Change on ANY database update
|
||||
// Change on ANY database update
const currentDatabaseVersion = 3

// versionField is the metadata key the schema version is stored under.
const versionField = "db_version"
|
||||
|
||||
// logEntry is one row of the log table: a single artifact run against
// a single kernel, with the embedded qemu/artifact/kernel/result
// state flattened into columns.
type logEntry struct {
	ID        int
	Tag       string
	Timestamp time.Time

	qemu.System
	artifact.Artifact
	distro.KernelInfo
	artifact.Result
}
|
||||
|
||||
// createLogTable ensures the main results table exists; columns
// mirror the fields written by addToLog.
func createLogTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS log (
		id	INTEGER PRIMARY KEY,
		time	DATETIME DEFAULT CURRENT_TIMESTAMP,
		tag	TEXT,

		name	TEXT,
		type	TEXT,

		distro_type	TEXT,
		distro_release	TEXT,
		kernel_release	TEXT,

		internal_err	TEXT,

		build_output	TEXT,
		build_ok	BOOLEAN,

		run_output	TEXT,
		run_ok	BOOLEAN,

		test_output	TEXT,
		test_ok	BOOLEAN,

		qemu_stdout	TEXT,
		qemu_stderr	TEXT,

		kernel_panic	BOOLEAN,
		timeout_kill	BOOLEAN
	)`)
	return
}
|
||||
|
||||
// createMetadataTable ensures the key/value metadata table exists.
func createMetadataTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS metadata (
		id	INTEGER PRIMARY KEY,
		key	TEXT UNIQUE,
		value	TEXT
	)`)
	return
}
|
||||
|
||||
func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
|
||||
sql := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
|
||||
stmt, err := db.Prepare(sql)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
err = stmt.QueryRow(key).Scan(&exist)
|
||||
return
|
||||
}
|
||||
|
||||
// metaGetValue returns the metadata value stored under key.
func metaGetValue(db *sql.DB, key string) (value string, err error) {
	const query = "SELECT value FROM metadata " +
		"WHERE key = $1"
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&value)
	return
}
|
||||
|
||||
func metaSetValue(db *sql.DB, key, value string) (err error) {
|
||||
stmt, err := db.Prepare("INSERT OR REPLACE INTO metadata " +
|
||||
"(key, value) VALUES ($1, $2)")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
_, err = stmt.Exec(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
func getVersion(db *sql.DB) (version int, err error) {
|
||||
s, err := metaGetValue(db, versionField)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version, err = strconv.Atoi(s)
|
||||
return
|
||||
}
|
||||
|
||||
// addToLog inserts one result row into the log table, flattening the
// qemu run state, artifact identity, kernel identity, and per-phase
// results into columns.
func addToLog(db *sql.DB, q *qemu.System, ka artifact.Artifact,
	ki distro.KernelInfo, res *artifact.Result, tag string) (err error) {

	stmt, err := db.Prepare("INSERT INTO log (name, type, tag, " +
		"distro_type, distro_release, kernel_release, " +
		"internal_err, " +
		"build_output, build_ok, " +
		"run_output, run_ok, " +
		"test_output, test_ok, " +
		"qemu_stdout, qemu_stderr, " +
		"kernel_panic, timeout_kill) " +
		"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, " +
		"$10, $11, $12, $13, $14, $15, $16, $17);")
	if err != nil {
		return
	}

	defer stmt.Close()

	// argument order must match the column list above
	_, err = stmt.Exec(
		ka.Name, ka.Type, tag,
		ki.Distro.ID, ki.Distro.Release, ki.KernelRelease,
		res.InternalErrorString,
		res.Build.Output, res.Build.Ok,
		res.Run.Output, res.Run.Ok,
		res.Test.Output, res.Test.Ok,
		q.Stdout, q.Stderr,
		q.KernelPanic, q.KilledByTimeout,
	)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
// getAllLogs returns up to num of the most recent log entries,
// optionally filtered by tag (empty tag matches everything).
//
// NOTE(review): LIMIT is applied before the tag filter, so a tag
// query can return fewer than num matching rows even when more exist.
func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
	stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
		"distro_type, distro_release, kernel_release, " +
		"internal_err, " +
		"build_ok, run_ok, test_ok, kernel_panic, " +
		"timeout_kill FROM log ORDER BY datetime(time) DESC " +
		"LIMIT $1")
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query(num)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		// internal_err may be NULL, hence the NullString indirection
		var internalErr sql.NullString
		le := logEntry{}
		err = rows.Scan(&le.ID, &le.Timestamp,
			&le.Name, &le.Type, &le.Tag,
			&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
			&internalErr,
			&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
			&le.KernelPanic, &le.KilledByTimeout,
		)
		if err != nil {
			return
		}

		le.InternalErrorString = internalErr.String

		if tag == "" || tag == le.Tag {
			les = append(les, le)
		}
	}

	return
}
|
||||
|
||||
// getAllArtifactLogs returns up to num of the most recent log entries
// for one artifact (matched by name and type), optionally filtered by
// tag (empty tag matches everything).
//
// NOTE(review): as in getAllLogs, LIMIT applies before the tag filter.
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka artifact.Artifact) (
	les []logEntry, err error) {

	stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
		"distro_type, distro_release, kernel_release, " +
		"internal_err, " +
		"build_ok, run_ok, test_ok, kernel_panic, " +
		"timeout_kill FROM log WHERE name=$1 AND type=$2 " +
		"ORDER BY datetime(time) DESC LIMIT $3")
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query(ka.Name, ka.Type, num)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		// internal_err may be NULL, hence the NullString indirection
		var internalErr sql.NullString
		le := logEntry{}
		err = rows.Scan(&le.ID, &le.Timestamp,
			&le.Name, &le.Type, &le.Tag,
			&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
			&internalErr,
			&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
			&le.KernelPanic, &le.KilledByTimeout,
		)
		if err != nil {
			return
		}

		le.InternalErrorString = internalErr.String

		if tag == "" || tag == le.Tag {
			les = append(les, le)
		}
	}

	return
}
|
||||
|
||||
// getLogByID fetches one full log entry by primary key, including the
// build/run/test outputs and qemu stdout/stderr that the list queries
// omit.
func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
	stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
		"distro_type, distro_release, kernel_release, " +
		"internal_err, " +
		"build_ok, run_ok, test_ok, " +
		"build_output, run_output, test_output, " +
		"qemu_stdout, qemu_stderr, " +
		"kernel_panic, timeout_kill " +
		"FROM log WHERE id=$1")
	if err != nil {
		return
	}
	defer stmt.Close()

	// internal_err may be NULL (column added in a later schema
	// version), so scan through a NullString.
	var internalErr sql.NullString
	err = stmt.QueryRow(id).Scan(&le.ID, &le.Timestamp,
		&le.Name, &le.Type, &le.Tag,
		&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
		&internalErr,
		&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
		&le.Build.Output, &le.Run.Output, &le.Test.Output,
		&le.Stdout, &le.Stderr,
		&le.KernelPanic, &le.KilledByTimeout,
	)
	if err != nil {
		return
	}

	le.InternalErrorString = internalErr.String
	return
}
|
||||
|
||||
// getLastLog returns the most recent log entry (the row with the
// highest id).
//
// NOTE(review): this relies on SQLite-specific semantics — with a bare
// MAX(id) in the select list, the remaining columns come from the row
// holding the maximum. The query is not portable to other SQL engines.
func getLastLog(db *sql.DB) (le logEntry, err error) {
	var internalErr sql.NullString
	err = db.QueryRow("SELECT MAX(id), time, name, type, tag, "+
		"distro_type, distro_release, kernel_release, "+
		"internal_err, "+
		"build_ok, run_ok, test_ok, "+
		"build_output, run_output, test_output, "+
		"qemu_stdout, qemu_stderr, "+
		"kernel_panic, timeout_kill "+
		"FROM log").Scan(&le.ID, &le.Timestamp,
		&le.Name, &le.Type, &le.Tag,
		&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
		&internalErr,
		&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
		&le.Build.Output, &le.Run.Output, &le.Test.Output,
		&le.Stdout, &le.Stderr,
		&le.KernelPanic, &le.KilledByTimeout,
	)

	if err != nil {
		return
	}

	le.InternalErrorString = internalErr.String
	return
}
|
||||
|
||||
func createSchema(db *sql.DB) (err error) {
|
||||
err = createMetadataTable(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = createLogTable(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func openDatabase(path string) (db *sql.DB, err error) {
|
||||
db, err = sql.Open("sqlite3", path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
db.SetMaxOpenConns(1)
|
||||
|
||||
exists, _ := metaChkValue(db, versionField)
|
||||
if !exists {
|
||||
err = createSchema(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = metaSetValue(db, versionField,
|
||||
strconv.Itoa(currentDatabaseVersion))
|
||||
return
|
||||
}
|
||||
|
||||
version, err := getVersion(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if version == 1 {
|
||||
_, err = db.Exec(`ALTER TABLE log ADD tag TEXT`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = metaSetValue(db, versionField, "2")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version = 2
|
||||
|
||||
} else if version == 2 {
|
||||
_, err = db.Exec(`ALTER TABLE log ADD internal_err TEXT`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = metaSetValue(db, versionField, "3")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version = 3
|
||||
}
|
||||
|
||||
if version != currentDatabaseVersion {
|
||||
err = fmt.Errorf("database is not supported (%d instead of %d)",
|
||||
version, currentDatabaseVersion)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,287 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/logrusorgru/aurora.v2"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// DebugCmd boots a single kernel under qemu with a gdb stub attached
// for interactive kernel debugging.
type DebugCmd struct {
	Kernel string `help:"regexp (first match)" required:""`
	Gdb    string `help:"gdb listen address" default:"tcp::1234"`

	SshAddr string `help:"ssh address to listen" default:"127.0.0.1"`
	SshPort int    `help:"ssh port to listen" default:"50022"`

	ArtifactConfig string `help:"path to artifact config" type:"path"`

	// Explicit mitigation toggles; they override both the defaults
	// and the artifact config (see Run).
	Kaslr bool `help:"Enable KASLR"`
	Smep  bool `help:"Enable SMEP"`
	Smap  bool `help:"Enable SMAP"`
	Kpti  bool `help:"Enable KPTI"`

	NoKaslr bool `help:"Disable KASLR"`
	NoSmep  bool `help:"Disable SMEP"`
	NoSmap  bool `help:"Disable SMAP"`
	NoKpti  bool `help:"Disable KPTI"`
}
|
||||
|
||||
// TODO: merge with pew.go

// Run boots a qemu VM with a gdb stub for the first kernel matching
// cmd.Kernel, builds (or copies) the artifact into the guest, prints
// the ssh/gdb command lines, and drops into the interactive shell.
func (cmd *DebugCmd) Run(g *Globals) (err error) {
	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		// Non-fatal: an empty kernel config simply fails to match any
		// kernel below.
		log.Print(err)
	}

	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}
	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}

	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	ki, err := firstSupported(kcfg, ka, cmd.Kernel)
	if err != nil {
		return
	}

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		return
	}

	err = q.SetSSHAddrPort(cmd.SshAddr, cmd.SshPort)
	if err != nil {
		return
	}

	// Artifact config overrides the qemu defaults where set.
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	if ka.Docker.Timeout.Duration != 0 {
		g.Config.Docker.Timeout.Duration = ka.Docker.Timeout.Duration
	}

	q.SetKASLR(false) // set KASLR to false by default because of gdb
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	// Explicit command-line flags take precedence over both the
	// defaults above and the artifact's mitigation settings.
	if cmd.Kaslr {
		q.SetKASLR(true)
	} else if cmd.NoKaslr {
		q.SetKASLR(false)
	}

	if cmd.Smep {
		q.SetSMEP(true)
	} else if cmd.NoSmep {
		q.SetSMEP(false)
	}

	if cmd.Smap {
		q.SetSMAP(true)
	} else if cmd.NoSmap {
		q.SetSMAP(false)
	}

	if cmd.Kpti {
		q.SetKPTI(true)
	} else if cmd.NoKpti {
		q.SetKPTI(false)
	}

	// Render a mitigation name on a green background when enabled,
	// red when disabled.
	redgreen := func(name string, enabled bool) aurora.Value {
		if enabled {
			return aurora.BgGreen(aurora.Black(name))
		}

		return aurora.BgRed(aurora.White(name))
	}

	fmt.Printf("[*] %s %s %s %s\n",
		redgreen("KASLR", q.GetKASLR()),
		redgreen("SMEP", q.GetSMEP()),
		redgreen("SMAP", q.GetSMAP()),
		redgreen("KPTI", q.GetKPTI()))

	fmt.Printf("[*] SMP: %d CPUs\n", q.Cpus)
	fmt.Printf("[*] Memory: %d MB\n", q.Memory)

	q.Debug(cmd.Gdb)
	coloredGdbAddress := aurora.BgGreen(aurora.Black(cmd.Gdb))
	fmt.Printf("[*] gdb is listening on %s\n", coloredGdbAddress)

	err = q.Start()
	if err != nil {
		return
	}
	defer q.Stop()

	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	err = q.WaitForSSH(time.Minute)
	if err != nil {
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		err = artifact.CopyStandardModules(q, ki)
		if err != nil {
			log.Print(err)
			return
		}
	}

	err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
	if err != nil {
		log.Print(err)
		return
	}

	var buildDir, outFile, output, remoteFile string

	if ka.Type == artifact.Script {
		// Scripts are copied as-is; nothing to build.
		err = q.CopyFile("root", ka.Script, ka.Script)
		if err != nil {
			return
		}
	} else {
		buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
		if err != nil {
			log.Print(err, output)
			return
		}

		remoteFile = "/tmp/" + strings.Replace(ka.Name, " ", "_", -1)
		if ka.Type == artifact.KernelModule {
			remoteFile += ".ko"
		}

		err = q.CopyFile("user", outFile, remoteFile)
		if err != nil {
			return
		}
	}

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		// Relative paths are resolved against the build directory.
		if f.Local[0] != '/' {
			if buildDir != "" {
				f.Local = buildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			log.Print("error copy err:", err, f.Local, f.Remote)
			return
		}
	}

	coloredRemoteFile := aurora.BgGreen(aurora.Black(remoteFile))
	fmt.Printf("[*] build result copied to %s\n", coloredRemoteFile)

	fmt.Printf("\n%s\n", q.GetSSHCommand())
	fmt.Printf("gdb %s -ex 'target remote %s'\n\n", ki.VmlinuxPath, cmd.Gdb)

	// TODO set substitute-path /build/.../linux-... /path/to/linux-source

	err = interactive(q)
	return
}
|
||||
|
||||
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
|
||||
kernel string) (ki distro.KernelInfo, err error) {
|
||||
|
||||
km, err := kernelMask(kernel)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ka.Targets = []artifact.Target{km}
|
||||
|
||||
for _, ki = range kcfg.Kernels {
|
||||
var supported bool
|
||||
supported, err = ka.Supported(ki)
|
||||
if err != nil || supported {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("no supported kernel found")
|
||||
return
|
||||
}
|
||||
|
||||
func handleLine(q *qemu.System) (err error) {
|
||||
fmt.Print("out-of-tree> ")
|
||||
rawLine := "help"
|
||||
fmt.Scanf("%s", &rawLine)
|
||||
params := strings.Fields(rawLine)
|
||||
cmd := params[0]
|
||||
|
||||
switch cmd {
|
||||
case "h", "help":
|
||||
fmt.Printf("help\t: print this help message\n")
|
||||
fmt.Printf("log\t: print qemu log\n")
|
||||
fmt.Printf("clog\t: print qemu log and cleanup buffer\n")
|
||||
fmt.Printf("cleanup\t: cleanup qemu log buffer\n")
|
||||
fmt.Printf("ssh\t: print arguments to ssh command\n")
|
||||
fmt.Printf("quit\t: quit\n")
|
||||
case "l", "log":
|
||||
fmt.Println(q.Stdout)
|
||||
case "cl", "clog":
|
||||
fmt.Println(q.Stdout)
|
||||
q.Stdout = ""
|
||||
case "c", "cleanup":
|
||||
q.Stdout = ""
|
||||
case "s", "ssh":
|
||||
fmt.Println(q.GetSSHCommand())
|
||||
case "q", "quit":
|
||||
return errors.New("end of session")
|
||||
default:
|
||||
fmt.Println("No such command")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func interactive(q *qemu.System) (err error) {
|
||||
for {
|
||||
err = handleLine(q)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,216 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
// DistroCmd groups distribution-related subcommands.
type DistroCmd struct {
	List DistroListCmd `cmd:"" help:"list available distros"`

	// Debian-only maintenance commands, hidden from regular users.
	Debian DebianCmd `cmd:"" hidden:""`
}
|
||||
|
||||
// DebianCmd holds Debian-specific maintenance subcommands and the
// flags they share.
type DebianCmd struct {
	Cache DebianCacheCmd `cmd:"" help:"populate cache"`
	Fetch DebianFetchCmd `cmd:"" help:"download deb packages"`

	Limit int    `help:"limit amount of kernels to fetch"`
	Regex string `help:"match deb pkg names by regex" default:".*"`
}
|
||||
|
||||
// DebianCacheCmd populates (and optionally dumps) the local Debian
// kernel metadata cache.
type DebianCacheCmd struct {
	Path          string `help:"path to cache"`
	Refetch       int    `help:"days before refetch versions without deb package" default:"7"`
	UpdateRelease bool   `help:"update release data"`
	UpdateKbuild  bool   `help:"update kbuild package"`
	Dump          bool   `help:"dump cache"`
}
|
||||
|
||||
// Run populates the Debian kernel metadata cache, optionally updating
// release/kbuild data, and dumps matching entries when --dump is set.
func (cmd *DebianCacheCmd) Run(dcmd *DebianCmd) (err error) {
	if cmd.Path != "" {
		debian.CachePath = cmd.Path
	}
	debian.RefetchDays = cmd.Refetch

	log.Info().Msg("Fetching kernels...")

	// Zero means "no limit".
	if dcmd.Limit == 0 {
		dcmd.Limit = math.MaxInt32
	}

	mode := debian.NoMode
	if cmd.UpdateRelease {
		mode |= debian.UpdateRelease
	}
	if cmd.UpdateKbuild {
		mode |= debian.UpdateKbuild
	}

	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, mode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	if cmd.Dump {
		// Shadowing the outer err is harmless: log.Fatal exits the
		// process.
		re, err := regexp.Compile(dcmd.Regex)
		if err != nil {
			log.Fatal().Err(err).Msg("regex")
		}

		for _, kernel := range kernels {
			if !re.MatchString(kernel.Image.Deb.Name) {
				continue
			}
			fmt.Println(spew.Sdump(kernel))
		}
	}

	log.Info().Msg("Success")
	return
}
|
||||
|
||||
// DebianFetchCmd downloads deb kernel packages that are not yet
// available on the out-of-tree mirror.
type DebianFetchCmd struct {
	Path         string `help:"path to download directory" type:"existingdir" default:"./"`
	IgnoreMirror bool   `help:"ignore check if packages on the mirror"`

	Max int `help:"do not download more than X" default:"100500"`

	Threads int `help:"parallel download threads" default:"8"`

	Timeout time.Duration `help:"timeout for each download" default:"1m"`

	// swg bounds the number of concurrent fetch goroutines.
	swg sizedwaitgroup.SizedWaitGroup
	// hasResults records whether at least one package was downloaded.
	hasResults bool
}
|
||||
|
||||
// fetch downloads one deb package into cmd.Path, skipping packages
// already present locally or available on the out-of-tree mirror
// (unless --ignore-mirror). Intended to run as a goroutine; signals
// completion through cmd.swg.
func (cmd *DebianFetchCmd) fetch(pkg snapshot.Package) {
	flog := log.With().
		Str("pkg", pkg.Deb.Name).
		Logger()

	defer cmd.swg.Done()

	if !cmd.IgnoreMirror {
		flog.Debug().Msg("check mirror")
		found, _ := cache.PackageURL(distro.Debian, pkg.Deb.URL)
		if found {
			flog.Debug().Msg("found on the mirror")
			return
		}
	}

	target := filepath.Join(cmd.Path, filepath.Base(pkg.Deb.URL))

	if fs.PathExists(target) {
		flog.Debug().Msg("already exists")
		return
	}

	// Download into a temporary directory first and rename at the end,
	// so an interrupted transfer never leaves a partial file at the
	// final path.
	tmp, err := os.MkdirTemp(cmd.Path, "tmp-")
	if err != nil {
		flog.Fatal().Err(err).Msg("mkdir")
		return
	}
	defer os.RemoveAll(tmp)

	flog.Info().Msg("fetch")
	flog.Debug().Msg(pkg.Deb.URL)

	ctx, cancel := context.WithTimeout(context.Background(), cmd.Timeout)
	defer cancel()

	req, err := grab.NewRequest(tmp, pkg.Deb.URL)
	if err != nil {
		flog.Warn().Err(err).Msg("cannot create request")
		return
	}
	req = req.WithContext(ctx)

	resp := grab.DefaultClient.Do(req)
	if err := resp.Err(); err != nil {
		flog.Warn().Err(err).Msg("request cancelled")
		return
	}

	err = os.Rename(resp.Filename, target)
	if err != nil {
		flog.Fatal().Err(err).Msg("mv")
	}

	// NOTE(review): hasResults and Max are written here from multiple
	// concurrent fetch goroutines without synchronization — a data
	// race; consider sync/atomic or a mutex. Confirm with -race.
	cmd.hasResults = true
	cmd.Max--
}
|
||||
|
||||
// Run collects the deb packages whose names match the configured
// regex and downloads them with cmd.Threads parallel workers.
func (cmd *DebianFetchCmd) Run(dcmd *DebianCmd) (err error) {
	re, err := regexp.Compile(dcmd.Regex)
	if err != nil {
		log.Fatal().Err(err).Msg("regex")
	}

	log.Info().Msg("will not download packages that exist on the mirror")
	log.Info().Msg("use --ignore-mirror if you really need it")

	// Zero means "no limit".
	if dcmd.Limit == 0 {
		dcmd.Limit = math.MaxInt32
	}

	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, debian.NoMode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	var packages []snapshot.Package
	for _, kernel := range kernels {
		for _, pkg := range kernel.Packages() {
			if !re.MatchString(pkg.Deb.Name) {
				continue
			}

			packages = append(packages, pkg)
		}
	}

	cmd.swg = sizedwaitgroup.New(cmd.Threads)
	for _, pkg := range packages {
		// NOTE(review): cmd.Max is decremented inside the concurrent
		// fetch goroutines, so this check is racy — confirm.
		if cmd.Max <= 0 {
			break
		}

		cmd.swg.Add()
		go cmd.fetch(pkg)
	}
	cmd.swg.Wait()

	if !cmd.hasResults {
		log.Fatal().Msg("no packages found to download")
	}
	return
}
|
||||
|
||||
// DistroListCmd lists every supported distribution and release.
type DistroListCmd struct{}
|
||||
|
||||
func (cmd *DistroListCmd) Run() (err error) {
|
||||
for _, d := range distro.List() {
|
||||
fmt.Println(d.ID, d.Release)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/naoina/toml"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// GenCmd prints an example artifact config (.out-of-tree.toml) for a
// kernel module or an exploit to stdout.
type GenCmd struct {
	Type string `enum:"module,exploit" required:"" help:"module/exploit"`
}
|
||||
|
||||
func (cmd *GenCmd) Run(g *Globals) (err error) {
|
||||
switch cmd.Type {
|
||||
case "module":
|
||||
err = genConfig(artifact.KernelModule)
|
||||
case "exploit":
|
||||
err = genConfig(artifact.KernelExploit)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func genConfig(at artifact.ArtifactType) (err error) {
|
||||
a := artifact.Artifact{
|
||||
Name: "Put name here",
|
||||
Type: at,
|
||||
}
|
||||
a.Targets = append(a.Targets, artifact.Target{
|
||||
Distro: distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
|
||||
Kernel: artifact.Kernel{Regex: ".*"},
|
||||
})
|
||||
a.Targets = append(a.Targets, artifact.Target{
|
||||
Distro: distro.Distro{ID: distro.Debian, Release: "8"},
|
||||
Kernel: artifact.Kernel{Regex: ".*"},
|
||||
})
|
||||
a.Preload = append(a.Preload, artifact.PreloadModule{
|
||||
Repo: "Repo name (e.g. https://github.com/openwall/lkrg)",
|
||||
})
|
||||
a.Patches = append(a.Patches, artifact.Patch{
|
||||
Path: "/path/to/profiling.patch",
|
||||
})
|
||||
|
||||
buf, err := toml.Marshal(&a)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Print(string(buf))
|
||||
return
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
)
|
||||
|
||||
// Globals holds the flags shared by every out-of-tree subcommand.
type Globals struct {
	Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`

	WorkDir string `help:"path to work directory" default:"./" type:"path" existingdir:""`

	// CacheURL points at the prebuilt images/containers mirror.
	CacheURL url.URL

	Remote     bool   `help:"run at remote server"`
	RemoteAddr string `default:"localhost:63527"`
}
|
|
@ -0,0 +1,114 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// ImageCmd groups qemu image management subcommands.
type ImageCmd struct {
	List ImageListCmd `cmd:"" help:"list images"`
	Edit ImageEditCmd `cmd:"" help:"edit image"`
}
|
||||
|
||||
// ImageListCmd lists the qemu images stored in the dotfiles images
// directory.
type ImageListCmd struct{}
|
||||
|
||||
func (cmd *ImageListCmd) Run(g *Globals) (err error) {
|
||||
entries, err := os.ReadDir(dotfiles.Dir("images"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
fmt.Println(e.Name())
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ImageEditCmd boots an image in a mutable qemu VM so it can be
// modified over ssh; --dry-run only prints the command lines.
type ImageEditCmd struct {
	Name   string `help:"image name" required:""`
	DryRun bool   `help:"do nothing, just print commands"`
}
|
||||
|
||||
func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
|
||||
image := filepath.Join(dotfiles.Dir("images"), cmd.Name)
|
||||
if !fs.PathExists(image) {
|
||||
fmt.Println("image does not exist")
|
||||
}
|
||||
|
||||
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(kcfg.Kernels) == 0 {
|
||||
return errors.New("no kernels found")
|
||||
}
|
||||
|
||||
ki := distro.KernelInfo{}
|
||||
for _, k := range kcfg.Kernels {
|
||||
if k.RootFS == image {
|
||||
ki = k
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
kernel := qemu.Kernel{
|
||||
KernelPath: ki.KernelPath,
|
||||
InitrdPath: ki.InitrdPath,
|
||||
}
|
||||
|
||||
q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
|
||||
|
||||
q.Mutable = true
|
||||
|
||||
if cmd.DryRun {
|
||||
s := q.Executable()
|
||||
for _, arg := range q.Args() {
|
||||
if strings.Contains(arg, " ") ||
|
||||
strings.Contains(arg, ",") {
|
||||
|
||||
s += fmt.Sprintf(` "%s"`, arg)
|
||||
} else {
|
||||
s += fmt.Sprintf(" %s", arg)
|
||||
}
|
||||
}
|
||||
fmt.Println(s)
|
||||
fmt.Println(q.GetSSHCommand())
|
||||
return
|
||||
}
|
||||
|
||||
err = q.Start()
|
||||
if err != nil {
|
||||
fmt.Println("Qemu start error:", err)
|
||||
return
|
||||
}
|
||||
defer q.Stop()
|
||||
|
||||
fmt.Print("ssh command:\n\n\t")
|
||||
fmt.Println(q.GetSSHCommand())
|
||||
|
||||
fmt.Print("\npress enter to stop")
|
||||
fmt.Scanln()
|
||||
|
||||
q.Command("root", "poweroff")
|
||||
|
||||
for !q.Died {
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,448 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/naoina/toml"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
"code.dumpstack.io/tools/out-of-tree/kernel"
|
||||
)
|
||||
|
||||
// KernelCmd groups kernel installation/generation subcommands and the
// flags they share.
type KernelCmd struct {
	NoDownload     bool `help:"do not download qemu image while kernel generation"`
	UseHost        bool `help:"also use host kernels"`
	Force          bool `help:"force reinstall kernel"`
	NoHeaders      bool `help:"do not install kernel headers"`
	Shuffle        bool `help:"randomize kernels installation order"`
	Retries        int  `help:"amount of tries for each kernel" default:"2"`
	Threads        int  `help:"threads for parallel installation" default:"1"`
	Update         bool `help:"update container"`
	ContainerCache bool `help:"try prebuilt container images first" default:"true" negatable:""`
	Max            int  `help:"maximum kernels to download" default:"100500"`
	NoPrune        bool `help:"do not remove dangling or unused images from local storage after build"`
	NoCfgRegen     bool `help:"do not update kernels.toml"`

	ContainerTimeout time.Duration `help:"container timeout"`

	List        KernelListCmd        `cmd:"" help:"list kernels"`
	ListRemote  KernelListRemoteCmd  `cmd:"" help:"list remote kernels"`
	Autogen     KernelAutogenCmd     `cmd:"" help:"generate kernels based on the current config"`
	Genall      KernelGenallCmd      `cmd:"" help:"generate all kernels for distro"`
	Install     KernelInstallCmd     `cmd:"" help:"install specific kernel"`
	ConfigRegen KernelConfigRegenCmd `cmd:"" help:"regenerate config"`

	// shutdown is set by the SIGINT handler to stop installs early.
	shutdown bool
	// kcfg caches the parsed kernels.toml.
	kcfg config.KernelConfig

	// stats counts attempted vs. successfully installed kernels.
	stats struct {
		overall int
		success int
	}
}
|
||||
|
||||
// UpdateConfig regenerates kernels.toml from the kernels currently
// provided by every known distro (plus host kernels when --use-host
// is set), unless --no-cfg-regen was given. A warning is logged first
// when some requested kernels failed to install.
func (cmd KernelCmd) UpdateConfig() (err error) {
	if cmd.stats.success != cmd.stats.overall {
		log.Warn().Msgf("%d kernels failed to install",
			cmd.stats.overall-cmd.stats.success)
	}

	if cmd.NoCfgRegen {
		log.Info().Msgf("kernels.toml is not updated")
		return
	}

	log.Info().Msgf("updating kernels.toml")
	kcfg := config.KernelConfig{}

	if cmd.UseHost {
		// Get host kernels
		kcfg.Kernels, err = kernel.GenHostKernels(!cmd.NoDownload)
		if err != nil {
			return
		}
	}

	for _, dist := range distro.List() {
		var kernels []distro.KernelInfo
		kernels, err = dist.Kernels()
		if err != nil {
			return
		}

		kcfg.Kernels = append(kcfg.Kernels, kernels...)
	}

	buf, err := toml.Marshal(&kcfg)
	if err != nil {
		return
	}

	err = os.WriteFile(dotfiles.File("kernels.toml"), buf, os.ModePerm)
	if err != nil {
		return
	}

	log.Info().Msgf("kernels.toml successfully updated")
	return
}
|
||||
|
||||
// GenKernel installs a single kernel package for the target distro,
// retrying up to cmd.Retries times with a one-second pause between
// attempts. Already-installed kernels are skipped unless --force.
//
// NOTE(review): this method is launched from multiple goroutines (see
// Generate), and cmd.stats.overall / cmd.stats.success are updated
// without synchronization — a data race when Threads > 1; confirm and
// consider sync/atomic.
func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
	flog := log.With().
		Str("kernel", pkg).
		Str("distro", km.Distro.String()).
		Logger()

	reinstall := false
	for _, kinfo := range cmd.kcfg.Kernels {
		if !km.Distro.Equal(kinfo.Distro) {
			continue
		}

		// Matching differs per distro: Debian compares the exact
		// package name, openSUSE the kernel release substring, and
		// everything else the kernel version substring.
		var found bool
		if kinfo.Distro.ID == distro.Debian { // FIXME
			found = pkg == kinfo.Package
		} else if kinfo.Distro.ID == distro.OpenSUSE {
			found = strings.Contains(pkg, kinfo.KernelRelease)
		} else {
			found = strings.Contains(pkg, kinfo.KernelVersion)
		}

		if found {
			if !cmd.Force {
				flog.Info().Msg("already installed")
				return
			}
			reinstall = true
			break
		}
	}

	if reinstall {
		flog.Info().Msg("reinstall")
	} else {
		flog.Info().Msg("install")
	}

	cmd.stats.overall += 1

	var attempt int
	for {
		attempt++

		// Stop between attempts when a SIGINT was received.
		if cmd.shutdown {
			return
		}

		err := km.Distro.Install(pkg, !cmd.NoHeaders)
		if err == nil {
			cmd.stats.success += 1
			flog.Info().Msg("success")
			break
		} else if attempt >= cmd.Retries {
			flog.Error().Err(err).Msg("install kernel")
			flog.Debug().Msg("skip")
			break
		} else {
			flog.Warn().Err(err).Msg("install kernel")
			time.Sleep(time.Second)
			flog.Info().Msg("retry")
		}
	}
}
|
||||
|
||||
// fetchContainerCache tries to download a prebuilt container image
// from the out-of-tree cache and load it locally. Best effort: every
// error is deliberately swallowed, since a failed fetch only means
// the container will be built from scratch instead.
func (cmd *KernelCmd) fetchContainerCache(c container.Container) {
	if !cmd.ContainerCache {
		return
	}
	if c.Exist() {
		return
	}

	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
	if err != nil {
		return
	}

	defer os.Remove(resp.Filename)

	err = container.Load(resp.Filename, c.Name())
	if err == nil {
		log.Info().Msgf("use prebuilt container %s", c.Name())
	}
}
|
||||
|
||||
// Generate installs all kernels matching target km: it prepares the
// rootfs image and build container, matches the distro's available
// kernel packages, and installs them with up to cmd.Threads parallel
// workers. Honors cmd.shutdown (SIGINT) and stops once cmd.Max
// kernels have been installed successfully.
func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
	defer func() {
		if err != nil {
			log.Warn().Err(err).Msg("")
		} else {
			log.Debug().Err(err).Msg("")
		}
	}()

	if cmd.Update {
		container.UseCache = false
	}
	if cmd.NoPrune {
		container.Prune = false
	}

	// A missing kernels.toml is expected on the first run, so the
	// error is only logged at debug level.
	cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		log.Debug().Err(err).Msg("read kernels config")
	}

	container.Commands = g.Config.Docker.Commands
	container.Registry = g.Config.Docker.Registry
	container.Timeout = g.Config.Docker.Timeout.Duration
	if cmd.ContainerTimeout != 0 {
		container.Timeout = cmd.ContainerTimeout
	}

	log.Info().Msgf("Generating for target %v", km)

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), !cmd.NoDownload)
	if err != nil || cmd.shutdown {
		return
	}

	c, err := container.New(km.Distro)
	if err != nil || cmd.shutdown {
		return
	}

	cmd.fetchContainerCache(c)

	pkgs, err := kernel.MatchPackages(km)
	if err != nil || cmd.shutdown {
		return
	}

	if cmd.Shuffle {
		pkgs = kernel.ShuffleStrings(pkgs)
	}

	swg := sizedwaitgroup.New(cmd.Threads)

	for i, pkg := range pkgs {
		// NOTE(review): the early returns below skip swg.Wait(), so
		// in-flight install goroutines are abandoned on shutdown —
		// confirm whether that is intended.
		if cmd.shutdown {
			err = nil
			return
		}

		// swg.Add blocks while cmd.Threads installs are in flight, so
		// shutdown is re-checked after it returns.
		swg.Add()

		if cmd.shutdown {
			err = nil
			swg.Done()
			return
		}

		if cmd.stats.success >= cmd.Max {
			log.Print("Max is reached")
			swg.Done()
			break
		}

		log.Info().Msgf("%d/%d %s", i+1, len(pkgs), pkg)

		go func(p string) {
			defer swg.Done()
			cmd.GenKernel(km, p)
		}(pkg)
	}
	swg.Wait()

	return
}
|
||||
|
||||
// KernelListCmd lists the locally installed kernels from kernels.toml.
type KernelListCmd struct{}
|
||||
|
||||
func (cmd *KernelListCmd) Run(g *Globals) (err error) {
|
||||
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msg("read kernel config")
|
||||
}
|
||||
|
||||
if len(kcfg.Kernels) == 0 {
|
||||
return errors.New("no kernels found")
|
||||
}
|
||||
|
||||
for _, k := range kcfg.Kernels {
|
||||
fmt.Println(k.Distro.ID, k.Distro.Release, k.KernelRelease)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// KernelListRemoteCmd lists the kernel packages available in the
// repositories of one distribution release.
type KernelListRemoteCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver    string `help:"distro version"`
}
|
||||
|
||||
// Run prints the kernel packages a distro release offers, using the
// distro's build container to query its repositories.
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	if kernelCmd.Update {
		container.UseCache = false
	}
	if kernelCmd.NoPrune {
		container.Prune = false
	}

	distroType, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	// Match every kernel of the requested distro/release.
	km := artifact.Target{
		Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
		Kernel: artifact.Kernel{Regex: ".*"},
	}

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), false)
	if err != nil {
		return
	}

	container.Registry = g.Config.Docker.Registry
	container.Commands = g.Config.Docker.Commands

	c, err := container.New(km.Distro)
	if err != nil {
		return
	}

	kernelCmd.fetchContainerCache(c)

	pkgs, err := kernel.MatchPackages(km)
	// error check skipped on purpose

	for _, k := range pkgs {
		fmt.Println(k)
	}

	return
}
|
||||
|
||||
type KernelAutogenCmd struct{}
|
||||
|
||||
func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
ka, err := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernel.SetSigintHandler(&kernelCmd.shutdown)
|
||||
|
||||
for _, sk := range ka.Targets {
|
||||
if sk.Distro.Release == "" {
|
||||
err = errors.New("please set distro_release")
|
||||
return
|
||||
}
|
||||
|
||||
err = kernelCmd.Generate(g, sk)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if kernelCmd.shutdown {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return kernelCmd.UpdateConfig()
|
||||
}
|
||||
|
||||
type KernelGenallCmd struct {
|
||||
Distro string `help:"distribution"`
|
||||
Ver string `help:"distro version"`
|
||||
}
|
||||
|
||||
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
distroType, err := distro.NewID(cmd.Distro)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernel.SetSigintHandler(&kernelCmd.shutdown)
|
||||
|
||||
for _, dist := range distro.List() {
|
||||
if kernelCmd.shutdown {
|
||||
break
|
||||
}
|
||||
|
||||
if distroType != distro.None && distroType != dist.ID {
|
||||
continue
|
||||
}
|
||||
|
||||
if cmd.Ver != "" && dist.Release != cmd.Ver {
|
||||
continue
|
||||
}
|
||||
|
||||
target := artifact.Target{
|
||||
Distro: dist,
|
||||
Kernel: artifact.Kernel{Regex: ".*"},
|
||||
}
|
||||
|
||||
err = kernelCmd.Generate(g, target)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return kernelCmd.UpdateConfig()
|
||||
}
|
||||
|
||||
type KernelInstallCmd struct {
|
||||
Distro string `required:"" help:"distribution"`
|
||||
Ver string `required:"" help:"distro version"`
|
||||
Kernel string `required:"" help:"kernel release mask"`
|
||||
}
|
||||
|
||||
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
distroType, err := distro.NewID(cmd.Distro)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernel.SetSigintHandler(&kernelCmd.shutdown)
|
||||
|
||||
km := artifact.Target{
|
||||
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
|
||||
Kernel: artifact.Kernel{Regex: cmd.Kernel},
|
||||
}
|
||||
err = kernelCmd.Generate(g, km)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return kernelCmd.UpdateConfig()
|
||||
}
|
||||
|
||||
type KernelConfigRegenCmd struct{}
|
||||
|
||||
func (cmd *KernelConfigRegenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
return kernelCmd.UpdateConfig()
|
||||
}
|
|
@ -0,0 +1,322 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/logrusorgru/aurora.v2"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
)
|
||||
|
||||
// LogCmd groups the subcommands for inspecting test result logs.
type LogCmd struct {
	Query    LogQueryCmd    `cmd:"" help:"query logs"`
	Dump     LogDumpCmd     `cmd:"" help:"show all info for log entry with ID"`
	Json     LogJsonCmd     `cmd:"" help:"generate json statistics"`
	Markdown LogMarkdownCmd `cmd:"" help:"generate markdown statistics"`
}
|
||||
|
||||
type LogQueryCmd struct {
|
||||
Num int `help:"how much lines" default:"50"`
|
||||
Rate bool `help:"show artifact success rate"`
|
||||
Tag string `help:"filter tag"`
|
||||
}
|
||||
|
||||
func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
|
||||
db, err := openDatabase(g.Config.Database)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var les []logEntry
|
||||
|
||||
ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
|
||||
if kaErr == nil {
|
||||
log.Print(".out-of-tree.toml found, filter by artifact name")
|
||||
les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
|
||||
} else {
|
||||
les, err = getAllLogs(db, cmd.Tag, cmd.Num)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
s := "\nS"
|
||||
if cmd.Rate {
|
||||
if kaErr != nil {
|
||||
err = kaErr
|
||||
return
|
||||
}
|
||||
|
||||
s = fmt.Sprintf("{[%s] %s} Overall s", ka.Type, ka.Name)
|
||||
|
||||
les, err = getAllArtifactLogs(db, cmd.Tag, math.MaxInt64, ka)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
for _, l := range les {
|
||||
logLogEntry(l)
|
||||
}
|
||||
}
|
||||
|
||||
success := 0
|
||||
for _, l := range les {
|
||||
if l.Test.Ok {
|
||||
success++
|
||||
}
|
||||
}
|
||||
|
||||
overall := float64(success) / float64(len(les))
|
||||
fmt.Printf("%success rate: %.04f (~%.0f%%)\n",
|
||||
s, overall, overall*100)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type LogDumpCmd struct {
|
||||
ID int `help:"id" default:"-1"`
|
||||
}
|
||||
|
||||
func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
|
||||
db, err := openDatabase(g.Config.Database)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var l logEntry
|
||||
if cmd.ID > 0 {
|
||||
l, err = getLogByID(db, cmd.ID)
|
||||
} else {
|
||||
l, err = getLastLog(db)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("ID:", l.ID)
|
||||
fmt.Println("Date:", l.Timestamp.Format("2006-01-02 15:04"))
|
||||
fmt.Println("Tag:", l.Tag)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Println("Type:", l.Type.String())
|
||||
fmt.Println("Name:", l.Name)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Println("Distro:", l.Distro.ID.String(), l.Distro.Release)
|
||||
fmt.Println("Kernel:", l.KernelRelease)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Println("Build ok:", l.Build.Ok)
|
||||
if l.Type == artifact.KernelModule {
|
||||
fmt.Println("Insmod ok:", l.Run.Ok)
|
||||
}
|
||||
fmt.Println("Test ok:", l.Test.Ok)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Printf("Build output:\n%s\n", l.Build.Output)
|
||||
fmt.Println()
|
||||
|
||||
if l.Type == artifact.KernelModule {
|
||||
fmt.Printf("Insmod output:\n%s\n", l.Run.Output)
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
fmt.Printf("Test output:\n%s\n", l.Test.Output)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Printf("Qemu stdout:\n%s\n", l.Stdout)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Printf("Qemu stderr:\n%s\n", l.Stderr)
|
||||
fmt.Println()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type LogJsonCmd struct {
|
||||
Tag string `required:"" help:"filter tag"`
|
||||
}
|
||||
|
||||
func (cmd *LogJsonCmd) Run(g *Globals) (err error) {
|
||||
db, err := openDatabase(g.Config.Database)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
distros, err := getStats(db, g.WorkDir, cmd.Tag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bytes, err := json.Marshal(&distros)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println(string(bytes))
|
||||
return
|
||||
}
|
||||
|
||||
type LogMarkdownCmd struct {
|
||||
Tag string `required:"" help:"filter tag"`
|
||||
}
|
||||
|
||||
func (cmd *LogMarkdownCmd) Run(g *Globals) (err error) {
|
||||
db, err := openDatabase(g.Config.Database)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
distros, err := getStats(db, g.WorkDir, cmd.Tag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Distro", "Release", "Kernel", "Reliability"})
|
||||
table.SetBorders(tablewriter.Border{
|
||||
Left: true, Top: false, Right: true, Bottom: false})
|
||||
table.SetCenterSeparator("|")
|
||||
|
||||
for distro, releases := range distros {
|
||||
for release, kernels := range releases {
|
||||
for kernel, stats := range kernels {
|
||||
all := float64(stats.All)
|
||||
ok := float64(stats.TestOK)
|
||||
r := fmt.Sprintf("%6.02f%%", (ok/all)*100)
|
||||
table.Append([]string{distro, release, kernel, r})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
table.Render()
|
||||
return
|
||||
}
|
||||
|
||||
// center pads s with spaces on both sides to width w (left-biased
// when the padding is odd). Strings longer than w are returned
// unchanged — fmt width specifiers never truncate.
func center(s string, w int) string {
	half := (w + len(s)) / 2
	padded := fmt.Sprintf("%*s", half, s)   // left padding
	return fmt.Sprintf("%-*s", w, padded)   // right padding
}
|
||||
|
||||
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
|
||||
name = center(name, 10)
|
||||
if ok {
|
||||
aurv = aurora.BgGreen(aurora.Black(name))
|
||||
} else {
|
||||
aurv = aurora.BgRed(aurora.White(aurora.Bold(name)))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// logLogEntry prints a one-line colored summary of a log entry: id,
// tag, timestamp, artifact, distro/kernel and a status badge.
func logLogEntry(l logEntry) {
	distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
		l.Distro.Release, l.KernelRelease)

	artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)

	timestamp := l.Timestamp.Format("2006-01-02 15:04")

	// Badge shows the earliest failed stage (or the final verdict):
	// internal error wins; exploits have build -> LPE; everything
	// else has build -> insmod -> test.
	var status aurora.Value
	if l.InternalErrorString != "" {
		status = genOkFailCentered("INTERNAL", false)
	} else if l.Type == artifact.KernelExploit {
		if l.Build.Ok {
			status = genOkFailCentered("LPE", l.Test.Ok)
		} else {
			status = genOkFailCentered("BUILD", l.Build.Ok)
		}
	} else {
		if l.Build.Ok {
			if l.Run.Ok {
				status = genOkFailCentered("TEST", l.Test.Ok)
			} else {
				status = genOkFailCentered("INSMOD", l.Run.Ok)
			}
		} else {
			status = genOkFailCentered("BUILD", l.Build.Ok)
		}
	}

	// Panic takes precedence over timeout in the suffix.
	additional := ""
	if l.KernelPanic {
		additional = "(panic)"
	} else if l.KilledByTimeout {
		additional = "(timeout)"
	}

	colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
		l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
		additional)

	fmt.Println(colored)
}
|
||||
|
||||
// runstat aggregates per-kernel run counters for statistics output.
// All is the total number of runs; the other fields count runs where
// the corresponding stage succeeded (BuildOK/RunOK/TestOK) or the
// corresponding event happened (Timeout/Panic).
type runstat struct {
	All, BuildOK, RunOK, TestOK, Timeout, Panic int
}
|
||||
|
||||
func getStats(db *sql.DB, path, tag string) (
|
||||
distros map[string]map[string]map[string]runstat, err error) {
|
||||
|
||||
var les []logEntry
|
||||
|
||||
ka, kaErr := artifact.Artifact{}.Read(path + "/.out-of-tree.toml")
|
||||
if kaErr == nil {
|
||||
les, err = getAllArtifactLogs(db, tag, -1, ka)
|
||||
} else {
|
||||
les, err = getAllLogs(db, tag, -1)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
distros = make(map[string]map[string]map[string]runstat)
|
||||
|
||||
for _, l := range les {
|
||||
_, ok := distros[l.Distro.ID.String()]
|
||||
if !ok {
|
||||
distros[l.Distro.ID.String()] = make(map[string]map[string]runstat)
|
||||
}
|
||||
|
||||
_, ok = distros[l.Distro.ID.String()][l.Distro.Release]
|
||||
if !ok {
|
||||
distros[l.Distro.ID.String()][l.Distro.Release] = make(map[string]runstat)
|
||||
}
|
||||
|
||||
rs := distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease]
|
||||
|
||||
rs.All++
|
||||
if l.Build.Ok {
|
||||
rs.BuildOK++
|
||||
}
|
||||
if l.Run.Ok {
|
||||
rs.RunOK++
|
||||
}
|
||||
if l.Test.Ok {
|
||||
rs.TestOK++
|
||||
}
|
||||
if l.KernelPanic {
|
||||
rs.Panic++
|
||||
}
|
||||
if l.KilledByTimeout {
|
||||
rs.Timeout++
|
||||
}
|
||||
|
||||
distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease] = rs
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// PackCmd runs the test pipeline for every artifact directory found
// directly under the working directory, tagging all results with one
// shared tag so they can be queried together.
type PackCmd struct {
	Autogen     bool  `help:"kernel autogeneration"`
	UseHost     bool  `help:"also use host kernels"`
	NoDownload  bool  `help:"do not download qemu image while kernel generation"`
	ExploitRuns int64 `default:"4" help:"amount of runs of each exploit"`
	KernelRuns  int64 `default:"1" help:"amount of runs of each kernel"`
	Max         int   `help:"download random kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"1"`

	Threads int `help:"threads" default:"4"`

	Tag string `help:"filter tag"`

	Timeout       time.Duration `help:"timeout after tool will not spawn new tests"`
	QemuTimeout   time.Duration `help:"timeout for qemu"`
	DockerTimeout time.Duration `help:"timeout for docker"`
}
|
||||
|
||||
func (cmd *PackCmd) Run(g *Globals) (err error) {
|
||||
tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
|
||||
log.Print("Tag:", tag)
|
||||
|
||||
files, err := os.ReadDir(g.WorkDir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
workPath := g.WorkDir + "/" + f.Name()
|
||||
|
||||
if !fs.PathExists(workPath + "/.out-of-tree.toml") {
|
||||
continue
|
||||
}
|
||||
|
||||
if cmd.Autogen {
|
||||
autogen := KernelAutogenCmd{}
|
||||
err = autogen.Run(
|
||||
&KernelCmd{
|
||||
NoDownload: cmd.NoDownload,
|
||||
UseHost: cmd.UseHost,
|
||||
Max: cmd.Max,
|
||||
},
|
||||
&Globals{
|
||||
Config: g.Config,
|
||||
WorkDir: workPath,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.Print(f.Name())
|
||||
|
||||
pew := PewCmd{
|
||||
Max: cmd.KernelRuns,
|
||||
Runs: cmd.ExploitRuns,
|
||||
Threads: cmd.Threads,
|
||||
Tag: tag,
|
||||
Timeout: cmd.Timeout,
|
||||
QemuTimeout: cmd.QemuTimeout,
|
||||
DockerTimeout: cmd.DockerTimeout,
|
||||
Dist: pathDevNull,
|
||||
}
|
||||
|
||||
pew.Run(&Globals{
|
||||
Config: g.Config,
|
||||
WorkDir: workPath,
|
||||
})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,601 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/google/uuid"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/logrusorgru/aurora.v2"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/client"
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// pathDevNull is the sentinel dist path meaning "do not save build artifacts".
const pathDevNull = "/dev/null"
|
||||
|
||||
type LevelWriter struct {
|
||||
io.Writer
|
||||
Level zerolog.Level
|
||||
}
|
||||
|
||||
func (lw *LevelWriter) WriteLevel(l zerolog.Level, p []byte) (n int, err error) {
|
||||
if l >= lw.Level {
|
||||
return lw.Writer.Write(p)
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// ConsoleWriter and FileWriter are the process-wide log sinks shared
// by the per-kernel loggers; they are configured by the CLI entry
// point before any tests run.
var ConsoleWriter, FileWriter LevelWriter

// LogLevel is the global verbosity; Trace/Debug also enable caller
// annotation in per-kernel loggers (see testArtifact).
var LogLevel zerolog.Level
|
||||
|
||||
// runstate accumulates the totals of a whole pew invocation:
// Overall/Success count completed runs, InternalErrors counts
// tool-side failures that are (by default) excluded from the rate.
type runstate struct {
	Overall, Success float64
	InternalErrors   int
}

// state is the package-global accumulator updated by dumpResult and
// read at the end of PewCmd.Run.
var (
	state runstate
)

// successRate reports the fraction of successful runs.
func successRate(state runstate) float64 {
	rate := state.Success / state.Overall
	return rate
}
|
||||
|
||||
// PewCmd builds and tests an artifact across a set of kernels,
// either locally (qemu) or through a remote out-of-tree server.
type PewCmd struct {
	Max     int64         `help:"test no more than X kernels" default:"100500"`
	Runs    int64         `help:"runs per each kernel" default:"1"`
	Kernel  string        `help:"override kernel regex"`
	RootFS  string        `help:"override rootfs image" type:"existingfile"`
	Guess   bool          `help:"try all defined kernels"`
	Shuffle bool          `help:"randomize kernels test order"`
	Binary  string        `help:"use binary, do not build"`
	Test    string        `help:"override path for test"`
	Dist    string        `help:"build result path" default:"/dev/null"`
	Threads int           `help:"threads" default:"1"`
	Tag     string        `help:"log tagging"`
	Timeout time.Duration `help:"timeout after tool will not spawn new tests"`

	ArtifactConfig string `help:"path to artifact config" type:"path"`

	QemuTimeout           time.Duration `help:"timeout for qemu"`
	QemuAfterStartTimeout time.Duration `help:"timeout after starting of the qemu vm before tests"`
	DockerTimeout         time.Duration `help:"timeout for docker"`

	Threshold             float64 `help:"reliablity threshold for exit code" default:"1.00"`
	IncludeInternalErrors bool    `help:"count internal errors as part of the success rate"`

	Endless        bool          `help:"endless tests"`
	EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
	EndlessStress  string        `help:"endless stress script" type:"existingfile"`

	// Runtime state filled in by Run; excluded from CLI and JSON.
	DB              *sql.DB             `kong:"-" json:"-"`
	Kcfg            config.KernelConfig `kong:"-" json:"-"`
	TimeoutDeadline time.Time           `kong:"-" json:"-"`

	Watch bool `help:"watch job status"`

	// repoName and commit identify the artifact source pushed to
	// the remote server (set by syncRepo).
	repoName string
	commit   string

	// useRemote/remoteAddr mirror the global remote settings.
	useRemote  bool
	remoteAddr string

	// UUID of the job set
	groupUUID string
}
|
||||
|
||||
func (cmd *PewCmd) getRepoName(worktree string, ka artifact.Artifact) {
|
||||
raw, err := exec.Command("git", "--work-tree="+worktree,
|
||||
"rev-list", "--max-parents=0", "HEAD").CombinedOutput()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg(string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
cmd.repoName = fmt.Sprintf("%s-%s", ka.Name, string(raw[:7]))
|
||||
}
|
||||
|
||||
func (cmd *PewCmd) syncRepo(worktree string, ka artifact.Artifact) (err error) {
|
||||
c := client.Client{RemoteAddr: cmd.remoteAddr}
|
||||
|
||||
cmd.getRepoName(worktree, ka)
|
||||
|
||||
raw, err := exec.Command("git", "--work-tree="+worktree,
|
||||
"rev-parse", "HEAD").CombinedOutput()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cmd.commit = strings.TrimSuffix(string(raw), "\n")
|
||||
|
||||
_, err = c.GetRepo(cmd.repoName)
|
||||
if err != nil && err != client.ErrRepoNotFound {
|
||||
log.Error().Err(err).Msg("GetRepo API error")
|
||||
return
|
||||
}
|
||||
|
||||
if err == client.ErrRepoNotFound {
|
||||
log.Warn().Msg("repo not found")
|
||||
log.Info().Msg("add repo")
|
||||
log.Warn().Msgf("%v", spew.Sdump(ka))
|
||||
err = c.AddRepo(api.Repo{Name: cmd.repoName})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = c.PushRepo(api.Repo{Name: cmd.repoName, Path: worktree})
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("push repo error")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Run is the pew entry point: loads kernels (remote or local),
// resolves the artifact config and targets, applies timeout
// overrides, runs the CI loop and finally enforces the reliability
// threshold via the exit error.
func (cmd *PewCmd) Run(g *Globals) (err error) {
	cmd.groupUUID = uuid.New().String()
	log.Info().Str("group", cmd.groupUUID).Msg("")
	cmd.useRemote = g.Remote
	cmd.remoteAddr = g.RemoteAddr

	// Kernel list comes from the server in remote mode, from the
	// local kernels config otherwise.
	if cmd.useRemote {
		c := client.Client{RemoteAddr: cmd.remoteAddr}
		cmd.Kcfg.Kernels, err = c.Kernels()
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	} else {
		cmd.Kcfg, err = config.ReadKernelConfig(
			g.Config.Kernels)
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	}

	if cmd.Timeout != 0 {
		log.Info().Msgf("Set global timeout to %s", cmd.Timeout)
		cmd.TimeoutDeadline = time.Now().Add(cmd.Timeout)
	}

	cmd.DB, err = openDatabase(g.Config.Database)
	if err != nil {
		log.Fatal().Err(err).
			Msgf("Cannot open database %s", g.Config.Database)
	}
	defer cmd.DB.Close()

	// Artifact config defaults to the working directory unless
	// overridden on the command line.
	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}

	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}

	if cmd.useRemote {
		err = cmd.syncRepo(g.WorkDir, ka)
		if err != nil {
			return
		}
	}

	// No targets (or --guess): fan out to every known distro with
	// a match-everything kernel regex.
	if len(ka.Targets) == 0 || cmd.Guess {
		log.Debug().Msg("will use all available targets")

		for _, dist := range distro.List() {
			ka.Targets = append(ka.Targets, artifact.Target{
				Distro: dist,
				Kernel: artifact.Kernel{
					Regex: ".*",
				},
			})
		}
	}

	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	// --kernel distroType:regex replaces the target list entirely.
	if cmd.Kernel != "" {
		var km artifact.Target
		km, err = kernelMask(cmd.Kernel)
		if err != nil {
			return
		}

		ka.Targets = []artifact.Target{km}
	}

	// TODO there was a lib for merge structures
	ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
	ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration

	if cmd.QemuTimeout != 0 {
		log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
		g.Config.Qemu.Timeout.Duration = cmd.QemuTimeout
		ka.Qemu.Timeout.Duration = cmd.QemuTimeout
	}

	if cmd.DockerTimeout != 0 {
		log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
		g.Config.Docker.Timeout.Duration = cmd.DockerTimeout
		ka.Docker.Timeout.Duration = cmd.DockerTimeout
	}

	// Default tag: second-resolution timestamp of this invocation.
	if cmd.Tag == "" {
		cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
	}
	if !cmd.useRemote {
		log.Info().Str("tag", cmd.Tag).Msg("")
	}

	err = cmd.performCI(ka)
	if err != nil {
		return
	}

	// Remote mode: results live on the server, nothing to score
	// locally.
	if cmd.useRemote {
		return
	}

	if state.InternalErrors > 0 {
		s := "not counted towards success rate"
		if cmd.IncludeInternalErrors {
			s = "included in success rate"
		}
		log.Warn().Msgf("%d internal errors "+
			"(%s)", state.InternalErrors, s)
	}

	// Internal errors count as (failed) runs only on request.
	if cmd.IncludeInternalErrors {
		state.Overall += float64(state.InternalErrors)
	}

	msg := fmt.Sprintf("Success rate: %.02f (%d/%d), Threshold: %.02f",
		successRate(state),
		int(state.Success), int(state.Overall),
		cmd.Threshold)

	// Below-threshold reliability is surfaced as a non-nil error
	// so CI pipelines fail.
	if successRate(state) < cmd.Threshold {
		log.Error().Msg(msg)
		err = errors.New("reliability threshold not met")
	} else {
		log.Info().Msg(msg)
	}

	return
}
|
||||
|
||||
func (cmd PewCmd) watchJob(swg *sizedwaitgroup.SizedWaitGroup,
|
||||
slog zerolog.Logger, uuid string) {
|
||||
|
||||
defer swg.Done() // FIXME
|
||||
|
||||
c := client.Client{RemoteAddr: cmd.remoteAddr}
|
||||
|
||||
var err error
|
||||
var st api.Status
|
||||
|
||||
for {
|
||||
st, err = c.JobStatus(uuid)
|
||||
if err != nil {
|
||||
slog.Error().Err(err).Msg("")
|
||||
continue
|
||||
}
|
||||
if st == api.StatusSuccess || st == api.StatusFailure {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
switch st {
|
||||
case api.StatusSuccess:
|
||||
slog.Info().Msg("success")
|
||||
case api.StatusFailure:
|
||||
slog.Warn().Msg("failure")
|
||||
}
|
||||
}
|
||||
|
||||
// remote submits one artifact/kernel combination as a job to the
// remote out-of-tree server and, with --watch, spawns a watcher for
// its status.
func (cmd PewCmd) remote(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {

	defer swg.Done()

	slog := log.With().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	job := api.Job{}
	job.Group = cmd.groupUUID
	job.RepoName = cmd.repoName
	job.Commit = cmd.commit

	job.Artifact = ka
	job.Target = ki

	c := client.Client{RemoteAddr: cmd.remoteAddr}
	uuid, err := c.AddJob(job)
	slog = slog.With().Str("uuid", uuid).Logger()
	if err != nil {
		slog.Error().Err(err).Msg("cannot add job")
		return
	}

	slog.Info().Msg("add")

	if cmd.Watch {
		// FIXME dummy (almost)
		// NOTE(review): watchJob also defers swg.Done(), so in
		// watch mode the group appears to be decremented twice
		// per Add — tracked by the FIXMEs here and there;
		// confirm against sizedwaitgroup semantics.
		go cmd.watchJob(swg, slog, uuid)
	}
}
|
||||
|
||||
// testArtifact builds and tests the artifact against one kernel
// locally. Output is duplicated into a per-kernel file under
// logs/<tag>/ in addition to the global console/file writers.
func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {

	defer swg.Done()

	logdir := "logs/" + cmd.Tag
	err := os.MkdirAll(logdir, os.ModePerm)
	if err != nil {
		log.Error().Err(err).Msgf("mkdir %s", logdir)
		return
	}

	logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
		cmd.Tag,
		ki.Distro.ID.String(),
		ki.Distro.Release,
		ki.KernelRelease,
	)
	f, err := os.Create(logfile)
	if err != nil {
		log.Error().Err(err).Msgf("create %s", logfile)
		return
	}
	defer f.Close()

	// Per-kernel logger: global console + global file + a plain
	// (no color) per-kernel file. Distro/kernel fields are excluded
	// from the per-kernel file since they are constant within it.
	slog := zerolog.New(zerolog.MultiLevelWriter(
		&ConsoleWriter,
		&FileWriter,
		&zerolog.ConsoleWriter{
			Out: f,
			FieldsExclude: []string{
				"distro_release",
				"distro_type",
				"kernel",
			},
			NoColor: true,
		},
	))

	// Caller annotation only at trace/debug verbosity.
	switch LogLevel {
	case zerolog.TraceLevel, zerolog.DebugLevel:
		slog = slog.With().Caller().Logger()
	}

	slog = slog.With().Timestamp().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	// Delegate the build/run/test cycle; dumpResult persists each
	// result via the callback.
	ka.Process(slog, ki,
		cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, result *artifact.Result) {
			dumpResult(q, ka, ki, result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.DB)
		},
	)
}
|
||||
|
||||
func shuffleKernels(a []distro.KernelInfo) []distro.KernelInfo {
|
||||
// Fisher–Yates shuffle
|
||||
for i := len(a) - 1; i > 0; i-- {
|
||||
j := rand.Intn(i + 1)
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func (cmd PewCmd) process(swg *sizedwaitgroup.SizedWaitGroup,
|
||||
ka artifact.Artifact, kernel distro.KernelInfo) {
|
||||
|
||||
if cmd.useRemote {
|
||||
go cmd.remote(swg, ka, kernel)
|
||||
} else {
|
||||
go cmd.testArtifact(swg, ka, kernel)
|
||||
}
|
||||
}
|
||||
|
||||
// performCI schedules tests for every supported, non-blocklisted
// kernel (up to cmd.Max kernels, cmd.Runs runs each), bounded to
// cmd.Threads concurrent workers. Returns an error when no kernel
// matched the artifact's targets.
func (cmd PewCmd) performCI(ka artifact.Artifact) (err error) {
	found := false
	max := cmd.Max

	threadCounter := 0

	swg := sizedwaitgroup.New(cmd.Threads)
	if cmd.Shuffle {
		cmd.Kcfg.Kernels = shuffleKernels(cmd.Kcfg.Kernels)
	}
	for _, kernel := range cmd.Kcfg.Kernels {
		if max <= 0 {
			break
		}

		var supported bool
		supported, err = ka.Supported(kernel)
		if err != nil {
			return
		}

		if kernel.Blocklisted {
			log.Debug().Str("kernel", kernel.KernelVersion).
				Msgf("skip (blocklisted)")
			continue
		}

		// kernel is a loop copy, so this override is local to
		// this iteration's scheduled runs.
		if cmd.RootFS != "" {
			kernel.RootFS = cmd.RootFS
		}

		if supported {
			found = true
			max--
			for i := int64(0); i < cmd.Runs; i++ {
				// Global deadline: stop spawning new
				// tests; already-started ones finish.
				if !cmd.TimeoutDeadline.IsZero() &&
					time.Now().After(cmd.TimeoutDeadline) {

					break
				}
				// Add blocks while cmd.Threads workers
				// are in flight; the worker goroutine
				// calls Done.
				swg.Add()
				// Stagger the first cmd.Threads spawns
				// by one second each to avoid a
				// thundering herd at startup.
				if threadCounter < cmd.Threads {
					time.Sleep(time.Second)
					threadCounter++
				}

				go cmd.process(&swg, ka, kernel)
			}
		}
	}
	swg.Wait()

	if !found {
		err = errors.New("no supported kernels found")
	}

	return
}
|
||||
|
||||
func kernelMask(kernel string) (km artifact.Target, err error) {
|
||||
parts := strings.Split(kernel, ":")
|
||||
if len(parts) != 2 {
|
||||
err = errors.New("kernel is not 'distroType:regex'")
|
||||
return
|
||||
}
|
||||
|
||||
dt, err := distro.NewID(parts[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
km = artifact.Target{
|
||||
Distro: distro.Distro{ID: dt},
|
||||
Kernel: artifact.Kernel{Regex: parts[1]},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func genOkFail(name string, ok bool) (aurv aurora.Value) {
|
||||
s := " " + name
|
||||
if name == "" {
|
||||
s = ""
|
||||
}
|
||||
if ok {
|
||||
s += " SUCCESS "
|
||||
aurv = aurora.BgGreen(aurora.Black(s))
|
||||
} else {
|
||||
s += " FAILURE "
|
||||
aurv = aurora.BgRed(aurora.White(aurora.Bold(s)))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// dumpResult records one artifact/kernel run: updates the global
// success counters, prints a colored verdict, appends the result to
// the database log and optionally copies the built artifact to dist.
func dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
	res *artifact.Result, dist, tag, binary string, db *sql.DB) {

	// TODO refactor

	if res.InternalError != nil {
		// Tool-side failure: excluded from Overall/Success
		// unless --include-internal-errors is set (handled in
		// PewCmd.Run).
		q.Log.Warn().Err(res.InternalError).
			Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
			Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
			Msg("internal")
		res.InternalErrorString = res.InternalError.Error()
		state.InternalErrors += 1
	} else {
		colored := ""

		state.Overall += 1

		if res.Test.Ok {
			state.Success += 1
		}

		// The verdict line depends on artifact type: exploits
		// have no insmod stage, scripts only have a test stage.
		switch ka.Type {
		case artifact.KernelExploit:
			colored = aurora.Sprintf("%s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("LPE", res.Test.Ok))
		case artifact.KernelModule:
			colored = aurora.Sprintf("%s %s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("INSMOD", res.Run.Ok),
				genOkFail("TEST", res.Test.Ok))
		case artifact.Script:
			colored = aurora.Sprintf("%s",
				genOkFail("", res.Test.Ok))
		}

		// Panic takes precedence over timeout in the suffix.
		additional := ""
		if q.KernelPanic {
			additional = "(panic)"
		} else if q.KilledByTimeout {
			additional = "(timeout)"
		}

		if additional != "" {
			q.Log.Info().Msgf("%v %v", colored, additional)
		} else {
			q.Log.Info().Msgf("%v", colored)
		}
	}

	err := addToLog(db, q, ka, ki, res, tag)
	if err != nil {
		q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
	}

	// Save the build artifact only when we built it ourselves and a
	// real dist directory was requested.
	if binary == "" && dist != pathDevNull {
		err = os.MkdirAll(dist, os.ModePerm)
		if err != nil {
			log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
		}

		path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
			ki.Distro.Release, ki.KernelRelease)
		// Kernel modules get the .ko suffix.
		if ka.Type != artifact.KernelExploit {
			path += ".ko"
		}

		err = artifact.CopyFile(res.BuildArtifact, path)
		if err != nil {
			log.Warn().Err(err).Msgf("copy file (%v)", ka)
		}
	}
}
|
179
config/config.go
179
config/config.go
|
@ -5,169 +5,17 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
type KernelMask struct {
|
||||
DistroType DistroType
|
||||
DistroRelease string // 18.04/7.4.1708/9.1
|
||||
ReleaseMask string
|
||||
}
|
||||
|
||||
func (km KernelMask) DockerName() string {
|
||||
distro := strings.ToLower(km.DistroType.String())
|
||||
release := strings.Replace(km.DistroRelease, ".", "__", -1)
|
||||
return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
|
||||
}
|
||||
|
||||
// ArtifactType is the kind of out-of-tree artifact.
type ArtifactType int

const (
	// KernelModule is a loadable kernel module (.ko).
	KernelModule ArtifactType = iota
	// KernelExploit is a userspace kernel exploit.
	KernelExploit
)

// String returns the lowercase artifact type name.
func (at ArtifactType) String() string {
	return [...]string{"module", "exploit"}[at]
}

// UnmarshalTOML decodes an artifact type from its quoted TOML string
// (case-insensitive, substring match).
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	stype := strings.Trim(string(data), `"`)
	stypelower := strings.ToLower(stype)
	if strings.Contains(stypelower, "module") {
		*at = KernelModule
	} else if strings.Contains(stypelower, "exploit") {
		*at = KernelExploit
	} else {
		// fmt.Errorf instead of errors.New(fmt.Sprintf(...)),
		// which go vet/staticcheck (S1028) flags.
		err = fmt.Errorf("Type %s is unsupported", stype)
	}
	return
}

// MarshalTOML encodes the artifact type as a quoted TOML string.
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	default:
		err = fmt.Errorf("Cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}
|
||||
|
||||
type Artifact struct {
|
||||
Name string
|
||||
Type ArtifactType
|
||||
SourcePath string
|
||||
SupportedKernels []KernelMask
|
||||
}
|
||||
|
||||
func (ka Artifact) checkSupport(ki KernelInfo, km KernelMask) (
|
||||
supported bool, err error) {
|
||||
|
||||
if ki.DistroType != km.DistroType {
|
||||
supported = false
|
||||
return
|
||||
}
|
||||
|
||||
// DistroRelease is optional
|
||||
if km.DistroRelease != "" && ki.DistroRelease != km.DistroRelease {
|
||||
supported = false
|
||||
return
|
||||
}
|
||||
|
||||
supported, err = regexp.MatchString(km.ReleaseMask, ki.KernelRelease)
|
||||
return
|
||||
}
|
||||
|
||||
func (ka Artifact) Supported(ki KernelInfo) (supported bool, err error) {
|
||||
for _, km := range ka.SupportedKernels {
|
||||
supported, err = ka.checkSupport(ki, km)
|
||||
if supported {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type DistroType int
|
||||
|
||||
const (
|
||||
Ubuntu DistroType = iota
|
||||
CentOS
|
||||
Debian
|
||||
)
|
||||
|
||||
var DistroTypeStrings = [...]string{"Ubuntu", "CentOS", "Debian"}
|
||||
|
||||
func NewDistroType(dType string) (dt DistroType, err error) {
|
||||
err = dt.UnmarshalTOML([]byte(dType))
|
||||
return
|
||||
}
|
||||
|
||||
func (dt DistroType) String() string {
|
||||
return DistroTypeStrings[dt]
|
||||
}
|
||||
|
||||
func (dt *DistroType) UnmarshalTOML(data []byte) (err error) {
|
||||
sDistro := strings.Trim(string(data), `"`)
|
||||
if strings.EqualFold(sDistro, "Ubuntu") {
|
||||
*dt = Ubuntu
|
||||
} else if strings.EqualFold(sDistro, "CentOS") {
|
||||
*dt = CentOS
|
||||
} else if strings.EqualFold(sDistro, "Debian") {
|
||||
*dt = Debian
|
||||
} else {
|
||||
err = errors.New(fmt.Sprintf("Distro %s is unsupported", sDistro))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (dt DistroType) MarshalTOML() (data []byte, err error) {
|
||||
s := ""
|
||||
switch dt {
|
||||
case Ubuntu:
|
||||
s = "Ubuntu"
|
||||
case CentOS:
|
||||
s = "CentOS"
|
||||
case Debian:
|
||||
s = "Debian"
|
||||
default:
|
||||
err = errors.New(fmt.Sprintf("Cannot marshal %d", dt))
|
||||
}
|
||||
data = []byte(`"` + s + `"`)
|
||||
return
|
||||
}
|
||||
|
||||
type KernelInfo struct {
|
||||
DistroType DistroType
|
||||
DistroRelease string // 18.04/7.4.1708/9.1
|
||||
|
||||
// Must be *exactly* same as in `uname -r`
|
||||
KernelRelease string
|
||||
|
||||
// Build-time information
|
||||
ContainerName string
|
||||
|
||||
// Runtime information
|
||||
KernelPath string
|
||||
InitrdPath string
|
||||
RootFS string
|
||||
}
|
||||
|
||||
// KernelConfig is the ~/.out-of-tree/kernels.toml configuration description
|
||||
type KernelConfig struct {
|
||||
Kernels []KernelInfo
|
||||
Kernels []distro.KernelInfo
|
||||
}
|
||||
|
||||
func readFileAll(path string) (buf []byte, err error) {
|
||||
|
@ -177,10 +25,11 @@ func readFileAll(path string) (buf []byte, err error) {
|
|||
}
|
||||
defer f.Close()
|
||||
|
||||
buf, err = ioutil.ReadAll(f)
|
||||
buf, err = io.ReadAll(f)
|
||||
return
|
||||
}
|
||||
|
||||
// ReadKernelConfig is for read kernels.toml
|
||||
func ReadKernelConfig(path string) (kernelCfg KernelConfig, err error) {
|
||||
buf, err := readFileAll(path)
|
||||
if err != nil {
|
||||
|
@ -194,17 +43,3 @@ func ReadKernelConfig(path string) (kernelCfg KernelConfig, err error) {
|
|||
|
||||
return
|
||||
}
|
||||
|
||||
func ReadArtifactConfig(path string) (artifactCfg Artifact, err error) {
|
||||
buf, err := readFileAll(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = toml.Unmarshal(buf, &artifactCfg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
package dotfiles
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// Directory for config files
|
||||
var Directory string
|
||||
|
||||
func directory() string {
|
||||
if Directory != "" {
|
||||
return Directory
|
||||
}
|
||||
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("get current user")
|
||||
}
|
||||
|
||||
Directory = filepath.Join(usr.HomeDir, ".out-of-tree")
|
||||
|
||||
return Directory
|
||||
}
|
||||
|
||||
// Dir that exist relative to config directory
|
||||
func Dir(s ...string) (dir string) {
|
||||
dir = filepath.Join(append([]string{directory()}, s...)...)
|
||||
err := os.MkdirAll(dir, os.ModePerm)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("mkdir")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// File in existing dir relative to config directory
|
||||
func File(s ...string) (file string) {
|
||||
file = filepath.Join(append([]string{directory()}, s...)...)
|
||||
err := os.MkdirAll(filepath.Dir(file), os.ModePerm)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("mkdir")
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,113 @@
|
|||
package dotfiles
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDirectory(t *testing.T) {
|
||||
testdir := "test"
|
||||
|
||||
Directory = testdir
|
||||
|
||||
if directory() != testdir {
|
||||
t.Fatalf("%s != %s", directory(), testdir)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDir(t *testing.T) {
|
||||
tmpdir, err := os.MkdirTemp("", "out-of-tree_")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
Directory = tmpdir
|
||||
|
||||
for _, testdir := range []string{"a", "a/b", "a/b/c"} {
|
||||
expected := filepath.Join(tmpdir, testdir)
|
||||
t.Log(testdir, "->", expected)
|
||||
resdir := Dir(testdir)
|
||||
if resdir != expected {
|
||||
t.Fatalf("%s != %s", resdir, expected)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
t.Fatal("not a directory")
|
||||
}
|
||||
}
|
||||
|
||||
testdir := []string{"a", "b", "c", "d"}
|
||||
expected := filepath.Join(append([]string{tmpdir}, testdir...)...)
|
||||
|
||||
t.Log(testdir, "->", expected)
|
||||
resdir := Dir(testdir...)
|
||||
if resdir != expected {
|
||||
t.Fatalf("%s != %s", resdir, expected)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
t.Fatal("not a directory")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
tmpdir, err := os.MkdirTemp("", "out-of-tree_")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
Directory = tmpdir
|
||||
|
||||
for _, testfile := range []string{"a", "a/b", "a/b/c"} {
|
||||
expected := filepath.Join(tmpdir, testfile)
|
||||
t.Log(testfile, "->", expected)
|
||||
resfile := File(testfile)
|
||||
if resfile != expected {
|
||||
t.Fatalf("%s != %s", resfile, expected)
|
||||
}
|
||||
|
||||
_, err := os.Stat(expected)
|
||||
if err == nil {
|
||||
t.Fatal("should not exist")
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.Dir(expected))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
t.Fatal("not a directory")
|
||||
}
|
||||
}
|
||||
|
||||
testfile := []string{"a", "b", "c"}
|
||||
expected := filepath.Join(append([]string{tmpdir}, testfile...)...)
|
||||
t.Log(testfile, "->", expected)
|
||||
resdir := Dir(testfile...)
|
||||
if resdir != expected {
|
||||
t.Fatalf("%s != %s", resdir, expected)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
t.Fatal("not a directory")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2019 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
type OutOfTree struct {
|
||||
// Directory for all files if not explicitly specified
|
||||
Directory string
|
||||
|
||||
Kernels string
|
||||
UserKernels string
|
||||
|
||||
Database string
|
||||
|
||||
Qemu struct {
|
||||
Timeout artifact.Duration
|
||||
}
|
||||
|
||||
Docker struct {
|
||||
Timeout artifact.Duration
|
||||
Registry string
|
||||
|
||||
// Commands that will be executed before
|
||||
// the base layer of Dockerfile
|
||||
Commands []distro.Command
|
||||
}
|
||||
}
|
||||
|
||||
func (c *OutOfTree) Decode(ctx *kong.DecodeContext) (err error) {
|
||||
if ctx.Value.Set {
|
||||
return
|
||||
}
|
||||
|
||||
s, err := homedir.Expand(ctx.Scan.Pop().String())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defaultValue, err := homedir.Expand(ctx.Value.Default)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = os.Stat(s)
|
||||
if s != defaultValue && errors.Is(err, os.ErrNotExist) {
|
||||
return errors.New("'" + s + "' does not exist")
|
||||
}
|
||||
|
||||
*c, err = ReadOutOfTreeConf(s)
|
||||
return
|
||||
}
|
||||
|
||||
func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
|
||||
buf, err := readFileAll(path)
|
||||
if err == nil {
|
||||
err = toml.Unmarshal(buf, &c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// It's ok if there's no configuration
|
||||
// then we'll just set default values
|
||||
err = nil
|
||||
}
|
||||
|
||||
if c.Directory != "" {
|
||||
dotfiles.Directory = c.Directory
|
||||
} else {
|
||||
c.Directory = dotfiles.Dir("")
|
||||
}
|
||||
|
||||
if c.Kernels == "" {
|
||||
c.Kernels = dotfiles.File("kernels.toml")
|
||||
}
|
||||
|
||||
if c.UserKernels == "" {
|
||||
c.UserKernels = dotfiles.File("kernels.user.toml")
|
||||
}
|
||||
|
||||
if c.Database == "" {
|
||||
c.Database = dotfiles.File("db.sqlite")
|
||||
}
|
||||
|
||||
if c.Qemu.Timeout.Duration == 0 {
|
||||
c.Qemu.Timeout.Duration = time.Minute
|
||||
}
|
||||
|
||||
if c.Docker.Timeout.Duration == 0 {
|
||||
c.Docker.Timeout.Duration = 8 * time.Minute
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,540 @@
|
|||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
var Runtime = "docker"
|
||||
|
||||
var Registry = ""
|
||||
|
||||
var Timeout time.Duration
|
||||
|
||||
var Commands []distro.Command
|
||||
|
||||
var UseCache = true
|
||||
|
||||
var Prune = true
|
||||
|
||||
type Image struct {
|
||||
Name string
|
||||
Distro distro.Distro
|
||||
}
|
||||
|
||||
func Images() (diis []Image, err error) {
|
||||
cmd := exec.Command(Runtime, "images")
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
rawOutput, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r, err := regexp.Compile("out_of_tree_.*")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
containers := r.FindAll(rawOutput, -1)
|
||||
for _, c := range containers {
|
||||
containerName := strings.Fields(string(c))[0]
|
||||
|
||||
s := strings.Replace(containerName, "__", ".", -1)
|
||||
values := strings.Split(s, "_")
|
||||
distroName, ver := values[3], values[4]
|
||||
|
||||
dii := Image{
|
||||
Name: containerName,
|
||||
}
|
||||
|
||||
dii.Distro.Release = ver
|
||||
dii.Distro.ID, err = distro.NewID(distroName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
diis = append(diis, dii)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Load(localpath string, name string) (err error) {
|
||||
exist := Container{name: name}.Exist()
|
||||
if exist && UseCache {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command(Runtime, "load", "-i", localpath)
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
raw, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msg(string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
cmd = exec.Command(Runtime, "tag", "localhost/"+name, name)
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
raw, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msg(string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func Import(path, name string) (err error) {
|
||||
exist := Container{name: name}.Exist()
|
||||
if exist && UseCache {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command(Runtime, "import", path, name)
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
raw, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msg(string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func Save(name, path string) (err error) {
|
||||
exist := Container{name: name}.Exist()
|
||||
if !exist {
|
||||
err = errors.New("container does not exist")
|
||||
log.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command(Runtime, "save", name, "-o", path)
|
||||
log.Debug().Msgf("%v", cmd)
|
||||
|
||||
raw, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg(string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type Volume struct {
|
||||
Src, Dest string
|
||||
}
|
||||
|
||||
type Container struct {
|
||||
name string
|
||||
dist distro.Distro
|
||||
|
||||
Volumes []Volume
|
||||
|
||||
// Additional arguments
|
||||
Args []string
|
||||
|
||||
Log zerolog.Logger
|
||||
}
|
||||
|
||||
func New(dist distro.Distro) (c Container, err error) {
|
||||
distro := strings.ToLower(dist.ID.String())
|
||||
release := strings.Replace(dist.Release, ".", "__", -1)
|
||||
c.name = fmt.Sprintf("out_of_tree_%s_%s", distro, release)
|
||||
|
||||
c.Log = log.With().
|
||||
Str("container", c.name).
|
||||
Logger()
|
||||
|
||||
c.dist = dist
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: dotfiles.Dir("volumes", c.name, "lib", "modules"),
|
||||
Dest: "/lib/modules",
|
||||
})
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: dotfiles.Dir("volumes", c.name, "usr", "src"),
|
||||
Dest: "/usr/src",
|
||||
})
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: dotfiles.Dir("volumes", c.name, "boot"),
|
||||
Dest: "/boot",
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func NewFromKernelInfo(ki distro.KernelInfo) (
|
||||
c Container, err error) {
|
||||
|
||||
c.name = ki.ContainerName
|
||||
|
||||
c.Log = log.With().
|
||||
Str("container", c.name).
|
||||
Logger()
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: path.Dir(ki.ModulesPath),
|
||||
Dest: "/lib/modules",
|
||||
})
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: filepath.Join(path.Dir(ki.KernelPath), "../usr/src"),
|
||||
Dest: "/usr/src",
|
||||
})
|
||||
|
||||
c.Volumes = append(c.Volumes, Volume{
|
||||
Src: path.Dir(ki.KernelPath),
|
||||
Dest: "/boot",
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c Container) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c Container) Exist() (yes bool) {
|
||||
cmd := exec.Command(Runtime, "images", "-q", c.name)
|
||||
|
||||
c.Log.Debug().Msgf("run %v", cmd)
|
||||
|
||||
raw, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
c.Log.Error().Err(err).Msg(string(raw))
|
||||
return false
|
||||
}
|
||||
|
||||
yes = string(raw) != ""
|
||||
|
||||
if yes {
|
||||
c.Log.Debug().Msg("exist")
|
||||
} else {
|
||||
c.Log.Debug().Msg("does not exist")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c Container) Build(image string, envs, runs []string) (err error) {
|
||||
cdir := dotfiles.Dir("containers", c.name)
|
||||
cfile := filepath.Join(cdir, "Dockerfile")
|
||||
|
||||
cf := "FROM "
|
||||
if Registry != "" {
|
||||
cf += Registry + "/"
|
||||
}
|
||||
cf += image + "\n"
|
||||
|
||||
for _, c := range Commands {
|
||||
// TODO check for distro type
|
||||
cf += "RUN " + c.Command + "\n"
|
||||
}
|
||||
|
||||
for _, e := range envs {
|
||||
cf += "ENV " + e + "\n"
|
||||
}
|
||||
|
||||
for _, c := range runs {
|
||||
cf += "RUN " + c + "\n"
|
||||
}
|
||||
|
||||
buf, err := os.ReadFile(cfile)
|
||||
if err != nil {
|
||||
err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if string(buf) == cf && c.Exist() && UseCache {
|
||||
return
|
||||
}
|
||||
|
||||
err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if c.Exist() {
|
||||
c.Log.Info().Msg("update")
|
||||
} else {
|
||||
c.Log.Info().Msg("build")
|
||||
}
|
||||
|
||||
output, err := c.build(cdir)
|
||||
if err != nil {
|
||||
c.Log.Error().Err(err).Msg(output)
|
||||
return
|
||||
}
|
||||
|
||||
c.Log.Info().Msg("success")
|
||||
return
|
||||
}
|
||||
|
||||
func (c Container) prune() error {
|
||||
c.Log.Debug().Msg("remove dangling or unused images from local storage")
|
||||
return exec.Command(Runtime, "image", "prune", "-f").Run()
|
||||
}
|
||||
|
||||
func (c Container) build(imagePath string) (output string, err error) {
|
||||
if Prune {
|
||||
defer c.prune()
|
||||
}
|
||||
|
||||
args := []string{"build"}
|
||||
if !UseCache {
|
||||
args = append(args, "--pull", "--no-cache")
|
||||
}
|
||||
args = append(args, "-t", c.name, imagePath)
|
||||
|
||||
cmd := exec.Command(Runtime, args...)
|
||||
|
||||
flog := c.Log.With().
|
||||
Str("command", fmt.Sprintf("%v", cmd)).
|
||||
Logger()
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cmd.Stderr = cmd.Stdout
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
for scanner.Scan() {
|
||||
m := scanner.Text()
|
||||
output += m + "\n"
|
||||
flog.Trace().Str("stdout", m).Msg("")
|
||||
}
|
||||
}()
|
||||
|
||||
err = cmd.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func (c Container) Run(workdir string, cmds []string) (out string, err error) {
|
||||
flog := c.Log.With().
|
||||
Str("workdir", workdir).
|
||||
Str("command", fmt.Sprintf("%v", cmds)).
|
||||
Logger()
|
||||
|
||||
var args []string
|
||||
args = append(args, "run", "--rm")
|
||||
args = append(args, c.Args...)
|
||||
if workdir != "" {
|
||||
args = append(args, "-v", workdir+":/work")
|
||||
}
|
||||
|
||||
for _, volume := range c.Volumes {
|
||||
mount := fmt.Sprintf("%s:%s", volume.Src, volume.Dest)
|
||||
args = append(args, "-v", mount)
|
||||
}
|
||||
|
||||
command := "true"
|
||||
for _, c := range cmds {
|
||||
command += fmt.Sprintf(" && %s", c)
|
||||
}
|
||||
|
||||
args = append(args, c.name, "bash", "-c")
|
||||
if workdir != "" {
|
||||
args = append(args, "cd /work && "+command)
|
||||
} else {
|
||||
args = append(args, command)
|
||||
}
|
||||
|
||||
cmd := exec.Command(Runtime, args...)
|
||||
|
||||
flog.Debug().Msgf("%v", cmd)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cmd.Stderr = cmd.Stdout
|
||||
|
||||
if Timeout != 0 {
|
||||
timer := time.AfterFunc(Timeout, func() {
|
||||
flog.Info().Msg("killing container by timeout")
|
||||
|
||||
flog.Debug().Msg("SIGINT")
|
||||
cmd.Process.Signal(os.Interrupt)
|
||||
|
||||
time.Sleep(time.Minute)
|
||||
|
||||
flog.Debug().Msg("SIGKILL")
|
||||
cmd.Process.Kill()
|
||||
})
|
||||
defer timer.Stop()
|
||||
}
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
for scanner.Scan() {
|
||||
m := scanner.Text()
|
||||
out += m + "\n"
|
||||
flog.Trace().Str("stdout", m).Msg("")
|
||||
}
|
||||
}()
|
||||
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
|
||||
err, cmds, out)
|
||||
err = errors.New(e)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func FindKernel(entries []os.DirEntry, kname string) (name string, err error) {
|
||||
for _, e := range entries {
|
||||
var fi os.FileInfo
|
||||
fi, err = e.Info()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(fi.Name(), "vmlinuz") {
|
||||
if strings.Contains(fi.Name(), kname) {
|
||||
name = fi.Name()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("cannot find kernel")
|
||||
return
|
||||
}
|
||||
|
||||
func FindInitrd(entries []os.DirEntry, kname string) (name string, err error) {
|
||||
for _, e := range entries {
|
||||
var fi os.FileInfo
|
||||
fi, err = e.Info()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(fi.Name(), "initrd") ||
|
||||
strings.HasPrefix(fi.Name(), "initramfs") {
|
||||
|
||||
if strings.Contains(fi.Name(), kname) {
|
||||
name = fi.Name()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("cannot find kernel")
|
||||
return
|
||||
}
|
||||
|
||||
func (c Container) Kernels() (kernels []distro.KernelInfo, err error) {
|
||||
if !c.Exist() {
|
||||
return
|
||||
}
|
||||
|
||||
var libmodules, boot string
|
||||
for _, volume := range c.Volumes {
|
||||
switch volume.Dest {
|
||||
case "/lib/modules":
|
||||
libmodules = volume.Src
|
||||
case "/boot":
|
||||
boot = volume.Src
|
||||
}
|
||||
}
|
||||
|
||||
moddirs, err := os.ReadDir(libmodules)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bootfiles, err := os.ReadDir(boot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, e := range moddirs {
|
||||
var krel os.FileInfo
|
||||
krel, err = e.Info()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.Log.Debug().Msgf("generate config entry for %s", krel.Name())
|
||||
|
||||
var kernelFile, initrdFile string
|
||||
kernelFile, err = FindKernel(bootfiles, krel.Name())
|
||||
if err != nil {
|
||||
c.Log.Warn().Msgf("cannot find kernel %s", krel.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
initrdFile, err = FindInitrd(bootfiles, krel.Name())
|
||||
if err != nil {
|
||||
c.Log.Warn().Msgf("cannot find initrd %s", krel.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
ki := distro.KernelInfo{
|
||||
Distro: c.dist,
|
||||
KernelVersion: krel.Name(),
|
||||
KernelRelease: krel.Name(),
|
||||
ContainerName: c.name,
|
||||
|
||||
KernelPath: filepath.Join(boot, kernelFile),
|
||||
InitrdPath: filepath.Join(boot, initrdFile),
|
||||
ModulesPath: filepath.Join(libmodules, krel.Name()),
|
||||
|
||||
RootFS: dotfiles.File("images", c.dist.RootFS()),
|
||||
}
|
||||
|
||||
kernels = append(kernels, ki)
|
||||
}
|
||||
|
||||
for _, cmd := range []string{
|
||||
"find /boot -type f -exec chmod a+r {} \\;",
|
||||
} {
|
||||
_, err = c.Run(dotfiles.Dir("tmp"), []string{cmd})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,302 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/daemon/db"
|
||||
)
|
||||
|
||||
type cmdenv struct {
|
||||
Conn net.Conn
|
||||
|
||||
Log zerolog.Logger
|
||||
|
||||
DB *sql.DB
|
||||
|
||||
WG *sync.WaitGroup
|
||||
|
||||
KernelConfig string
|
||||
}
|
||||
|
||||
func command(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
e.Log.Trace().Msgf("%v", spew.Sdump(req))
|
||||
defer e.Log.Trace().Msgf("%v", spew.Sdump(resp))
|
||||
|
||||
e.WG.Add(1)
|
||||
defer e.WG.Done()
|
||||
|
||||
e.Log.Debug().Msgf("%v", req.Command)
|
||||
|
||||
switch req.Command {
|
||||
case api.RawMode:
|
||||
err = rawMode(req, e)
|
||||
case api.AddJob:
|
||||
err = addJob(req, resp, e)
|
||||
case api.ListJobs:
|
||||
err = listJobs(req, resp, e)
|
||||
case api.AddRepo:
|
||||
err = addRepo(req, resp, e)
|
||||
case api.ListRepos:
|
||||
err = listRepos(resp, e)
|
||||
case api.Kernels:
|
||||
err = kernels(resp, e)
|
||||
case api.JobStatus:
|
||||
err = jobStatus(req, resp, e)
|
||||
case api.JobLogs:
|
||||
err = jobLogs(req, resp, e)
|
||||
default:
|
||||
err = errors.New("unknown command")
|
||||
}
|
||||
|
||||
resp.Err = err
|
||||
return
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
log zerolog.Logger
|
||||
}
|
||||
|
||||
func (lw logWriter) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
//lw.log.Trace().Msgf("%v", strconv.Quote(string(p)))
|
||||
return
|
||||
}
|
||||
|
||||
func rawMode(req *api.Req, e cmdenv) (err error) {
|
||||
uuid := uuid.New().String()
|
||||
|
||||
lwsend := logWriter{log.With().Str("uuid", uuid).Str("git", "send").Logger()}
|
||||
lwrecv := logWriter{log.With().Str("uuid", uuid).Str("git", "recv").Logger()}
|
||||
|
||||
conn, err := net.Dial("tcp", ":9418")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("dial")
|
||||
return
|
||||
}
|
||||
|
||||
go io.Copy(e.Conn, io.TeeReader(conn, lwrecv))
|
||||
io.Copy(conn, io.TeeReader(e.Conn, lwsend))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func listJobs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
var params api.ListJobsParams
|
||||
err = req.GetData(¶ms)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
jobs, err := db.Jobs(e.DB, "updated >= ?", params.UpdatedAfter)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var result []api.Job
|
||||
for _, j := range jobs {
|
||||
if params.Group != "" && j.Group != params.Group {
|
||||
continue
|
||||
}
|
||||
if params.Repo != "" && j.RepoName != params.Repo {
|
||||
continue
|
||||
}
|
||||
if params.Commit != "" && j.Commit != params.Commit {
|
||||
continue
|
||||
}
|
||||
if params.Status != "" && j.Status != params.Status {
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, j)
|
||||
}
|
||||
|
||||
resp.SetData(&result)
|
||||
return
|
||||
}
|
||||
|
||||
func addJob(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
var job api.Job
|
||||
err = req.GetData(&job)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
job.GenUUID()
|
||||
|
||||
job.Created = time.Now()
|
||||
|
||||
var repos []api.Repo
|
||||
repos, err = db.Repos(e.DB)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, r := range repos {
|
||||
if job.RepoName == r.Name {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
err = errors.New("repo does not exist")
|
||||
return
|
||||
}
|
||||
|
||||
if job.RepoName == "" {
|
||||
err = errors.New("repo name cannot be empty")
|
||||
return
|
||||
}
|
||||
|
||||
if job.Commit == "" {
|
||||
err = errors.New("invalid commit")
|
||||
return
|
||||
}
|
||||
|
||||
err = db.AddJob(e.DB, &job)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp.SetData(&job.UUID)
|
||||
return
|
||||
}
|
||||
|
||||
func listRepos(resp *api.Resp, e cmdenv) (err error) {
|
||||
repos, err := db.Repos(e.DB)
|
||||
|
||||
if err != nil {
|
||||
e.Log.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
for i := range repos {
|
||||
repos[i].Path = dotfiles.Dir("daemon/repos",
|
||||
repos[i].Name)
|
||||
}
|
||||
|
||||
log.Trace().Msgf("%v", spew.Sdump(repos))
|
||||
resp.SetData(&repos)
|
||||
return
|
||||
}
|
||||
|
||||
func addRepo(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
var repo api.Repo
|
||||
err = req.GetData(&repo)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var repos []api.Repo
|
||||
repos, err = db.Repos(e.DB)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, r := range repos {
|
||||
log.Debug().Msgf("%v, %v", r, repo.Name)
|
||||
if repo.Name == r.Name {
|
||||
err = fmt.Errorf("repo already exist")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("git", "init", "--bare")
|
||||
|
||||
cmd.Dir = dotfiles.Dir("daemon/repos", repo.Name)
|
||||
|
||||
var out []byte
|
||||
out, err = cmd.Output()
|
||||
e.Log.Debug().Msgf("%v -> %v\n%v", cmd, err, string(out))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = db.AddRepo(e.DB, &repo)
|
||||
return
|
||||
}
|
||||
|
||||
func kernels(resp *api.Resp, e cmdenv) (err error) {
|
||||
kcfg, err := config.ReadKernelConfig(e.KernelConfig)
|
||||
if err != nil {
|
||||
e.Log.Error().Err(err).Msg("read kernels config")
|
||||
return
|
||||
}
|
||||
|
||||
e.Log.Info().Msgf("send back %d kernels", len(kcfg.Kernels))
|
||||
resp.SetData(&kcfg.Kernels)
|
||||
return
|
||||
}
|
||||
|
||||
func jobLogs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
var uuid string
|
||||
err = req.GetData(&uuid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
logdir := filepath.Join(dotfiles.File("daemon/logs"), uuid)
|
||||
if _, err = os.Stat(logdir); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(logdir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var logs []api.JobLog
|
||||
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
logfile := filepath.Join(logdir, f.Name())
|
||||
|
||||
var buf []byte
|
||||
buf, err = os.ReadFile(logfile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
logs = append(logs, api.JobLog{
|
||||
Name: f.Name(),
|
||||
Text: string(buf),
|
||||
})
|
||||
}
|
||||
|
||||
resp.SetData(&logs)
|
||||
return
|
||||
}
|
||||
|
||||
func jobStatus(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
|
||||
var uuid string
|
||||
err = req.GetData(&uuid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
st, err := db.JobStatus(e.DB, uuid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.SetData(&st)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,247 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"io"
|
||||
"net"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/daemon/db"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
type Daemon struct {
|
||||
Threads int
|
||||
Resources *Resources
|
||||
|
||||
db *sql.DB
|
||||
kernelConfig string
|
||||
|
||||
shutdown bool
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func Init(kernelConfig string) (d *Daemon, err error) {
|
||||
d = &Daemon{}
|
||||
d.Threads = runtime.NumCPU()
|
||||
d.Resources = NewResources()
|
||||
|
||||
d.kernelConfig = kernelConfig
|
||||
|
||||
d.wg.Add(1) // matches with db.Close()
|
||||
d.db, err = db.OpenDatabase(dotfiles.File("daemon/daemon.db"))
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("cannot open daemon.db")
|
||||
}
|
||||
|
||||
log.Info().Msgf("database %s", dotfiles.File("daemon/daemon.db"))
|
||||
return
|
||||
}
|
||||
|
||||
// Kill requests shutdown of the main loop, closes the job database
// and releases the WaitGroup slot taken in Init.
func (d *Daemon) Kill() {
	// Signal Daemon() and its job loop to wind down.
	d.shutdown = true

	d.db.Close()
	d.wg.Done() // matches d.wg.Add(1) in Init
}
|
||||
|
||||
// Daemon is the main scheduling loop: it polls the job table once a
// second and dispatches waiting jobs to a bounded worker pool of
// d.Threads goroutines. Blocks until Kill sets d.shutdown, then waits
// for in-flight jobs to finish.
func (d *Daemon) Daemon() {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	// sizedwaitgroup bounds the number of concurrently running jobs.
	swg := sizedwaitgroup.New(d.Threads)
	log.Info().Int("threads", d.Threads).Msg("start")

	// first marks the initial pass, used to recover jobs that were
	// left in "running" state by a previous daemon instance.
	first := true

	for !d.shutdown {
		d.wg.Add(1)

		jobs, err := db.Jobs(d.db, "")
		if err != nil && !d.shutdown {
			log.Error().Err(err).Msg("")
			d.wg.Done()
			// Back off: the database is likely unavailable.
			time.Sleep(time.Minute)
			continue
		}

		for _, job := range jobs {
			if d.shutdown {
				break
			}

			pj := newJobProcessor(job, d.db)

			// On the first pass, re-queue jobs orphaned in the
			// running state by a previous daemon run.
			if first && job.Status == api.StatusRunning {
				pj.SetStatus(api.StatusWaiting)
				continue
			}

			// Newly submitted jobs move to the waiting queue and
			// are picked up on a later pass.
			if job.Status == api.StatusNew {
				pj.SetStatus(api.StatusWaiting)
				continue
			}

			// Only waiting jobs are dispatched.
			if job.Status != api.StatusWaiting {
				continue
			}

			// swg.Add blocks while d.Threads jobs are in flight.
			swg.Add()
			go func(pj jobProcessor) {
				defer swg.Done()
				pj.Process(d.Resources)
				time.Sleep(time.Second)
			}(pj)
		}

		first = false

		d.wg.Done()
		time.Sleep(time.Second)
	}

	// Drain all in-flight job goroutines before returning.
	swg.Wait()
}
|
||||
|
||||
// handler serves one client connection: decode a request, run the
// matching command, then (unless the client switched to raw mode)
// encode the response back on the same connection. The connection is
// always closed on return.
func handler(conn net.Conn, e cmdenv) {
	defer conn.Close()

	resp := api.NewResp()

	// Tag every log line with the response UUID and peer address.
	e.Log = log.With().
		Str("resp_uuid", resp.UUID).
		Str("remote_addr", conn.RemoteAddr().String()).
		Logger()

	e.Log.Info().Msg("")

	var req api.Req

	// The response is encoded in a defer so it is sent even when
	// decode/command fail; raw-mode commands own the wire themselves.
	defer func() {
		if req.Command != api.RawMode {
			resp.Encode(conn)
		} else {
			log.Debug().Msg("raw mode, not encode response")
		}
	}()

	err := req.Decode(conn)
	if err != nil {
		e.Log.Error().Err(err).Msg("cannot decode")
		return
	}

	err = command(&req, &resp, e)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}
}
|
||||
|
||||
// Listen serves the daemon API over TLS on addr and, in a background
// goroutine, exposes the daemon/repos directory via `git daemon` on
// port 9418 (used by jobProcessor.Process to clone job sources). A
// self-signed cert/key pair is generated with openssl on first run.
// Blocks forever accepting connections; one goroutine per client.
func (d *Daemon) Listen(addr string) {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	go func() {
		// Export all job repositories read/write over the git
		// protocol; push (receive-pack) is enabled deliberately.
		repodir := dotfiles.Dir("daemon/repos")
		git := exec.Command("git", "daemon", "--port=9418", "--verbose",
			"--reuseaddr",
			"--export-all", "--base-path="+repodir,
			"--enable=receive-pack",
			"--enable=upload-pack",
			repodir)

		stdout, err := git.StdoutPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}

		// Mirror git daemon output into our structured log.
		go io.Copy(logWriter{log: log.Logger}, stdout)

		stderr, err := git.StderrPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}

		go io.Copy(logWriter{log: log.Logger}, stderr)

		log.Debug().Msgf("start %v", git)
		// NOTE(review): git.Start's error is ignored here; a start
		// failure only surfaces through git.Wait below — confirm
		// that is acceptable.
		git.Start()
		defer func() {
			log.Debug().Msgf("stop %v", git)
		}()

		err = git.Wait()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
	}()

	// Generate a self-signed certificate on first run; clients are
	// expected to copy cert+key for mutual authentication.
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Info().Msg("No cert.pem, generating...")
		cmd := exec.Command("openssl",
			"req", "-batch", "-newkey", "rsa:2048",
			"-new", "-nodes", "-x509",
			"-subj", "/CN=*",
			"-addext", "subjectAltName = DNS:*",
			"-out", dotfiles.File("daemon/cert.pem"),
			"-keyout", dotfiles.File("daemon/key.pem"))

		out, err := cmd.Output()
		if err != nil {
			log.Error().Err(err).Msg(string(out))
			return
		}
	}

	log.Info().Msg("copy to client:")
	log.Info().Msgf("cert: %s, key: %s",
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))

	cert, err := tls.LoadX509KeyPair(dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("LoadX509KeyPair")
	}
	tlscfg := &tls.Config{Certificates: []tls.Certificate{cert}}

	l, err := tls.Listen("tcp", addr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("listen")
	}

	log.Info().Str("addr", ":9418").Msg("git")
	log.Info().Str("addr", addr).Msg("daemon")

	// Accept loop: never returns; handler closes each conn.
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}
		log.Info().Msgf("accept %s", conn.RemoteAddr())

		e := cmdenv{
			DB:           d.db,
			WG:           &d.wg,
			Conn:         conn,
			KernelConfig: d.kernelConfig,
		}

		go handler(conn, e)
	}
}
|
|
@ -0,0 +1,15 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.Logger = zerolog.New(zerolog.ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
NoColor: true,
|
||||
})
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// Change on ANY database update
|
||||
const currentDatabaseVersion = 1
|
||||
|
||||
const versionField = "db_version"
|
||||
|
||||
// createMetadataTable ensures the key/value metadata table exists.
func createMetadataTable(db *sql.DB) (err error) {
	const query = `
	CREATE TABLE IF NOT EXISTS metadata (
		id	INTEGER PRIMARY KEY,
		key	TEXT UNIQUE,
		value	TEXT
	)`
	_, err = db.Exec(query)
	return
}
|
||||
|
||||
// metaChkValue reports whether a metadata row with the given key
// exists. Used to detect a fresh (schema-less) database.
func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
	// Named `query` (not `sql`) so the local does not shadow the
	// imported database/sql package.
	query := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&exist)
	return
}
|
||||
|
||||
// metaGetValue returns the value stored for key in the metadata table.
func metaGetValue(db *sql.DB, key string) (value string, err error) {
	stmt, err := db.Prepare("SELECT value FROM metadata " +
		"WHERE key = $1")
	if err != nil {
		return
	}
	defer stmt.Close()

	row := stmt.QueryRow(key)
	err = row.Scan(&value)
	return
}
|
||||
|
||||
// metaSetValue inserts or overwrites the metadata row for key.
func metaSetValue(db *sql.DB, key, value string) (err error) {
	stmt, err := db.Prepare("INSERT OR REPLACE INTO metadata " +
		"(key, value) VALUES ($1, $2)")
	if err != nil {
		return
	}
	defer stmt.Close()

	if _, err = stmt.Exec(key, value); err != nil {
		return
	}
	return
}
|
||||
|
||||
func getVersion(db *sql.DB) (version int, err error) {
|
||||
s, err := metaGetValue(db, versionField)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version, err = strconv.Atoi(s)
|
||||
return
|
||||
}
|
||||
|
||||
func createSchema(db *sql.DB) (err error) {
|
||||
err = createMetadataTable(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = createJobTable(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = createRepoTable(db)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// OpenDatabase opens (and if necessary bootstraps) the daemon's
// sqlite database at path. A fresh database gets the full schema plus
// a version marker; an existing one must match currentDatabaseVersion
// exactly or an error is returned.
func OpenDatabase(path string) (db *sql.DB, err error) {
	db, err = sql.Open("sqlite3", path)
	if err != nil {
		return
	}

	// sqlite handles a single writer; serialize all access.
	db.SetMaxOpenConns(1)

	// Error deliberately ignored: on a brand-new database the
	// metadata table does not exist yet, which is exactly the
	// "not exists" case handled below.
	exists, _ := metaChkValue(db, versionField)
	if !exists {
		err = createSchema(db)
		if err != nil {
			return
		}

		err = metaSetValue(db, versionField,
			strconv.Itoa(currentDatabaseVersion))
		return
	}

	version, err := getVersion(db)
	if err != nil {
		return
	}

	// No migration support: refuse any other schema version.
	if version != currentDatabaseVersion {
		err = fmt.Errorf("database is not supported (%d instead of %d)",
			version, currentDatabaseVersion)
		return
	}

	return
}
|
|
@ -0,0 +1,31 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// tmpdb creates a temporary sqlite file and opens it through
// OpenDatabase. Cleanup (file removal, db.Close) is left to the
// caller, hence the commented-out defers.
func tmpdb(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(file.Name())

	db, err = OpenDatabase(file.Name())
	assert.Nil(t, err)
	// defer db.Close()

	return
}
|
||||
|
||||
// TestOpenDatabase checks that a database created by OpenDatabase can
// be re-opened (i.e. the stored schema version is accepted).
func TestOpenDatabase(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	db.Close()

	// Second open exercises the version-check path.
	db, err := OpenDatabase(file.Name())
	assert.Nil(t, err)
	db.Close()
}
|
|
@ -0,0 +1,193 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"encoding/gob"
|
||||
"time"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
)
|
||||
|
||||
// createJobTable ensures the job table exists. The config and target
// columns hold gob-encoded api.Artifact / target blobs; "commit" is
// quoted because it is an SQL keyword.
func createJobTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS job (
		id		INTEGER PRIMARY KEY,
		updated		INT,
		uuid		TEXT,
		group_uuid	TEXT,
		repo		TEXT,
		"commit"	TEXT,
		description	TEXT,
		config		TEXT,
		target		TEXT,
		created		INT,
		started		INT,
		finished	INT,
		status		TEXT DEFAULT "new"
	)`)
	return
}
|
||||
|
||||
// AddJob inserts job into the job table and stores the generated row
// id back into job.ID. Artifact and Target are gob-serialized into
// the config/target columns; timestamps are stored as Unix seconds.
// Status is not set here — the column default ("new") applies.
func AddJob(db *sql.DB, job *api.Job) (err error) {
	stmt, err := db.Prepare(`INSERT INTO job (updated, uuid, group_uuid, repo, "commit", ` +
		`description, config, target, created, started, finished) ` +
		`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);`)
	if err != nil {
		return
	}

	defer stmt.Close()

	// gob-encode the artifact configuration.
	var abuf bytes.Buffer
	err = gob.NewEncoder(&abuf).Encode(job.Artifact)
	if err != nil {
		return
	}
	config := abuf.Bytes()

	// gob-encode the target description.
	var tbuf bytes.Buffer
	err = gob.NewEncoder(&tbuf).Encode(job.Target)
	if err != nil {
		return
	}
	target := tbuf.Bytes()

	res, err := stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit, job.Description, config, target,
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(),
	)
	if err != nil {
		return
	}

	job.ID, err = res.LastInsertId()
	return
}
|
||||
|
||||
// UpdateJob rewrites every mutable column of the row identified by
// job.ID, refreshing the updated timestamp and re-serializing
// Artifact/Target with gob (mirrors AddJob's encoding).
func UpdateJob(db *sql.DB, job *api.Job) (err error) {
	stmt, err := db.Prepare(`UPDATE job ` +
		`SET updated=$1, uuid=$2, group_uuid=$3, repo=$4, ` +
		`"commit"=$5, description=$6, config=$7, target=$8, ` +
		`created=$9, started=$10, finished=$11, ` +
		`status=$12 ` +
		`WHERE id=$13`)
	if err != nil {
		return
	}
	defer stmt.Close()

	// gob-encode the artifact configuration.
	var abuf bytes.Buffer
	err = gob.NewEncoder(&abuf).Encode(job.Artifact)
	if err != nil {
		return
	}
	config := abuf.Bytes()

	// gob-encode the target description.
	var tbuf bytes.Buffer
	err = gob.NewEncoder(&tbuf).Encode(job.Target)
	if err != nil {
		return
	}
	target := tbuf.Bytes()

	_, err = stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit, job.Description,
		config, target,
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(), job.Status, job.ID)
	return
}
|
||||
|
||||
// scanJob decodes one job row through the provided scan function
// (rows.Scan or row.Scan), gob-decoding the config/target blobs and
// converting Unix-second columns back to time.Time. The column order
// must match the SELECT lists used in Jobs and Job.
func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
	var config, target []byte
	var updated, created, started, finished int64
	err = scan(&job.ID, &updated, &job.UUID, &job.Group,
		&job.RepoName, &job.Commit, &job.Description,
		&config, &target,
		&created, &started, &finished, &job.Status)
	if err != nil {
		return
	}

	// Decode the gob blobs written by AddJob/UpdateJob.
	abuf := bytes.NewBuffer(config)
	err = gob.NewDecoder(abuf).Decode(&job.Artifact)
	if err != nil {
		return
	}

	tbuf := bytes.NewBuffer(target)
	err = gob.NewDecoder(tbuf).Decode(&job.Target)
	if err != nil {
		return
	}

	job.UpdatedAt = time.Unix(updated, 0)
	job.Created = time.Unix(created, 0)
	job.Started = time.Unix(started, 0)
	job.Finished = time.Unix(finished, 0)
	return
}
|
||||
|
||||
// Jobs returns all jobs matching the optional where clause; values
// referenced as $1, $2, … in where are passed via args.
//
// NOTE(review): `where` itself is concatenated into the SQL string,
// so it must only ever come from trusted, in-process callers — verify
// no user-controlled text reaches it.
func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
	q := `SELECT id, updated, uuid, group_uuid, ` +
		`repo, "commit", description, config, target, created, ` +
		`started, finished, status FROM job`
	if len(where) != 0 {
		q += ` WHERE ` + where
	}
	stmt, err := db.Prepare(q)
	if err != nil {
		return
	}

	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var job api.Job
		job, err = scanJob(rows.Scan)
		if err != nil {
			return
		}
		jobs = append(jobs, job)
	}

	return
}
|
||||
|
||||
// Job fetches a single job by its UUID. The SELECT column order must
// stay in sync with scanJob.
func Job(db *sql.DB, uuid string) (job api.Job, err error) {
	stmt, err := db.Prepare(`SELECT id, updated, uuid, ` +
		`group_uuid, ` +
		`repo, "commit", description, config, target, ` +
		`created, started, finished, status ` +
		`FROM job WHERE uuid=$1`)
	if err != nil {
		return
	}
	defer stmt.Close()

	return scanJob(stmt.QueryRow(uuid).Scan)
}
|
||||
|
||||
func JobStatus(db *sql.DB, uuid string) (st api.Status, err error) {
|
||||
stmt, err := db.Prepare(`SELECT status FROM job ` +
|
||||
`WHERE uuid=$1`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
err = stmt.QueryRow(uuid).Scan(&st)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,50 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
)
|
||||
|
||||
// TestJobTable exercises the full job CRUD surface: insert, update
// (group + status), list, fetch by UUID, and status lookup.
func TestJobTable(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	defer db.Close()

	job := api.Job{
		RepoName: "testname",
		Commit:   "test",
		Group:    uuid.New().String(),
	}

	err := AddJob(db, &job)
	assert.Nil(t, err)

	// Mutate the row and persist via UpdateJob.
	job.Group = uuid.New().String()

	job.Status = api.StatusSuccess

	err = UpdateJob(db, &job)
	assert.Nil(t, err)

	jobs, err := Jobs(db, "")
	assert.Nil(t, err)

	assert.Equal(t, 1, len(jobs))

	assert.Equal(t, job.Group, jobs[0].Group)

	job, err = Job(db, job.UUID)
	assert.Nil(t, err)

	assert.Equal(t, api.StatusSuccess, job.Status)

	st, err := JobStatus(db, job.UUID)
	assert.Nil(t, err)

	assert.Equal(t, job.Status, st)
}
|
|
@ -0,0 +1,61 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
)
|
||||
|
||||
// createRepoTable ensures the repo table (unique repository names)
// exists.
func createRepoTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS repo (
		id	INTEGER PRIMARY KEY,
		name	TEXT UNIQUE
	)`)
	return
}
|
||||
|
||||
func AddRepo(db *sql.DB, repo *api.Repo) (err error) {
|
||||
stmt, err := db.Prepare(`INSERT INTO repo (name) ` +
|
||||
`VALUES ($1);`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer stmt.Close()
|
||||
|
||||
res, err := stmt.Exec(repo.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
repo.ID, err = res.LastInsertId()
|
||||
return
|
||||
}
|
||||
|
||||
func Repos(db *sql.DB) (repos []api.Repo, err error) {
|
||||
stmt, err := db.Prepare(`SELECT id, name FROM repo`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer stmt.Close()
|
||||
|
||||
rows, err := stmt.Query()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var repo api.Repo
|
||||
err = rows.Scan(&repo.ID, &repo.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
repos = append(repos, repo)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
)
|
||||
|
||||
// testCreateRepoTable opens a bare temporary sqlite database with
// only the repo table created (bypassing OpenDatabase's schema and
// version bookkeeping). Cleanup is the caller's responsibility.
func testCreateRepoTable(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(tempDB.Name())

	db, err = sql.Open("sqlite3", file.Name())
	assert.Nil(t, err)
	// defer db.Close()

	// Match OpenDatabase: single connection for sqlite.
	db.SetMaxOpenConns(1)

	err = createRepoTable(db)
	assert.Nil(t, err)

	return
}
|
||||
|
||||
// TestRepoTable checks AddRepo round-trips through Repos, including
// the ID written back by AddRepo.
func TestRepoTable(t *testing.T) {
	file, db := testCreateRepoTable(t)
	defer db.Close()
	defer os.Remove(file.Name())

	repo := api.Repo{Name: "testname"}

	err := AddRepo(db, &repo)
	assert.Nil(t, err)

	repos, err := Repos(db)
	assert.Nil(t, err)

	assert.Equal(t, 1, len(repos))

	// Equality includes the ID set by AddRepo.
	assert.Equal(t, repo, repos[0])
}
|
|
@ -0,0 +1,177 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/daemon/db"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
)
|
||||
|
||||
// jobProcessor executes a single job and persists its state changes.
type jobProcessor struct {
	job api.Job        // the job being processed (mutated during Process)
	log zerolog.Logger // pre-tagged with job uuid/group
	db  *sql.DB        // where status updates are written
}
|
||||
|
||||
func newJobProcessor(job api.Job, db *sql.DB) (pj jobProcessor) {
|
||||
pj.job = job
|
||||
pj.db = db
|
||||
pj.log = log.With().
|
||||
Str("uuid", job.UUID).
|
||||
Str("group", job.Group).
|
||||
Logger()
|
||||
return
|
||||
}
|
||||
|
||||
// Update persists the processor's current job state to the database,
// logging (and returning) any failure.
func (pj jobProcessor) Update() (err error) {
	err = db.UpdateJob(pj.db, &pj.job)
	if err != nil {
		pj.log.Error().Err(err).Msgf("update job %v", pj.job)
	}
	return
}
|
||||
|
||||
// SetStatus logs the transition, updates the in-memory status and
// persists it immediately via Update.
func (pj jobProcessor) SetStatus(status api.Status) (err error) {
	pj.log.Info().Msgf(`%v -> %v`, pj.job.Status, status)
	pj.job.Status = status
	err = pj.Update()
	return
}
|
||||
|
||||
// Process runs a waiting job end to end: allocate CPU/RAM, clone the
// job's repository from the local git daemon, check out the commit,
// build/run/test the artifact, then write per-stage logs under
// daemon/logs/<uuid>. The final status (success/failure) is decided
// in a defer based on err when this function returns.
func (pj *jobProcessor) Process(res *Resources) (err error) {
	if pj.job.Status != api.StatusWaiting {
		err = errors.New("job is not available to process")
		return
	}

	// Fill in qemu defaults so resource accounting has real numbers.
	if pj.job.Artifact.Qemu.Cpus == 0 {
		pj.job.Artifact.Qemu.Cpus = qemu.DefaultCPUs
	}

	if pj.job.Artifact.Qemu.Memory == 0 {
		pj.job.Artifact.Qemu.Memory = qemu.DefaultMemory
	}

	err = res.Allocate(pj.job)
	if err != nil {
		return
	}

	defer func() {
		res.Release(pj.job)
	}()

	log.Info().Msgf("process job %v", pj.job.UUID)

	pj.SetStatus(api.StatusRunning)
	pj.job.Started = time.Now()

	// Decide and persist the terminal status based on err at return.
	defer func() {
		pj.job.Finished = time.Now()
		if err != nil {
			pj.SetStatus(api.StatusFailure)
		} else {
			pj.SetStatus(api.StatusSuccess)
		}
	}()

	var tmp string
	tmp, err = os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		pj.log.Error().Err(err).Msg("mktemp")
		return
	}
	defer os.RemoveAll(tmp)

	tmprepo := filepath.Join(tmp, "repo")

	pj.log.Debug().Msgf("temp repo: %v", tmprepo)

	// Clone through the git daemon started in Listen (port 9418).
	remote := fmt.Sprintf("git://localhost:9418/%s", pj.job.RepoName)

	pj.log.Debug().Msgf("remote: %v", remote)

	var raw []byte

	cmd := exec.Command("git", "clone", remote, tmprepo)

	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	cmd = exec.Command("git", "checkout", pj.job.Commit)

	cmd.Dir = tmprepo

	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	pj.job.Artifact.SourcePath = tmprepo

	var result *artifact.Result
	var dq *qemu.System

	// NOTE(review): Artifact.Process's outcome is not checked; if
	// the callback is never invoked, result and dq remain nil and
	// the WriteFile calls below will dereference nil — confirm.
	pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
			res *artifact.Result) {

			result = res
			dq = q
		},
	)

	logdir := dotfiles.Dir("daemon/logs", pj.job.UUID)

	// Per-stage logs; write failures are logged but do not fail the
	// job (err is reassigned below either way).
	err = os.WriteFile(filepath.Join(logdir, "build.log"),
		[]byte(result.Build.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "run.log"),
		[]byte(result.Run.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "test.log"),
		[]byte(result.Test.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "qemu.log"),
		[]byte(dq.Stdout), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	pj.log.Info().Msgf("build %v, run %v, test %v",
		result.Build.Ok, result.Run.Ok, result.Test.Ok)

	// The job is successful only if the test stage passed.
	if !result.Test.Ok {
		err = errors.New("tests failed")
	}

	return
}
|
|
@ -0,0 +1,206 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/api"
|
||||
)
|
||||
|
||||
// Resources tracks CPU and RAM reservations for concurrently running
// jobs so the daemon does not oversubscribe the host.
type Resources struct {
	// initialized guards against use of a zero-value Resources;
	// only NewResources sets it.
	initialized bool

	CPU *CPUResource
	RAM *RAMResources
}
|
||||
|
||||
func NewResources() (r *Resources) {
|
||||
r = &Resources{}
|
||||
r.CPU = NewCPUResources()
|
||||
r.RAM = NewRAMResources()
|
||||
r.initialized = true
|
||||
return
|
||||
}
|
||||
|
||||
// Allocate reserves the CPUs and RAM a job's qemu instance needs.
// CPU is reserved first; if the RAM reservation then fails, the CPU
// reservation is rolled back so nothing leaks. Requires the qemu
// cpus/memory fields to be non-zero (Process fills in defaults).
func (r *Resources) Allocate(job api.Job) (err error) {
	if !r.initialized {
		err = errors.New("resources not initialized")
		return
	}

	if job.Artifact.Qemu.Cpus == 0 {
		err = errors.New("no cpus requested")
		return
	}

	if job.Artifact.Qemu.Memory == 0 {
		err = errors.New("no memory requested")
		return
	}

	// Snapshot usage only to report the delta in the debug log.
	origRam := r.RAM.GetSpent()
	origCPU := r.CPU.GetSpent()

	err = r.CPU.Allocate(job.Artifact.Qemu.Cpus)
	if err != nil {
		return
	}

	err = r.RAM.Allocate(job.Artifact.Qemu.Memory)
	if err != nil {
		// Roll back the CPU reservation on partial failure.
		r.CPU.Release(job.Artifact.Qemu.Cpus)
		return
	}

	log.Debug().Msgf("allocated %d cpus, %d MB ram",
		r.CPU.GetSpent()-origCPU,
		r.RAM.GetSpent()-origRam)

	return
}
|
||||
|
||||
func (r *Resources) Release(job api.Job) {
|
||||
if !r.initialized {
|
||||
log.Error().Msg("resources not initialized")
|
||||
return
|
||||
}
|
||||
|
||||
r.CPU.Release(job.Artifact.Qemu.Cpus)
|
||||
r.RAM.Release(job.Artifact.Qemu.Memory)
|
||||
|
||||
log.Debug().Msgf("released %d cpus, %d MB ram",
|
||||
job.Artifact.Qemu.Cpus,
|
||||
job.Artifact.Qemu.Memory)
|
||||
}
|
||||
|
||||
// CPUResource accounts for CPU reservations against the host's core
// count, optionally scaled by an overcommit factor.
type CPUResource struct {
	num        int     // total host CPUs (from runtime.NumCPU)
	overcommit float64 // budget multiplier; 1 = no overcommit

	mu    *sync.Mutex // guards spent
	spent int         // CPUs currently reserved
}
|
||||
|
||||
// Allocation and Release enumerate resource operations.
// NOTE(review): these constants are not referenced anywhere in this
// file — confirm whether callers elsewhere use them or they are dead.
const (
	Allocation = iota
	Release
)
|
||||
|
||||
func NewCPUResources() (cpur *CPUResource) {
|
||||
cpur = &CPUResource{}
|
||||
cpur.mu = &sync.Mutex{}
|
||||
cpur.num = runtime.NumCPU()
|
||||
cpur.overcommit = 1
|
||||
log.Debug().Msgf("total cpus: %d", cpur.num)
|
||||
return
|
||||
}
|
||||
|
||||
// SetOvercommit changes the CPU budget multiplier and logs the
// resulting effective CPU count.
func (cpur *CPUResource) SetOvercommit(oc float64) {
	log.Info().Int("cpus", cpur.num).
		Int("result", int(float64(cpur.num)*oc)).
		Msgf("%.02f", oc)
	cpur.overcommit = oc
}
|
||||
|
||||
func (cpur *CPUResource) GetSpent() int {
|
||||
cpur.mu.Lock()
|
||||
defer cpur.mu.Unlock()
|
||||
return cpur.spent
|
||||
}
|
||||
|
||||
// ErrNotEnoughCpu is returned when a reservation would exceed the
// overcommit-adjusted CPU budget.
var ErrNotEnoughCpu = errors.New("not enough cpu")

// Allocate reserves cpu CPUs, failing with ErrNotEnoughCpu when the
// budget (num * overcommit) would be exceeded.
func (cpur *CPUResource) Allocate(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	if cpur.spent+cpu > int(float64(cpur.num)*cpur.overcommit) {
		err = ErrNotEnoughCpu
		return
	}

	cpur.spent += cpu
	return
}
|
||||
|
||||
// Release returns cpu CPUs to the pool; releasing more than is
// currently reserved yields ErrFreeingMoreThanAllocated.
func (cpur *CPUResource) Release(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	if cpur.spent < cpu {
		err = ErrFreeingMoreThanAllocated
		return
	}

	cpur.spent -= cpu
	return
}
|
||||
|
||||
// RAMResources accounts for RAM reservations (in MB) against the
// host's total memory, optionally scaled by an overcommit factor.
type RAMResources struct {
	mb         int     // total host RAM in MB (from sysinfo)
	overcommit float64 // budget multiplier; 1 = no overcommit

	mu    *sync.Mutex // guards spent
	spent int         // MB currently reserved
}
|
||||
|
||||
// NewRAMResources creates a RAM tracker sized to the host's total
// memory with no overcommit.
func NewRAMResources() (ramr *RAMResources) {
	ramr = &RAMResources{}
	ramr.mu = &sync.Mutex{}
	ramr.overcommit = 1

	// NOTE(review): syscall.Sysinfo's error is ignored; on failure
	// info.Totalram stays zero and every allocation would fail with
	// ErrNotEnoughRam — confirm this is acceptable.
	var info syscall.Sysinfo_t
	syscall.Sysinfo(&info)
	ramr.mb = int(info.Totalram / 1024 / 1024)
	log.Debug().Msgf("total ram: %d MB", ramr.mb)
	return
}
|
||||
|
||||
// SetOvercommit changes the RAM budget multiplier and logs the
// resulting effective amount in MB.
func (ramr *RAMResources) SetOvercommit(oc float64) {
	log.Info().Int("ram", ramr.mb).
		Int("result", int(float64(ramr.mb)*oc)).
		Msgf("%.02f", oc)
	ramr.overcommit = oc
}
|
||||
|
||||
func (ramr RAMResources) GetSpent() int {
|
||||
ramr.mu.Lock()
|
||||
defer ramr.mu.Unlock()
|
||||
return ramr.spent
|
||||
}
|
||||
|
||||
// ErrNotEnoughRam is returned when a reservation would exceed the
// overcommit-adjusted RAM budget.
var ErrNotEnoughRam = errors.New("not enough ram")

// Allocate reserves mb megabytes, failing with ErrNotEnoughRam when
// the budget (mb * overcommit) would be exceeded.
func (ramr *RAMResources) Allocate(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	ocmem := int(float64(ramr.mb) * ramr.overcommit)

	if mb > ocmem-ramr.spent {
		err = ErrNotEnoughRam
		return
	}

	ramr.spent += mb
	return
}
|
||||
|
||||
// ErrFreeingMoreThanAllocated is returned when a release exceeds the
// currently reserved amount (shared by the CPU tracker too).
var ErrFreeingMoreThanAllocated = errors.New("freeing more than allocated")

// Release returns mb megabytes to the pool; releasing more than is
// currently reserved yields ErrFreeingMoreThanAllocated.
func (ramr *RAMResources) Release(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	if ramr.spent < mb {
		err = ErrFreeingMoreThanAllocated
		return
	}

	ramr.spent -= mb
	return
}
|
143
debug.go
143
debug.go
|
@ -1,143 +0,0 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jollheef/out-of-tree/config"
|
||||
qemu "github.com/jollheef/out-of-tree/qemu"
|
||||
"github.com/logrusorgru/aurora"
|
||||
)
|
||||
|
||||
func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
|
||||
kernel string) (ki config.KernelInfo, err error) {
|
||||
|
||||
km, err := kernelMask(kernel)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ka.SupportedKernels = []config.KernelMask{km}
|
||||
|
||||
for _, ki = range kcfg.Kernels {
|
||||
var supported bool
|
||||
supported, err = ka.Supported(ki)
|
||||
if err != nil || supported {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("No supported kernel found")
|
||||
return
|
||||
}
|
||||
|
||||
func handleLine(q *qemu.QemuSystem) (err error) {
|
||||
fmt.Print("out-of-tree> ")
|
||||
rawLine := "help"
|
||||
fmt.Scanf("%s", &rawLine)
|
||||
params := strings.Fields(rawLine)
|
||||
cmd := params[0]
|
||||
|
||||
switch cmd {
|
||||
case "h", "help":
|
||||
fmt.Printf("help\t: print this help message\n")
|
||||
fmt.Printf("log\t: print qemu log\n")
|
||||
fmt.Printf("clog\t: print qemu log and cleanup buffer\n")
|
||||
fmt.Printf("cleanup\t: cleanup qemu log buffer\n")
|
||||
fmt.Printf("ssh\t: print arguments to ssh command\n")
|
||||
fmt.Printf("quit\t: quit\n")
|
||||
case "l", "log":
|
||||
fmt.Println(string(q.Stdout))
|
||||
case "cl", "clog":
|
||||
fmt.Println(string(q.Stdout))
|
||||
q.Stdout = []byte{}
|
||||
case "c", "cleanup":
|
||||
q.Stdout = []byte{}
|
||||
case "s", "ssh":
|
||||
fmt.Println(q.GetSshCommand())
|
||||
case "q", "quit":
|
||||
return errors.New("end of session")
|
||||
default:
|
||||
fmt.Println("No such command")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func interactive(q *qemu.QemuSystem) (err error) {
|
||||
for {
|
||||
err = handleLine(q)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func debugHandler(kcfg config.KernelConfig, workPath, kernRegex, gdb string,
|
||||
dockerTimeout time.Duration) (err error) {
|
||||
|
||||
ka, err := config.ReadArtifactConfig(workPath + "/.out-of-tree.toml")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if ka.SourcePath == "" {
|
||||
ka.SourcePath = workPath
|
||||
}
|
||||
|
||||
ki, err := firstSupported(kcfg, ka, kernRegex)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
|
||||
q, err := qemu.NewQemuSystem(qemu.X86_64, kernel, ki.RootFS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
q.Debug(gdb)
|
||||
coloredGdbAddress := aurora.BgGreen(aurora.Black(gdb))
|
||||
fmt.Printf("[*] gdb runned on %s\n", coloredGdbAddress)
|
||||
|
||||
err = q.Start()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer q.Stop()
|
||||
|
||||
tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
outFile, output, err := build(tmp, ka, ki, dockerTimeout)
|
||||
if err != nil {
|
||||
log.Println(err, output)
|
||||
return
|
||||
}
|
||||
|
||||
remoteFile := "/tmp/artifact"
|
||||
if ka.Type == config.KernelModule {
|
||||
remoteFile += ".ko"
|
||||
}
|
||||
|
||||
err = q.CopyFile("user", outFile, remoteFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
coloredRemoteFile := aurora.BgGreen(aurora.Black(remoteFile))
|
||||
fmt.Printf("[*] build result copied to %s\n", coloredRemoteFile)
|
||||
|
||||
err = interactive(q)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
# Build expression for out-of-tree using gomod2nix.
# pkgs defaults to the nixpkgs revision pinned in flake.lock, with the
# gomod2nix overlay (also pinned there) applied.
{ pkgs ? (
    let
      inherit (builtins) fetchTree fromJSON readFile;
      inherit ((fromJSON (readFile ./flake.lock)).nodes) nixpkgs gomod2nix;
    in
    import (fetchTree nixpkgs.locked) {
      overlays = [
        (import "${fetchTree gomod2nix.locked}/overlay.nix")
      ];
    }
  )
, lib
, version
}:

pkgs.buildGoApplication rec {
  pname = "out-of-tree";

  inherit version;

  nativeBuildInputs = [ pkgs.makeWrapper ];

  src = ./.;
  pwd = ./.;

  # Tests need qemu/network; skipped in the sandbox.
  doCheck = false;

  # Runtime dependencies are provided on PATH via the wrapper.
  postFixup = ''
    wrapProgram $out/bin/out-of-tree \
	--prefix PATH : "${lib.makeBinPath [ pkgs.qemu pkgs.podman pkgs.openssl ]}"
  '';

  meta = with lib; {
    description = "kernel {module, exploit} development tool";
    homepage = "https://out-of-tree.io";
    maintainers = [ maintainers.dump_stack ];
    license = licenses.agpl3Plus;
  };
}
|
|
@ -0,0 +1,217 @@
|
|||
package centos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
func init() {
|
||||
releases := []string{"6", "7", "8"}
|
||||
|
||||
for _, release := range releases {
|
||||
distro.Register(CentOS{release: release})
|
||||
}
|
||||
}
|
||||
|
||||
// CentOS implements the distro backend for a single CentOS release.
type CentOS struct {
	// release is the major version string, e.g. "7".
	release string
}
|
||||
|
||||
func (centos CentOS) Equal(d distro.Distro) bool {
|
||||
return centos.release == d.Release && distro.CentOS == d.ID
|
||||
}
|
||||
|
||||
func (centos CentOS) Distro() distro.Distro {
|
||||
return distro.Distro{ID: distro.CentOS, Release: centos.release}
|
||||
}
|
||||
|
||||
func (centos CentOS) Packages() (pkgs []string, err error) {
|
||||
c, err := container.New(centos.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !c.Exist() {
|
||||
err = c.Build("centos:"+centos.release,
|
||||
centos.envs(), centos.runs())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
cmd := "yum search kernel --showduplicates 2>/dev/null " +
|
||||
"| grep '^kernel-[0-9]' " +
|
||||
"| grep -v src " +
|
||||
"| cut -d ' ' -f 1"
|
||||
|
||||
output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
pkgs = append(pkgs, strings.Fields(output)...)
|
||||
return
|
||||
}
|
||||
|
||||
func (centos CentOS) Kernels() (kernels []distro.KernelInfo, err error) {
|
||||
c, err := container.New(centos.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return c.Kernels()
|
||||
}
|
||||
|
||||
func (centos CentOS) envs() (envs []string) {
|
||||
return
|
||||
}
|
||||
|
||||
// runs returns the shell commands that prepare the CentOS container
// image: point yum at the vault.centos.org archive repositories for
// every minor release, then pre-install build tooling and cache all
// kernel/header dependencies. Command order matters — repos must be
// configured before any yum invocation.
func (centos CentOS) runs() (commands []string) {
	// cmdf appends one formatted shell command to the result.
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	var repos []string

	// TODO refactor
	switch centos.release {
	case "6":
		// vault repos for 6.0 through 6.10 (os + updates each).
		repofmt := "[6.%d-%s]\\nbaseurl=https://vault.centos.org/6.%d/%s/$basearch/\\ngpgcheck=0"
		for i := 0; i <= 10; i++ {
			repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates"))
		}
		// CentOS 6 stock repo files point at dead mirrors.
		cmdf("rm /etc/yum.repos.d/*")
	case "7":
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/\\ngpgcheck=0"
		for _, ver := range []string{
			"7.0.1406", "7.1.1503", "7.2.1511",
			"7.3.1611", "7.4.1708", "7.5.1804",
			"7.6.1810", "7.7.1908", "7.8.2003",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates"))
		}

		// 7.9.2009 is still on the live mirror, not the vault.
		// FIXME http/gpgcheck=0
		repofmt = "[%s-%s]\\nbaseurl=http://mirror.centos.org/centos-7/%s/%s/$basearch/\\ngpgcheck=0"
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "os", "7.9.2009", "os"))
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "updates", "7.9.2009", "updates"))
	case "8":
		// CentOS 8 splits content into BaseOS/AppStream.
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/os/\\ngpgcheck=0"

		for _, ver := range []string{
			"8.0.1905", "8.1.1911", "8.2.2004",
			"8.3.2011", "8.4.2105", "8.5.2111",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "BaseOS"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "AppStream"))
		}
	default:
		// Unsupported releases abort the whole program here.
		log.Fatal().Msgf("no support for centos %s", centos.release)
		return
	}

	// Disable whatever stock repos remain; only our vault repos
	// should be active.
	cmdf("sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true")

	for _, repo := range repos {
		cmdf("echo -e '%s' >> /etc/yum.repos.d/oot.repo\n", repo)
	}

	// do not remove old kernels

	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf")
	cmdf("yum -y update")

	cmdf("yum -y groupinstall 'Development Tools'")

	// TODO do not use lexicographical comparison, change to parse int
	// (safe today: registered releases are single-digit "6"/"7"/"8").
	if centos.release <= "6" {
		cmdf("yum -y install kernel-firmware")
	} else {
		cmdf("yum -y install linux-firmware")
	}

	if centos.release < "8" {
		cmdf("yum -y install deltarpm")
	} else {
		cmdf("yum -y install grub2-tools-minimal elfutils-libelf-devel")
	}

	// yum >= 8 would autoremove the just-installed kernel's deps
	// unless told otherwise.
	var flags string
	if centos.release >= "8" {
		flags = "--noautoremove"
	}

	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.

	cmd := "export HEADERS=$(yum search kernel-devel --showduplicates " +
		"| grep '^kernel-devel' | cut -d ' ' -f 1 | head -n 1)"

	cmd += " KERNEL=$(echo $HEADERS | sed 's/-devel//')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/-devel/-modules/')"
	cmd += " CORE=$(echo $HEADERS | sed 's/-devel/-core/')"

	cmd += " && yum -y install $KERNEL $HEADERS"
	cmd += " && yum -y remove %s $KERNEL $HEADERS $MODULES $CORE"

	cmdf(cmd, flags)

	return
}
|
||||
|
||||
// Install installs the given kernel package (and optionally its
// kernel-devel headers) inside the container, regenerates an initramfs
// with the drivers qemu needs, and copies /boot, modules and sources
// out into the per-kernel /target volumes on the host.
func (centos CentOS) Install(pkgname string, headers bool) (err error) {
	// The headers package mirrors the kernel package name:
	// kernel-X -> kernel-devel-X.
	var headerspkg string
	if headers {
		headerspkg = strings.Replace(pkgname, "kernel", "kernel-devel", -1)
	}

	var commands []string
	// cmdf appends one formatted shell command to the batch.
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("yum -y install %s %s", pkgname, headerspkg)

	// "kernel-4.18.0-..." -> "4.18.0-..." for initramfs naming.
	version := strings.Replace(pkgname, "kernel-", "", -1)

	// Rebuild the initramfs with the disk/network drivers the qemu
	// guest needs; dracut flags differ between el6/7 and el8.
	if centos.release <= "7" {
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}

	// Export the results to the host-mounted /target volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}

	// Remount the standard volumes under /target so the container's
	// own /boot etc. are untouched while we copy into the host dirs.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
func (centos CentOS) RootFS() string {
|
||||
return fmt.Sprintf("out_of_tree_centos_%s.img", centos.release)
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package centos
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// TestCentOS checks distro identity matching for CentOS 7 and then
// exercises Packages, which builds/uses the real container image —
// slow and requires a container runtime.
func TestCentOS(t *testing.T) {
	assert := assert.New(t)

	u := CentOS{release: "7"}

	// Equal must match on both ID and release.
	assert.True(u.Equal(distro.Distro{Release: "7", ID: distro.CentOS}))

	// NotEmpty receives (pkgs, err); a non-empty package list is
	// the success criterion here.
	assert.NotEmpty(u.Packages())
}
|
|
@ -0,0 +1,53 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/rapidloop/skv"
|
||||
)
|
||||
|
||||
// Cache is a small persistent key/value cache of Debian kernel
// metadata, backed by an skv (bolt-based) store file.
type Cache struct {
	// store is the underlying skv database handle.
	store *skv.KVStore
}
|
||||
|
||||
// The skv-backed cache is not thread-safe, so a single package-level
// mutex serializes all users: NewCache acquires it and Close releases
// it, making open-to-close a critical section.
var mu sync.Mutex
|
||||
|
||||
func NewCache(path string) (c *Cache, err error) {
|
||||
mu.Lock()
|
||||
|
||||
c = &Cache{}
|
||||
c.store, err = skv.Open(path)
|
||||
return
|
||||
}
|
||||
|
||||
func (c Cache) Put(p []DebianKernel) error {
|
||||
if len(p) == 0 {
|
||||
return errors.New("empty slice")
|
||||
}
|
||||
return c.store.Put(p[0].Version.Package, p)
|
||||
}
|
||||
|
||||
func (c Cache) Get(version string) (p []DebianKernel, err error) {
|
||||
err = c.store.Get(version, &p)
|
||||
if len(p) == 0 {
|
||||
err = skv.ErrNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c Cache) PutVersions(versions []string) error {
|
||||
return c.store.Put("versions", versions)
|
||||
}
|
||||
|
||||
func (c Cache) GetVersions() (versions []string, err error) {
|
||||
err = c.store.Get("versions", &versions)
|
||||
return
|
||||
}
|
||||
|
||||
func (c Cache) Close() (err error) {
|
||||
err = c.store.Close()
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
|
@ -0,0 +1,106 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/rapidloop/skv"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
|
||||
)
|
||||
|
||||
// TestCache verifies the full cache round trip: put/get in one session,
// persistence across Close/NewCache, and ErrNotFound for missing keys.
func TestCache(t *testing.T) {
	dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "debian.cache")

	c, err := NewCache(path)
	if err != nil {
		t.Fatal(err)
	}

	// A distinctive hash lets us detect round-trip corruption.
	image := snapshot.Package{}
	image.Deb.Hash = "12345"

	version := "4.17.14-1"

	dk := DebianKernel{
		Version: DebianKernelVersion{Package: version},
		Image:   image,
	}

	err = c.Put([]DebianKernel{dk})
	if err != nil {
		t.Fatal(err)
	}

	// Read back within the same session.
	dk2s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk2 := dk2s[0]

	if dk.Image.Deb.Hash != dk2.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}

	c.Close()

	// Reopen: data must have been persisted to disk.
	c, err = NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	dk3s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk3 := dk3s[0]

	if dk.Image.Deb.Hash != dk3.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}

	// Missing keys must surface as skv.ErrNotFound, not success or
	// some other error.
	_, err = c.Get("key not exist")
	if err == nil || err != skv.ErrNotFound {
		t.Fatal(err)
	}
}
|
||||
|
||||
func TestVersionsCache(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
path := filepath.Join(dir, "debian.cache")
|
||||
|
||||
c, err := NewCache(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
versions := []string{"a", "b", "c"}
|
||||
|
||||
err = c.PutVersions(versions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
result, err := c.GetVersions()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(versions) != len(result) {
|
||||
t.Fatal("mismatch")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,535 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
func init() {
|
||||
releases := []Release{
|
||||
Wheezy,
|
||||
Jessie,
|
||||
Stretch,
|
||||
Buster,
|
||||
Bullseye,
|
||||
Bookworm,
|
||||
}
|
||||
|
||||
for _, release := range releases {
|
||||
distro.Register(Debian{release: release})
|
||||
}
|
||||
}
|
||||
|
||||
// Debian implements the distro backend for a single Debian release.
type Debian struct {
	// release identifies the Debian release (Wheezy..Bookworm).
	release Release
}
|
||||
|
||||
func (d Debian) Equal(dd distro.Distro) bool {
|
||||
if dd.ID != distro.Debian {
|
||||
return false
|
||||
}
|
||||
|
||||
return ReleaseFromString(dd.Release) == d.release
|
||||
}
|
||||
|
||||
func (d Debian) Distro() distro.Distro {
|
||||
return distro.Distro{distro.Debian, d.release.String()}
|
||||
}
|
||||
|
||||
// Packages lists the installable linux-image package names for this
// release: it ensures the container image exists, pulls the full
// kernel index via GetKernels, keeps only kernels belonging to this
// release, and filters out versions older than the release's first
// stable kernel (pre-release leftovers in the archive).
func (d Debian) Packages() (packages []string, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	// Lazily build the container image on first use.
	if !c.Exist() {
		err = c.Build(d.image(), d.envs(), d.runs())
		if err != nil {
			return
		}
	}

	kernels, err := GetKernels()
	if err != nil {
		log.Error().Err(err).Msg("get kernels")
		return
	}

	for _, dk := range kernels {
		if d.release != dk.Release {
			continue
		}

		version := kver(dk.Version.Package)

		// filter out pre-release kernels
		// (each release's floor is its first shipped kernel;
		// "-rc0" makes the comparison include X.Y exactly)
		switch dk.Release {
		case Wheezy:
			if version.LessThan(kver("3.2-rc0")) {
				continue
			}
		case Jessie:
			if version.LessThan(kver("3.16-rc0")) {
				continue
			}
		case Stretch:
			if version.LessThan(kver("4.9-rc0")) {
				continue
			}
		case Buster:
			if version.LessThan(kver("4.19-rc0")) {
				continue
			}
		case Bullseye:
			if version.LessThan(kver("5.10-rc0")) {
				continue
			}
		case Bookworm:
			if version.LessThan(kver("6.1-rc0")) {
				continue
			}
		}

		p := dk.Image.Deb.Name[:len(dk.Image.Deb.Name)-4] // w/o .deb
		packages = append(packages, p)
	}

	return
}
|
||||
|
||||
// Release enumerates Debian releases in chronological order, so
// releases can be compared with <, <= etc. None is the zero value,
// meaning "unknown release".
type Release int

const (
	None Release = iota
	Buzz
	Hamm
	Woody
	Etch
	Lenny
	Squeeze
	Wheezy
	Jessie
	Stretch
	Buster
	Bullseye
	Bookworm
)
|
||||
|
||||
// ReleaseStrings maps a Release value (used as index) to its codename;
// index 0 corresponds to None and is empty. Order must match the
// Release constants above.
var ReleaseStrings = [...]string{
	"",
	"buzz",
	"hamm",
	"woody",
	"etch",
	"lenny",
	"squeeze",
	"wheezy",
	"jessie",
	"stretch",
	"buster",
	"bullseye",
	"bookworm",
}
|
||||
|
||||
// Name returns the release codename, e.g. "wheezy"; empty for None.
func (cn Release) Name() string {
	return ReleaseStrings[cn]
}
|
||||
|
||||
// String returns the numeric release as a string, e.g. "7" for Wheezy
// (the numeric form is what distro.Distro.Release carries).
func (cn Release) String() string {
	return fmt.Sprintf("%d", cn)
}
|
||||
|
||||
func ReleaseFromString(s string) (r Release) {
|
||||
switch strings.ToLower(s) {
|
||||
case "1", "buzz":
|
||||
r = Buzz
|
||||
case "2", "hamm":
|
||||
r = Hamm
|
||||
case "3", "woody":
|
||||
r = Woody
|
||||
case "4", "etch":
|
||||
r = Etch
|
||||
case "5", "lenny":
|
||||
r = Lenny
|
||||
case "6", "squeeze":
|
||||
r = Squeeze
|
||||
case "7", "wheezy":
|
||||
r = Wheezy
|
||||
case "8", "jessie":
|
||||
r = Jessie
|
||||
case "9", "stretch":
|
||||
r = Stretch
|
||||
case "10", "buster":
|
||||
r = Buster
|
||||
case "11", "bullseye":
|
||||
r = Bullseye
|
||||
case "12", "bookworm":
|
||||
r = Bookworm
|
||||
default:
|
||||
r = None
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d Debian) envs() (envs []string) {
|
||||
envs = append(envs, "DEBIAN_FRONTEND=noninteractive")
|
||||
return
|
||||
}
|
||||
|
||||
func (d Debian) image() (image string) {
|
||||
image += "debian:"
|
||||
|
||||
switch d.release {
|
||||
case Wheezy:
|
||||
image += "wheezy-20190228"
|
||||
case Jessie:
|
||||
image += "jessie-20210326"
|
||||
case Stretch:
|
||||
image += "stretch-20220622"
|
||||
default:
|
||||
image += d.release.Name()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// repositories returns snapshot.debian.org sources.list lines for EOL
// releases whose images no longer ship working repos. Releases newer
// than Stretch return nil — their images handle apt sources themselves
// (see the else branch in runs).
func repositories(release Release) (repos []string) {
	var snapshot string

	switch release {
	// Latest snapshots that include release
	case Wheezy:
		// doesn't include snapshot repos in /etc/apt/source.list
		snapshot = "20190321T212815Z"
	case Jessie:
		snapshot = "20230322T152120Z"
	case Stretch:
		snapshot = "20230423T032533Z"
	default:
		return
	}

	// repo appends one snapshot source line for the given archive
	// and suite suffix (e.g. "-updates").
	repo := func(archive, s string) {
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/%s/%s " +
			"%s%s main"
		r := fmt.Sprintf(format, archive, snapshot, release.Name(), s)
		repos = append(repos, r)
	}

	repo("debian", "")
	repo("debian", "-updates")
	// Only Wheezy (== 7) reaches here among releases <= 7; it needs
	// backports for initramfs-tools (see runs).
	if release <= 7 {
		repo("debian", "-backports")
	}
	repo("debian-security", "/updates")

	return
}
|
||||
|
||||
// runs returns the shell commands that prepare the Debian container
// image: wire up apt sources (snapshot.debian.org for EOL releases),
// then install the build toolchain and the libraries kernel packages
// depend on. Command order matters — sources must be in place before
// any apt-get run.
func (d Debian) runs() (commands []string) {
	// cmdf appends one formatted shell command to the result.
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	repos := repositories(d.release)

	if len(repos) != 0 {
		// EOL release: replace sources.list with snapshot repos.
		cmdf("rm /etc/apt/sources.list")
		for _, repo := range repos {
			cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		}
	} else {
		// Live release: if the stock mirrors fail, fall back to
		// the commented-out snapshot entries in sources.list.
		cmdf("apt-get update || sed -i " +
			"-e '/snapshot/!d' " +
			"-e 's/# deb/deb [check-valid-until=no trusted=yes]/' " +
			"/etc/apt/sources.list")
	}

	// snapshot.debian.org is flaky; retry a couple of times.
	cmdf("apt-get update || apt-get update || apt-get update")

	pkglist := []string{
		"wget", "build-essential", "libelf-dev", "git",
		"kmod", "linux-base", "libssl-dev",
		"firmware-linux-free",
		"libxml2", "libglib2.0.0", "irqbalance", "libcap-ng0",
		"libnuma1", "sgml-base", "shared-mime-info", "xdg-user-dirs",
		"xml-core", "python3",
	}

	// Match every gcc major the release may ship (gcc-4.9, gcc-9,
	// gcc-12, ...); apt treats the quoted regex as a pattern.
	gccs := "'^(gcc-[0-9].[0-9]|gcc-[0-9]|gcc-[1-9][0-9])$'"
	pkglist = append(pkglist, gccs)

	if d.release >= 8 {
		pkglist = append(pkglist, "initramfs-tools")
	} else {
		// by default Debian backports repositories have a lower
		// priority than stable, so we should specify it manually
		cmdf("apt-get -y install -t %s-backports "+
			"initramfs-tools", d.release.Name())
	}

	if d.release >= 9 {
		pkglist = append(pkglist, "apparmor")
	}

	if d.release < 9 {
		pkglist = append(pkglist, "module-init-tools")
	}

	// Flatten the list into a single space-separated argument string.
	var packages string
	for _, pkg := range pkglist {
		packages += fmt.Sprintf("%s ", pkg)
	}

	// Escalating timeouts: give slow snapshot mirrors a second and
	// third chance before an unbounded attempt.
	cmdf("timeout 5m apt-get install -y %s "+
		"|| timeout 10m apt-get install -y %s "+
		"|| apt-get install -y %s", packages, packages, packages)

	if d.release == Wheezy {
		// We need newer libc for deb8*~bpo70+1
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/debian/%s " +
			"jessie main"
		// Keep it here not in repos to have apt-priority close
		repo := fmt.Sprintf(format, "20190321T212815Z")
		cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		// Pin jessie low so only explicit -t jessie installs use it.
		cmdf("echo 'Package: *' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin: release a=jessie' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin-Priority: 10' >> /etc/apt/preferences.d/jessie")

		cmdf("apt-get -y update")

		// glibc guarantee backwards compatibility, so should be no problem
		cmdf("apt-get -y install -t jessie libc6-dev")
	}

	// Kernel package postinst scripts expect /lib/modules to exist.
	cmdf("mkdir -p /lib/modules")

	return
}
|
||||
|
||||
// Kernels enumerates installed kernels by scanning the container's
// volumes directory: each linux-image-* subdirectory holds a kernel's
// boot files and modules as exported by Install. Kernels with missing
// artifacts are skipped with a warning rather than failing the scan.
func (d Debian) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	// No container image yet means no kernels; not an error.
	if !c.Exist() {
		return
	}

	cpath := dotfiles.Dir("volumes", c.Name())
	rootfs := dotfiles.File("images", c.Name()+".img")

	files, err := os.ReadDir(cpath)
	if err != nil {
		return
	}

	for _, file := range files {
		// Only kernel package directories are of interest.
		if !strings.Contains(file.Name(), "linux-image") {
			continue
		}

		pkgname := file.Name()

		kpkgdir := filepath.Join(cpath, pkgname)

		bootdir := filepath.Join(kpkgdir, "boot")

		vmlinuz, err := fs.FindBySubstring(bootdir, "vmlinuz")
		if err != nil {
			log.Warn().Msgf("cannot find vmlinuz for %s", pkgname)
			continue
		}

		initrd, err := fs.FindBySubstring(bootdir, "initrd")
		if err != nil {
			log.Warn().Msgf("cannot find initrd for %s", pkgname)
			continue
		}

		modulesdir := filepath.Join(kpkgdir, "lib/modules")

		// Empty substring: take the first entry (the single
		// kernel-version directory inside lib/modules).
		modules, err := fs.FindBySubstring(modulesdir, "")
		if err != nil {
			log.Warn().Msgf("cannot find modules for %s", pkgname)
			continue
		}

		log.Debug().Msgf("%s %s %s", vmlinuz, initrd, modules)

		// "linux-image-4.9.0-6-amd64" -> "4.9.0-6-amd64".
		release := strings.Replace(pkgname, "linux-image-", "", -1)

		ki := distro.KernelInfo{
			Distro:        d.Distro(),
			KernelVersion: path.Base(modules),
			KernelRelease: release,
			ContainerName: c.Name(),

			KernelPath:  vmlinuz,
			InitrdPath:  initrd,
			ModulesPath: modules,

			RootFS: rootfs,

			Package: pkgname,
		}

		// These specific backport kernels are known to misbehave
		// with SMAP enabled; pass smap=off to the guest CPU.
		smapBlocklist := []string{
			"3.10.5-1~bpo70+1",
			"3.10.11-1~bpo70+1",
			"3.9.6-1~bpo70+1",
		}
		for _, ver := range smapBlocklist {
			if strings.Contains(release, ver) {
				ki.CPU.Flags = append(ki.CPU.Flags, "smap=off")
			}
		}

		kernels = append(kernels, ki)
	}

	return
}
|
||||
|
||||
func (d Debian) volumes(pkgname string) (volumes []container.Volume) {
|
||||
c, err := container.New(d.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
pkgdir := filepath.Join("volumes", c.Name(), pkgname)
|
||||
|
||||
volumes = append(volumes, container.Volume{
|
||||
Src: dotfiles.Dir(pkgdir, "/lib/modules"),
|
||||
Dest: "/lib/modules",
|
||||
})
|
||||
|
||||
volumes = append(volumes, container.Volume{
|
||||
Src: dotfiles.Dir(pkgdir, "/usr/src"),
|
||||
Dest: "/usr/src",
|
||||
})
|
||||
|
||||
volumes = append(volumes, container.Volume{
|
||||
Src: dotfiles.Dir(pkgdir, "/boot"),
|
||||
Dest: "/boot",
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Install downloads the deb packages for pkgname (image only, or image
// plus headers and their dependencies), installs them from a local apt
// repository inside the container, and copies boot files, modules and
// sources out into the per-kernel /target volumes. On any failure the
// partially-populated volume directory is removed.
func (d Debian) Install(pkgname string, headers bool) (err error) {
	// Remove partial artifacts so a failed install cannot be
	// mistaken for an installed kernel by Kernels().
	defer func() {
		if err != nil {
			d.cleanup(pkgname)
		}
	}()

	dk, err := getCachedKernel(pkgname + ".deb")
	if err != nil {
		return
	}

	var pkgs []snapshot.Package
	if headers {
		pkgs = dk.Packages()
	} else {
		pkgs = []snapshot.Package{dk.Image}
	}

	var commands []string
	// cmdf appends one formatted shell command to the batch.
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	for _, pkg := range pkgs {
		// Prefer a mirror cache of the deb when available.
		found, newurl := cache.PackageURL(
			distro.Debian,
			pkg.Deb.URL,
		)
		if found {
			log.Debug().Msgf("cached deb found %s", newurl)
			pkg.Deb.URL = newurl
		}

		// TODO use faketime on old releases?
		// Old images cannot validate modern TLS certs; plain
		// http avoids that (integrity is not verified here).
		pkg.Deb.URL = strings.Replace(pkg.Deb.URL, "https", "http", -1)

		cmdf("wget --no-verbose " +
			"--timeout=10 --waitretry=1 --tries=10 " +
			"--no-check-certificate " + pkg.Deb.URL)
	}

	// prepare local repository
	cmdf("mkdir debs && mv *.deb debs/")
	cmdf("dpkg-scanpackages debs /dev/null | gzip > debs/Packages.gz")
	cmdf(`echo "deb [trusted=yes] file:$(pwd) debs/" >> /etc/apt/sources.list.d/local.list`)
	cmdf("apt-get update -o Dir::Etc::sourcelist='sources.list.d/local.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0'")

	// make sure apt-get will not download the repo version
	cmdf("echo 'Package: *' >> /etc/apt/preferences.d/pin")
	cmdf(`echo 'Pin: origin "*.debian.org"' >> /etc/apt/preferences.d/pin`)
	cmdf("echo 'Pin-Priority: 100' >> /etc/apt/preferences.d/pin")

	// cut package names and install
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | " +
		"xargs apt-get -y --force-yes install")

	// for debug
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | xargs apt-cache policy")

	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	// Remount the kernel volumes under /target so the container's
	// own filesystem stays untouched while we copy artifacts out.
	c.Volumes = d.volumes(pkgname)
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	// -L dereferences symlinks; /usr/src contains linked headers.
	cmdf("cp -rL /usr/src /target/usr/")

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
func (d Debian) cleanup(pkgname string) {
|
||||
c, err := container.New(d.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
pkgdir := dotfiles.Dir(filepath.Join("volumes", c.Name(), pkgname))
|
||||
|
||||
log.Debug().Msgf("cleanup %s", pkgdir)
|
||||
|
||||
err = os.RemoveAll(pkgdir)
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("cleanup")
|
||||
}
|
||||
}
|
||||
|
||||
func (d Debian) RootFS() string {
|
||||
return fmt.Sprintf("out_of_tree_debian_%s.img", d.release.String())
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// TestDebian checks distro identity matching for Wheezy and then
// exercises Packages, which needs a container runtime and network —
// skipped in CI.
func TestDebian(t *testing.T) {
	assert := assert.New(t)

	u := Debian{release: Wheezy}

	// Codename string must resolve to the same release.
	assert.True(u.Equal(distro.Distro{Release: "wheezy", ID: distro.Debian}))

	if os.Getenv("CI") != "" {
		t.Skip("skip testing in CI")
	}

	// NotEmpty receives (packages, err); a non-empty package list
	// is the success criterion here.
	assert.NotEmpty(u.Packages())
}
|
|
@ -0,0 +1,467 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/metasnap"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
)
|
||||
|
||||
// DebianKernelVersion holds the two version strings that identify a
// Debian kernel build, as found in deb file names like
// linux-headers-4.17.0-2-amd64_4.17.14-1_amd64.deb.
type DebianKernelVersion struct {
	// linux-headers-4.17.0-2-amd64_4.17.14-1_amd64.deb

	// Package version, e.g. "4.17.14-1"
	// See tags in https://salsa.debian.org/kernel-team/linux
	Package string

	// ABI version, e.g. "4.17.0-2"
	ABI string
}
|
||||
|
||||
func ParseKernelVersion(pkg string) (dkv DebianKernelVersion, err error) {
|
||||
// -> 4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb
|
||||
pkg = strings.Replace(pkg, "linux-image-", "", -1)
|
||||
|
||||
// -> [4.11.0-trunk-amd64 4.11-1~exp2 amd64.deb]
|
||||
fields := strings.Split(pkg, "_")
|
||||
|
||||
if len(fields) != 3 {
|
||||
err = errors.New("incorrect input format")
|
||||
return
|
||||
}
|
||||
|
||||
// 4.11.0-trunk-amd64 -> 4.11.0-trunk
|
||||
// TODO other archs?
|
||||
dkv.ABI = strings.Split(fields[0], "-amd64")[0]
|
||||
if dkv.ABI == "" {
|
||||
err = errors.New("incorrect input format")
|
||||
return
|
||||
}
|
||||
|
||||
dkv.Package = fields[1]
|
||||
if dkv.Package == "" {
|
||||
err = errors.New("incorrect input format")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DebianKernel groups everything needed to install one Debian kernel:
// its version, the image deb, headers debs and their dependency debs.
type DebianKernel struct {
	Version DebianKernelVersion
	// Image is the linux-image deb package.
	Image snapshot.Package
	// Headers are the linux-headers deb packages.
	Headers []snapshot.Package
	// Dependencies are additional debs (kbuild, compiler, ...).
	Dependencies []snapshot.Package

	// FIXME There is a better way
	Internal struct {
		// Invalid marks entries that failed validation.
		Invalid bool
		// LastFetch records when the metadata was retrieved.
		LastFetch time.Time
	}

	// Release is the Debian release this kernel belongs to
	// (None when it could not be determined).
	Release Release
}
|
||||
|
||||
func (dk DebianKernel) HasDependency(pkgname string) bool {
|
||||
for _, deppkg := range dk.Dependencies {
|
||||
if strings.Contains(deppkg.Name, pkgname) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dk DebianKernel) Packages() (pkgs []snapshot.Package) {
|
||||
pkgs = append(pkgs, dk.Image)
|
||||
pkgs = append(pkgs, dk.Headers...)
|
||||
pkgs = append(pkgs, dk.Dependencies...)
|
||||
return
|
||||
}
|
||||
|
||||
// use only for inline comparison
|
||||
func kver(ver string) *semver.Version {
|
||||
ver = strings.Replace(ver, "~", "-", -1)
|
||||
ver = strings.Replace(ver, "+", "-", -1)
|
||||
return semver.MustParse(ver)
|
||||
}
|
||||
|
||||
// Sentinel errors returned by getDebianKernel when the snapshot
// archive does not contain the expected packages for a version.
var (
	ErrNoBinaryPackages = errors.New("no binary packages found")
	ErrNoHeadersPackage = errors.New("no headers package found")
	ErrNoImagePackage   = errors.New("no image package found")
)
|
||||
|
||||
// getDebianKernel queries snapshot.debian.org for all amd64 binary
// packages of the "linux" source package at the given version, sorts
// them into image / headers / dependencies, derives the ABI version
// from the image name, and resolves which Debian release shipped it.
func getDebianKernel(version string) (dk DebianKernel, err error) {
	flog := log.With().
		Str("version", version).
		Logger()

	dk.Version.Package = version

	// Match image/headers debs plus kbuild and compiler helper
	// packages needed to build against the headers.
	regex := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*|linux-compiler-.*-x86)$`

	// Exclude realtime/cloud variants, meta packages and debug
	// symbol packages.
	filter := []string{
		"rt-amd64",
		"cloud-amd64",
		"all-amd64",
		"dbg",
	}

	packages, err := snapshot.Packages("linux", version, regex,
		[]string{"amd64", "all"}, filter)
	if err != nil {
		return
	}

	if len(packages) == 0 {
		err = ErrNoBinaryPackages
		return
	}

	// Classify each package by name: one image, any number of
	// headers, everything else is a dependency.
	var imageFound, headersFound bool
	for _, p := range packages {
		if strings.Contains(p.Name, "image") {
			imageFound = true
			dk.Image = p
		} else if strings.Contains(p.Name, "headers") {
			headersFound = true
			dk.Headers = append(dk.Headers, p)
		} else {
			dk.Dependencies = append(dk.Dependencies, p)
		}
	}

	if !imageFound {
		err = ErrNoImagePackage
		return
	}

	if !headersFound {
		err = ErrNoHeadersPackage
		return
	}

	// "linux-image-4.17.0-2-amd64" -> "4.17.0-2" (the ABI).
	s := strings.Replace(dk.Image.Name, "linux-image-", "", -1)
	dk.Version.ABI = strings.Replace(s, "-amd64", "", -1)

	// Release resolution is best effort; None is tolerated.
	dk.Release = getRelease(dk.Image)
	if dk.Release == None {
		flog.Warn().Msg("release not found")
	} else {
		flog.Debug().Msgf("release is %s", dk.Release.Name())
	}

	return
}
|
||||
|
||||
// getRelease resolves which Debian release shipped the given package
// by asking metasnap for the repositories it appeared in and matching
// the suite name (including -backports/-updates/-proposed-updates
// variants) against the known release codenames. Returns None when
// nothing matches or the lookup fails.
func getRelease(p snapshot.Package) Release {
	repos, err := metasnap.GetRepos(p.Repo.Archive, p.Name, p.Arch, p.Version)
	if err != nil {
		log.Debug().Err(err).Msg("metasnap")
		return None
	}

	for _, repo := range repos {
		// Skip index 0 of ReleaseStrings (the empty None name).
		for _, rel := range ReleaseStrings[1:] {
			switch repo.Suite {
			case rel, rel + "-backports",
				rel + "-updates",
				rel + "-proposed-updates":

				return ReleaseFromString(rel)
			}
		}
	}

	return None
}
|
||||
|
||||
// getCachedKernel looks up a DebianKernel in the local cache by deb
// package file name, matching either the image deb or any headers deb.
// Returns the zero DebianKernel with nil error when nothing matches.
func getCachedKernel(deb string) (dk DebianKernel, err error) {
	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()

	versions, err := c.GetVersions()
	if err != nil {
		log.Error().Err(err).Msg("get source package versions from cache")
		return
	}

	// Scan every cached version; entries that fail to load are
	// skipped rather than aborting the whole lookup.
	for _, version := range versions {
		var tmpdks []DebianKernel
		tmpdks, err = c.Get(version)
		if err != nil {
			continue
		}
		tmpdk := tmpdks[0]

		// Match on the image deb name...
		if deb == tmpdk.Image.Deb.Name {
			dk = tmpdk
			return
		}

		// ...or on any of the headers deb names.
		for _, h := range tmpdk.Headers {
			if deb == h.Deb.Name {
				dk = tmpdk
				return
			}
		}
	}

	return
}
|
||||
|
||||
func kbuildVersion(versions []string, kpkgver string) string {
|
||||
for _, v := range versions {
|
||||
if v == kpkgver {
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
ver := kver(kpkgver)
|
||||
|
||||
// Not able to find the exact version, try similar
|
||||
for _, v := range versions {
|
||||
cver := kver(v)
|
||||
|
||||
// It's certainly not fit for purpose if the major and
|
||||
// minor versions aren't the same
|
||||
|
||||
if ver.Major() != cver.Major() {
|
||||
continue
|
||||
}
|
||||
|
||||
if ver.Minor() != cver.Minor() {
|
||||
continue
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func findKbuild(versions []string, kpkgver string) (
|
||||
pkg snapshot.Package, err error) {
|
||||
|
||||
version := kbuildVersion(versions, kpkgver)
|
||||
if version == "" {
|
||||
err = errors.New("cannot find kbuild version")
|
||||
return
|
||||
}
|
||||
|
||||
packages, err := snapshot.Packages("linux-tools", version,
|
||||
`^linux-kbuild`, []string{"amd64"}, []string{"dbg"})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(packages) == 0 {
|
||||
err = errors.New("cannot find kbuild package")
|
||||
}
|
||||
|
||||
pkg = packages[0]
|
||||
return
|
||||
}
|
||||
|
||||
func updateKbuild(toolsVersions []string, dk *DebianKernel) {
|
||||
if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
|
||||
return
|
||||
}
|
||||
|
||||
var deps []snapshot.Package
|
||||
for _, pkg := range dk.Dependencies {
|
||||
if strings.Contains(pkg.Name, "kbuild") {
|
||||
continue
|
||||
}
|
||||
deps = append(deps, pkg)
|
||||
}
|
||||
dk.Dependencies = deps
|
||||
|
||||
kbuildpkg, err := findKbuild(toolsVersions, dk.Version.Package)
|
||||
if err != nil {
|
||||
dk.Internal.Invalid = true
|
||||
return
|
||||
}
|
||||
|
||||
dk.Dependencies = append(dk.Dependencies, kbuildpkg)
|
||||
}
|
||||
|
||||
// getKernelsByVersion returns the kernels for one source package
// version, preferring the cache. fromcache reports whether the
// result was served from the cache without a network fetch.
//
// mode bits: UpdateRelease re-resolves the release of a cached
// kernel, UpdateKbuild re-resolves its kbuild dependency; both
// write the refreshed entry back to the cache.
func getKernelsByVersion(slog zerolog.Logger, c *Cache, toolsVersions []string,
	version string, mode GetKernelsMode) (kernels []DebianKernel,
	fromcache bool) {

	var dk DebianKernel
	dks, err := c.Get(version)
	if err == nil {
		// NOTE(review): assumes c.Get never returns an empty
		// slice together with a nil error — confirm in Cache.Get.
		dk = dks[0]
		if !dk.Internal.Invalid {
			// TODO refactor
			slog.Trace().Msgf("found in cache")
			if dk.Release == None && mode&UpdateRelease != 0 {
				slog.Debug().Msg("update release")
				dk.Release = getRelease(dk.Image)
				if dk.Release != None {
					slog.Debug().Msg("update cache")
					err = c.Put([]DebianKernel{dk})
					if err != nil {
						slog.Error().Err(err).Msg("")
						return
					}
				}
			}
			if mode&UpdateKbuild != 0 {
				slog.Debug().Msg("update kbuild")
				updateKbuild(toolsVersions, &dk)
				slog.Debug().Msg("update cache")
				err = c.Put([]DebianKernel{dk})
				if err != nil {
					slog.Error().Err(err).Msg("")
					return
				}
			}
			kernels = append(kernels, dk)
			fromcache = true
			return
		}
	}

	// Invalid cached entries are refetched at most once every
	// RefetchDays days.
	if dk.Internal.Invalid {
		refetch := dk.Internal.LastFetch.AddDate(0, 0, RefetchDays)
		if refetch.After(time.Now()) {
			slog.Trace().Msgf("refetch at %v", refetch)
			return
		}
	}

	dk, err = getDebianKernel(version)
	if err != nil {
		if err == ErrNoBinaryPackages {
			slog.Warn().Err(err).Msg("")
		} else {
			slog.Error().Err(err).Msg("get debian kernel")
		}

		// Remember the failure so this version is not refetched
		// again before the RefetchDays window passes.
		dk.Internal.Invalid = true
	}

	if !dk.HasDependency("kbuild") {
		// Debian kernels prior to the 4.5 package
		// version did not have a kbuild built from
		// the linux source itself, but used the
		// linux-tools source package.
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			dk.Internal.Invalid = true
		} else {
			updateKbuild(toolsVersions, &dk)
		}
	}

	dk.Internal.LastFetch = time.Now()

	if !dk.Internal.Invalid {
		kernels = append(kernels, dk)
	}

	// Cache the result either way: invalid entries carry the
	// LastFetch timestamp used by the refetch throttle above.
	err = c.Put([]DebianKernel{dk})
	if err != nil {
		slog.Error().Err(err).Msg("put to cache")
		return
	}

	slog.Debug().Msgf("%s cached", version)
	return
}
|
||||
|
||||
var (
	// CachePath is the Debian kernels cache location; empty means
	// the default dotfiles path (resolved in GetKernelsWithLimit).
	CachePath string
	// RefetchDays is how long an invalid cache entry is trusted
	// before the version is fetched from the network again.
	RefetchDays int = 14
)
|
||||
|
||||
// GetKernelsMode is a bitmask selecting optional cache-refresh
// behavior in getKernelsByVersion (tested with mode&Flag != 0).
type GetKernelsMode int

const (
	// NoMode performs no extra cache refreshes.
	NoMode GetKernelsMode = 0
	// UpdateRelease re-resolves the Debian release of cached kernels.
	// Declared as an explicit bit so combining flags stays safe as
	// more are added (values are unchanged from the iota form).
	UpdateRelease GetKernelsMode = 1 << 0
	// UpdateKbuild re-resolves the kbuild dependency of cached kernels.
	UpdateKbuild GetKernelsMode = 1 << 1
)
|
||||
|
||||
// GetKernelsWithLimit is workaround for testing and building the
// first cache, which is heavily rate limited by snapshot.debian.org
//
// limit caps the number of versions fetched from the network
// (cache hits do not count against it); mode selects optional
// cache-refresh behavior, see GetKernelsMode.
func GetKernelsWithLimit(limit int, mode GetKernelsMode) (kernels []DebianKernel,
	err error) {

	if CachePath == "" {
		CachePath = dotfiles.File("debian.cache")
		log.Debug().Msgf("Use default kernels cache path: %s", CachePath)

		// Bootstrap from the prebuilt remote cache when there is
		// no local one; failure is non-fatal, just slower.
		if !fs.PathExists(CachePath) {
			log.Debug().Msgf("No cache, download")
			err = cache.DownloadDebianCache(CachePath)
			if err != nil {
				log.Debug().Err(err).Msg(
					"No remote cache, will take some time")
			}
		}
	} else {
		log.Debug().Msgf("Debian kernels cache path: %s", CachePath)
	}

	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()

	// linux-tools versions are needed to resolve kbuild for
	// pre-4.5 kernels (see updateKbuild).
	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	if err != nil {
		log.Error().Err(err).Msg("get linux-tools source pkg versions")
		return
	}

	versions, err := snapshot.SourcePackageVersions("linux")
	if err != nil {
		log.Error().Err(err).Msg("get linux source package versions")
		return
	}

	err = c.PutVersions(versions)
	if err != nil {
		log.Error().Err(err).Msg("put source package versions to cache")
		return
	}

	for i, version := range versions {
		slog := log.With().Str("version", version).Logger()
		slog.Trace().Msgf("%03d/%03d", i, len(versions))
		vkernels, fromcache := getKernelsByVersion(slog, c, toolsVersions, version, mode)
		kernels = append(kernels, vkernels...)
		// Only network fetches consume the limit.
		if !fromcache {
			limit--
		}
		if limit <= 0 {
			return
		}
	}

	return
}
|
||||
|
||||
func GetKernels() (kernels []DebianKernel, err error) {
|
||||
return GetKernelsWithLimit(math.MaxInt32, NoMode)
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
package debian
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
|
||||
)
|
||||
|
||||
// TestGetDebianKernel fetches one known kernel version from the
// live snapshot.debian.org service and checks its release
// resolution (network required).
func TestGetDebianKernel(t *testing.T) {
	assert := assert.New(t)

	dk, err := getDebianKernel("4.6.4-1")
	assert.Nil(err)

	// 4.6.4-1 shipped with Debian 9 (stretch).
	assert.Equal(getRelease(dk.Image), Stretch)

	t.Logf("%s", spew.Sdump(dk))
}
|
||||
|
||||
// TestParseKernelVersion checks that every fetched kernel's image
// deb name parses to a unique package version (network required).
func TestParseKernelVersion(t *testing.T) {
	assert := assert.New(t)

	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)

	// Tracks already-seen package versions to assert uniqueness.
	versions := make(map[string]bool)

	for _, dk := range kernels {
		dkv, err := ParseKernelVersion(dk.Image.Deb.Name)
		assert.Nil(err)

		_, found := versions[dkv.Package]
		assert.True(!found)

		versions[dkv.Package] = true
	}
}
|
||||
|
||||
// TestKbuildVersion checks that a linux-tools version can be
// resolved for every pre-4.5 kernel (network required).
func TestKbuildVersion(t *testing.T) {
	assert := assert.New(t)

	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)

	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	assert.Nil(err)

	for _, dk := range kernels {
		// Only pre-4.5 kernels take kbuild from linux-tools.
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			continue
		}

		version := kbuildVersion(
			toolsVersions,
			dk.Version.Package,
		)
		assert.Nil(err)
		assert.NotEmpty(version)

		t.Log(dk.Version.Package, "->", version)
	}
}
|
|
@ -0,0 +1,158 @@
|
|||
package metasnap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// Note: Metasnap does not have all the packages, and its API is
// rather buggy.

// apiURL is the base endpoint of the metasnap CGI API.
const apiURL = "http://metasnap.debian.net/cgi-bin/api?"

var (
	// limiterTimeout is the current interval between requests;
	// lowerLimit doubles it (up to limiterMaxTimeout) when the
	// server starts resetting connections.
	limiterTimeout     time.Duration = time.Second / 20
	limiterMaxTimeout  time.Duration = time.Second * 2
	limiterBurst       int           = 1
	limiterUpdateDelay time.Duration = time.Second

	// Limiter throttles all requests to metasnap.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
|
||||
|
||||
func lowerLimit() {
|
||||
limiterTimeout = limiterTimeout * 2
|
||||
if limiterTimeout > limiterMaxTimeout {
|
||||
limiterTimeout = limiterMaxTimeout
|
||||
}
|
||||
log.Info().Msgf("limiter timeout set to %v", limiterTimeout)
|
||||
Limiter.SetLimitAt(
|
||||
time.Now().Add(limiterUpdateDelay),
|
||||
rate.Every(limiterTimeout),
|
||||
)
|
||||
log.Info().Msgf("wait %v", limiterUpdateDelay)
|
||||
time.Sleep(limiterUpdateDelay)
|
||||
}
|
||||
|
||||
// Retries in case of 5xx errors
var Retries = 10

// ErrNotFound is returned when metasnap replies 404 or has no
// matching repository entries.
var ErrNotFound = errors.New("404 not found")
|
||||
|
||||
func query(q string) (result string, err error) {
|
||||
flog := log.With().Str("url", q).Logger()
|
||||
|
||||
var resp *http.Response
|
||||
for i := Retries; i > 0; i-- {
|
||||
flog.Trace().Msg("wait")
|
||||
Limiter.Wait(context.Background())
|
||||
|
||||
flog.Trace().Msg("start")
|
||||
resp, err = http.Get(q)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "reset by peer") {
|
||||
flog.Debug().Err(err).Msg("")
|
||||
lowerLimit()
|
||||
continue
|
||||
}
|
||||
flog.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
flog.Debug().Msgf("%s", resp.Status)
|
||||
|
||||
if resp.StatusCode == 404 {
|
||||
err = ErrNotFound
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode < 500 {
|
||||
break
|
||||
}
|
||||
|
||||
flog.Debug().Msgf("retry (%d left)", i)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
err = fmt.Errorf("%d (%s)", resp.StatusCode, q)
|
||||
}
|
||||
|
||||
buf, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result = string(buf)
|
||||
return
|
||||
}
|
||||
|
||||
func queryAPIf(f string, s ...interface{}) (result string, err error) {
|
||||
return query(apiURL + fmt.Sprintf(f, s...))
|
||||
}
|
||||
|
||||
// Snapshot is the first/last snapshot timestamps during which a
// package version was present in a repository.
type Snapshot struct {
	First string
	Last  string
}

// Repo identifies a repository (archive/suite/component) that
// shipped a package, with its snapshot range.
type Repo struct {
	Archive   string
	Suite     string
	Component string
	Snapshot  Snapshot
}
|
||||
|
||||
// GetRepos returns all repositories that shipped the given version
// of a package for the given archive and architecture. Returns
// ErrNotFound when metasnap has no matching entries.
func GetRepos(archive, pkg, arch, ver string) (repos []Repo, err error) {
	result, err := queryAPIf("archive=%s&pkg=%s&arch=%s",
		archive, pkg, arch)

	if err != nil {
		return
	}

	if result == "" {
		err = ErrNotFound
		return
	}

	// Each payload line: "<version> <suite> <component> <first> <last>".
	for _, line := range strings.Split(result, "\n") {
		// The payload ends at the first empty line.
		if line == "" {
			break
		}

		fields := strings.Split(line, " ")
		if len(fields) != 5 {
			err = fmt.Errorf("metasnap api returned %s", result)
			return
		}

		repo := Repo{
			Archive:   archive,
			Suite:     fields[1],
			Component: fields[2],
			Snapshot: Snapshot{
				First: fields[3],
				Last:  fields[4],
			},
		}

		// The API is queried without a version, so filter the
		// response lines down to the requested version here.
		if fields[0] == ver {
			repos = append(repos, repo)
		}
	}

	if len(repos) == 0 {
		err = ErrNotFound
		return
	}

	return
}
|
|
@ -0,0 +1,28 @@
|
|||
package metasnap
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// TestGetRepos queries the live metasnap service for an existing
// and a non-existing package (network required).
func TestGetRepos(t *testing.T) {
	// existing
	infos, err := GetRepos("debian", "linux-image-3.8-trunk-amd64",
		"amd64", "3.8.2-1~experimental.1")
	if err != nil {
		t.Fatal(err)
	}

	t.Log(spew.Sdump(infos))

	// non-existing
	infos, err = GetRepos("debian", "meh", "amd64", "meh")
	if err == nil {
		t.Fatalf("should not be ok, result: %s", spew.Sdump(infos))
	}

	// Missing packages must surface as the ErrNotFound sentinel.
	if err != ErrNotFound {
		t.Fatal("wrong error type")
	}
}
|
|
@ -0,0 +1,186 @@
|
|||
package mr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// apiURL is the base endpoint of the snapshot.debian.org
// machine-readable API.
const apiURL = "https://snapshot.debian.org/mr"

var (
	// limiterTimeout is the current interval between requests;
	// lowerLimit doubles it (up to limiterMaxTimeout) when the
	// server starts refusing or resetting connections.
	limiterTimeout     time.Duration = time.Second / 20
	limiterMaxTimeout  time.Duration = time.Second * 2
	limiterBurst       int           = 1
	limiterUpdateDelay time.Duration = time.Second

	// Limiter throttles all requests to the /mr API.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
|
||||
|
||||
// lowerLimit doubles the request interval (capped at
// limiterMaxTimeout), schedules the new rate to take effect after
// limiterUpdateDelay and sleeps until then. Called when the server
// starts dropping connections.
func lowerLimit() {
	limiterTimeout = limiterTimeout * 2
	if limiterTimeout > limiterMaxTimeout {
		limiterTimeout = limiterMaxTimeout
	}
	log.Info().Msgf("limiter timeout set to %v", limiterTimeout)
	Limiter.SetLimitAt(
		time.Now().Add(limiterUpdateDelay),
		rate.Every(limiterTimeout),
	)
	log.Info().Msgf("wait %v", limiterUpdateDelay)
	time.Sleep(limiterUpdateDelay)
}

// Retries in case of 5xx errors
var Retries = 10
|
||||
|
||||
// https://salsa.debian.org/snapshot-team/snapshot/blob/master/API

// Package is the response of /mr/package/<package>/ — all known
// versions of a source package.
type Package struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result  []struct {
		Version string `json:"version"`
	} `json:"result"`
}

// Binpackages is the response of
// /mr/package/<package>/<version>/binpackages — the binary
// packages built from a source package version.
type Binpackages struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result  []struct {
		Name    string `json:"name"`
		Version string `json:"version"`
	} `json:"result"`
	Version string `json:"version"`
}

// Binary is the response of /mr/binary/<binary>/ — the known
// versions of a binary package and their source.
type Binary struct {
	Comment string `json:"_comment"`
	Binary  string `json:"binary"`
	Result  []struct {
		BinaryVersion string `json:"binary_version"`
		Name          string `json:"name"`
		Source        string `json:"source"`
		Version       string `json:"version"`
	} `json:"result"`
}

// Binfiles is the response of
// /mr/binary/<binpkg>/<binversion>/binfiles — per-architecture
// file hashes for a binary package version.
type Binfiles struct {
	Comment       string `json:"_comment"`
	Binary        string `json:"binary"`
	BinaryVersion string `json:"binary_version"`
	Result        []struct {
		Architecture string `json:"architecture"`
		Hash         string `json:"hash"`
	} `json:"result"`
}

// Fileinfo describes one occurrence of a file in the archive.
type Fileinfo struct {
	ArchiveName string `json:"archive_name"`
	FirstSeen   string `json:"first_seen"`
	Name        string `json:"name"`
	Path        string `json:"path"`
	Size        int    `json:"size"`
}

// Info is the response of /mr/file/<hash>/info.
type Info struct {
	Comment string     `json:"_comment"`
	Hash    string     `json:"hash"`
	Result  []Fileinfo `json:"result"`
}

// ErrNotFound is returned when the API replies 404.
var ErrNotFound = errors.New("404 not found")
|
||||
|
||||
func getJson(query string, target interface{}) (err error) {
|
||||
flog := log.With().Str("url", query).Logger()
|
||||
|
||||
var resp *http.Response
|
||||
for i := Retries; i > 0; i-- {
|
||||
flog.Trace().Msg("wait")
|
||||
Limiter.Wait(context.Background())
|
||||
|
||||
flog.Trace().Msg("start")
|
||||
resp, err = http.Get(query)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "reset by peer") ||
|
||||
strings.Contains(err.Error(), "connection refused") {
|
||||
flog.Debug().Err(err).Msg("")
|
||||
lowerLimit()
|
||||
continue
|
||||
}
|
||||
flog.Error().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
flog.Debug().Msgf("%s", resp.Status)
|
||||
|
||||
if resp.StatusCode == 404 {
|
||||
err = ErrNotFound
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode < 500 {
|
||||
break
|
||||
}
|
||||
|
||||
flog.Debug().Msgf("retry (%d left)", i)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
err = fmt.Errorf("%d (%s)", resp.StatusCode, query)
|
||||
}
|
||||
return json.NewDecoder(resp.Body).Decode(target)
|
||||
}
|
||||
|
||||
func GetPackage(name string) (pkg Package, err error) {
|
||||
query := fmt.Sprintf("%s/package/%s/", apiURL, name)
|
||||
err = getJson(query, &pkg)
|
||||
return
|
||||
}
|
||||
|
||||
func GetBinpackages(name, version string) (binpkgs Binpackages, err error) {
|
||||
query := fmt.Sprintf("%s/package/%s/%s/binpackages",
|
||||
apiURL, name, version)
|
||||
err = getJson(query, &binpkgs)
|
||||
return
|
||||
}
|
||||
|
||||
func GetBinary(pkg string) (binary Binary, err error) {
|
||||
query := fmt.Sprintf("%s/binary/%s/", apiURL, pkg)
|
||||
err = getJson(query, &binary)
|
||||
return
|
||||
}
|
||||
|
||||
func GetBinfiles(binpkg, binversion string) (binfiles Binfiles, err error) {
|
||||
query := fmt.Sprintf("%s/binary/%s/%s/binfiles",
|
||||
apiURL, binpkg, binversion)
|
||||
err = getJson(query, &binfiles)
|
||||
return
|
||||
}
|
||||
|
||||
func GetInfo(hash string) (info Info, err error) {
|
||||
query := fmt.Sprintf("%s/file/%s/info", apiURL, hash)
|
||||
|
||||
err = getJson(query, &info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(info.Result) == 0 {
|
||||
err = errors.New("empty response")
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,50 @@
|
|||
package mr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMR exercises the whole /mr API chain — package versions,
// binary packages, binary versions, binfiles and file info —
// against the live snapshot.debian.org service (network required).
func TestMR(t *testing.T) {
	name := "linux"
	t.Log(name)

	pkg, err := GetPackage(name)
	if err != nil {
		t.Fatal(err)
	}

	version := pkg.Result[0].Version
	t.Log(version)

	binpkgs, err := GetBinpackages(name, version)
	if err != nil {
		t.Fatal(err)
	}

	binpkgName := binpkgs.Result[0].Name
	t.Log(binpkgName)

	binary, err := GetBinary(binpkgName)
	if err != nil {
		t.Fatal(err)
	}

	binaryName := binary.Result[0].Name
	binaryVersion := binary.Result[0].BinaryVersion
	t.Log(binaryName, binaryVersion)

	binfiles, err := GetBinfiles(binaryName, binaryVersion)
	if err != nil {
		t.Fatal(err)
	}

	hash := binfiles.Result[0].Hash
	t.Log(hash)

	info, err := GetInfo(hash)
	if err != nil {
		t.Fatal(err)
	}

	t.Log(info)
}
|
|
@ -0,0 +1,166 @@
|
|||
package snapshot
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/mr"
|
||||
)
|
||||
|
||||
// URL is the base address of the snapshot.debian.org service.
const URL = "https://snapshot.debian.org"
|
||||
|
||||
func SourcePackageVersions(name string) (versions []string, err error) {
|
||||
pkg, err := mr.GetPackage(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, res := range pkg.Result {
|
||||
versions = append(versions, res.Version)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Package describes one binary package resolved through
// snapshot.debian.org.
type Package struct {
	Name string
	// Source is the source package this binary was built from.
	Source  string
	Version string
	Arch    string

	// Deb is the .deb artifact itself.
	Deb struct {
		Name string
		Hash string
		URL  string
	}

	// Repo locates the package within the snapshot archive.
	Repo struct {
		Snapshot string

		Archive string

		Component string
	}
}
|
||||
|
||||
func NewPackage(name, srcname, version string, archs []string) (
|
||||
p Package, err error) {
|
||||
|
||||
p.Name = name
|
||||
p.Source = srcname
|
||||
p.Version = version
|
||||
|
||||
p.Arch, p.Deb.Hash, err = p.getHash(archs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
info, err := mr.GetInfo(p.Deb.Hash)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
p.Deb.Name = info.Result[0].Name
|
||||
|
||||
p.Repo.Archive = info.Result[0].ArchiveName
|
||||
p.Repo.Snapshot = info.Result[0].FirstSeen
|
||||
|
||||
p.Deb.URL, err = url.JoinPath(URL, "archive", p.Repo.Archive,
|
||||
p.Repo.Snapshot, info.Result[0].Path, p.Deb.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
split := strings.Split(info.Result[0].Path, "/")
|
||||
if split[1] != "pool" || len(split) < 3 {
|
||||
err = fmt.Errorf("incorrect path: %s", info.Result[0].Path)
|
||||
return
|
||||
}
|
||||
p.Repo.Component = split[2]
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p Package) getHash(archs []string) (arch, hash string, err error) {
|
||||
binfiles, err := mr.GetBinfiles(p.Name, p.Version)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, res := range binfiles.Result {
|
||||
for _, allowedArch := range archs {
|
||||
if res.Architecture == allowedArch {
|
||||
arch = res.Architecture
|
||||
hash = res.Hash
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("hash not found")
|
||||
return
|
||||
}
|
||||
|
||||
func contains(pkgs []Package, pkg Package) bool {
|
||||
for _, p := range pkgs {
|
||||
if p.Name == pkg.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// filtered reports whether s contains any of the filter substrings.
func filtered(s string, filter []string) bool {
	for _, substr := range filter {
		if strings.Contains(s, substr) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Packages returns the binary packages built from srcname at
// exactly version, whose names match regex, restricted to archs
// and excluding names containing any filter substring.
func Packages(srcname, version, regex string, archs, filter []string) (
	pkgs []Package, err error) {

	binpkgs, err := mr.GetBinpackages(srcname, version)
	// An unknown version yields an empty (non-error) result.
	if err == mr.ErrNotFound {
		err = nil
		return
	}
	if err != nil {
		return
	}

	r := regexp.MustCompile(regex)

	for _, res := range binpkgs.Result {
		if res.Version != version {
			continue
		}
		if !r.MatchString(res.Name) || filtered(res.Name, filter) {
			continue
		}

		log.Trace().Msgf("matched %v", res.Name)

		var pkg Package
		pkg, err = NewPackage(res.Name, srcname, version, archs)
		if err != nil {
			return
		}

		// The API occasionally returns duplicate entries.
		if contains(pkgs, pkg) {
			log.Trace().Msgf("%v already in slice O_o", pkg.Name)
			continue
		}

		log.Trace().Msgf("append %v", pkg.Name)
		pkgs = append(pkgs, pkg)
	}

	return
}
|
|
@ -0,0 +1,37 @@
|
|||
package snapshot
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSourcePackageVersions lists linux source package versions
// from the live snapshot.debian.org service (network required).
func TestSourcePackageVersions(t *testing.T) {
	versions, err := SourcePackageVersions("linux")
	if err != nil {
		t.Fatal(err)
	}

	if len(versions) == 0 {
		t.Fatal(errors.New("empty response"))
	}

	t.Logf("found %d package versions", len(versions))
}
|
||||
|
||||
// TestPackages resolves the image/headers/kbuild binary packages of
// one known kernel version (network required).
func TestPackages(t *testing.T) {
	rx := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*)$`

	packages, err := Packages("linux", "5.10.179-1", rx,
		[]string{"amd64", "all"}, []string{})
	if err != nil {
		t.Fatal(err)
	}

	if len(packages) == 0 {
		t.Fatal(errors.New("empty response"))
	}

	for _, pkg := range packages {
		t.Logf("%#v", pkg)
	}
}
|
|
@ -0,0 +1,106 @@
|
|||
package distro
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// mu guards distros during registration.
var mu sync.Mutex

// distros is the global registry of distribution implementations,
// populated by Register (typically from each distro package's init).
var distros []distribution

// distribution is the interface each concrete distro
// implementation provides to the registry.
type distribution interface {
	Distro() Distro
	Equal(Distro) bool
	Packages() (packages []string, err error)
	Install(pkg string, headers bool) (err error)
	Kernels() (kernels []KernelInfo, err error)
	RootFS() string
}
|
||||
|
||||
func Register(d distribution) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
distros = append(distros, d)
|
||||
}
|
||||
|
||||
func List() (dds []Distro) {
|
||||
for _, dd := range distros {
|
||||
dds = append(dds, dd.Distro())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Distro identifies a distribution: an ID plus a release version
// string (e.g. "22.04").
type Distro struct {
	ID      ID
	Release string
}

// String returns a human-readable "<id> <release>" form.
func (d Distro) String() string {
	return d.ID.String() + " " + d.Release
}
|
||||
|
||||
// Packages returns the kernel packages of all registered
// distributions matching d; a zero-valued ID or empty Release acts
// as a wildcard.
func (d Distro) Packages() (packages []string, err error) {
	for _, dd := range distros {
		if d.ID != None && d.ID != dd.Distro().ID {
			continue
		}

		if d.Release != "" && !dd.Equal(d) {
			continue
		}

		var pkgs []string
		pkgs, err = dd.Packages()
		if err != nil {
			return
		}

		packages = append(packages, pkgs...)
	}
	return
}
|
||||
|
||||
func (d Distro) Install(pkg string, headers bool) (err error) {
|
||||
for _, dd := range distros {
|
||||
if !dd.Equal(d) {
|
||||
continue
|
||||
}
|
||||
|
||||
return dd.Install(pkg, headers)
|
||||
}
|
||||
return errors.New("not found")
|
||||
}
|
||||
|
||||
func (d Distro) Kernels() (kernels []KernelInfo, err error) {
|
||||
for _, dd := range distros {
|
||||
if dd.Equal(d) {
|
||||
return dd.Kernels()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Equal reports whether d and to refer to the same registered
// distribution; false when d itself is not registered.
func (d Distro) Equal(to Distro) bool {
	for _, dd := range distros {
		if dd.Equal(d) {
			// Delegate the comparison to d's own implementation.
			return dd.Equal(to)
		}
	}
	return false
}
|
||||
|
||||
func (d Distro) RootFS() string {
|
||||
for _, dd := range distros {
|
||||
if dd.Equal(d) {
|
||||
return dd.RootFS()
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Command pairs a shell command with the distro environment it
// should run in.
type Command struct {
	Distro  Distro
	Command string
}
|
|
@ -0,0 +1,72 @@
|
|||
package distro
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ID of the distro
type ID int

const (
	None ID = iota
	// Ubuntu https://ubuntu.com/
	Ubuntu
	// CentOS https://www.centos.org/
	CentOS
	// Debian https://www.debian.org/
	Debian
	// OracleLinux https://www.oracle.com/linux/
	OracleLinux
	// OpenSUSE https://opensuse.org/
	OpenSUSE
)

// IDs lists every valid ID, including None.
var IDs = []ID{
	None, Ubuntu, CentOS, Debian, OracleLinux, OpenSUSE,
}

// nameStrings maps ID values to their canonical names; the order
// must match the const block above.
var nameStrings = [...]string{
	"",
	"Ubuntu",
	"CentOS",
	"Debian",
	"OracleLinux",
	"openSUSE",
}
|
||||
|
||||
// NewID parses a distro name (case-insensitive) into an ID; an
// empty name maps to None.
func NewID(name string) (id ID, err error) {
	err = id.UnmarshalTOML([]byte(name))
	return
}
|
||||
|
||||
// String returns the canonical distro name.
// NOTE(review): indexes nameStrings without a bounds check, so an
// out-of-range ID panics — confirm callers only use values in IDs.
func (id ID) String() string {
	return nameStrings[id]
}
|
||||
|
||||
// UnmarshalTOML is for support github.com/naoina/toml
|
||||
func (id *ID) UnmarshalTOML(data []byte) (err error) {
|
||||
name := strings.Trim(string(data), `"`)
|
||||
if strings.EqualFold(name, "Ubuntu") {
|
||||
*id = Ubuntu
|
||||
} else if strings.EqualFold(name, "CentOS") {
|
||||
*id = CentOS
|
||||
} else if strings.EqualFold(name, "Debian") {
|
||||
*id = Debian
|
||||
} else if strings.EqualFold(name, "OracleLinux") {
|
||||
*id = OracleLinux
|
||||
} else if strings.EqualFold(name, "openSUSE") {
|
||||
*id = OpenSUSE
|
||||
} else if name != "" {
|
||||
err = fmt.Errorf("distro %s is not supported", name)
|
||||
} else {
|
||||
*id = None
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalTOML is for support github.com/naoina/toml
|
||||
func (id ID) MarshalTOML() (data []byte, err error) {
|
||||
data = []byte(`"` + id.String() + `"`)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
package distro
|
||||
|
||||
import "code.dumpstack.io/tools/out-of-tree/qemu"
|
||||
|
||||
// ByRootFS is sorting by .RootFS lexicographically
// (implements sort.Interface).
type ByRootFS []KernelInfo

func (a ByRootFS) Len() int           { return len(a) }
func (a ByRootFS) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByRootFS) Less(i, j int) bool { return a[i].RootFS < a[j].RootFS }
|
||||
|
||||
// KernelInfo defines kernels.toml entries
type KernelInfo struct {
	// Distro identifies which distribution the kernel belongs to.
	Distro Distro

	// Must be *exactly* same as in `uname -r`
	KernelVersion string

	KernelRelease string

	// Build-time information
	KernelSource  string // module/exploit will be build on host
	ContainerName string

	// Runtime information
	KernelPath  string
	InitrdPath  string
	ModulesPath string

	CPU qemu.CPU

	RootFS string

	// Debug symbols
	VmlinuxPath string

	// Package name, not mandatory (yet)
	Package string

	// Blocklisted marks kernels excluded from runs.
	Blocklisted bool
}
|
|
@ -0,0 +1,301 @@
|
|||
package opensuse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// init registers every supported openSUSE release with the global
// distro registry.
func init() {
	releases := []string{
		"12.1", "12.2", "12.3",
		"13.1", "13.2",
		"42.1", "42.2", "42.3",
		"15.0", "15.1", "15.2", "15.3", "15.4", "15.5",
	}

	for _, release := range releases {
		distro.Register(OpenSUSE{release: release})
	}
}
|
||||
|
||||
// OpenSUSE implements the distro interface for openSUSE releases.
type OpenSUSE struct {
	release string
}

// Equal reports whether d refers to this openSUSE release.
func (suse OpenSUSE) Equal(d distro.Distro) bool {
	return suse.release == d.Release && distro.OpenSUSE == d.ID
}

// Distro returns the generic distro descriptor for this release.
func (suse OpenSUSE) Distro() distro.Distro {
	return distro.Distro{ID: distro.OpenSUSE, Release: suse.release}
}
|
||||
|
||||
// Packages lists the kernel-default package versions available for
// this release by running zypper inside the release's container.
func (suse OpenSUSE) Packages() (pkgs []string, err error) {
	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	// Discontinued releases are imported from prebuilt container
	// images; current releases use upstream opensuse/leap images.
	var name string
	if strings.HasPrefix(suse.release, "12") {
		var cnt string
		switch suse.release {
		// NOTE(review): 12.2 reuses the 12.1 image — confirm
		// this is intentional.
		case "12.1", "12.2":
			name = "opensuse:12.1"
			cnt = "openSUSE-12.1"
		case "12.3":
			name = "opensuse:12.3"
			cnt = "openSUSE-12.3"
		}

		cnturl := cache.ContainerURL(cnt)
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "13") {
		name = "opensuse:13"
		cnturl := cache.ContainerURL("openSUSE-13.2")
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "42") {
		name = "opensuse/leap:42"
	} else if strings.HasPrefix(suse.release, "15") {
		name = "opensuse/leap:" + suse.release
	}

	if !c.Exist() {
		err = c.Build(name, suse.envs(), suse.runs())
		if err != nil {
			return
		}
	}

	// Extract the version column of x86_64 kernel-default rows
	// from zypper's table output.
	cmd := "zypper search -s --match-exact kernel-default | grep x86_64 " +
		"| cut -d '|' -f 4 | sed 's/ //g'"

	output, err := c.Run("", []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
|
||||
|
||||
func (suse OpenSUSE) Kernels() (kernels []distro.KernelInfo, err error) {
|
||||
c, err := container.New(suse.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernels, err = c.Kernels()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := range kernels {
|
||||
kernels[i].KernelRelease = strings.Replace(
|
||||
kernels[i].KernelRelease, "-default", "", -1)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (suse OpenSUSE) envs() (envs []string) {
|
||||
return
|
||||
}
|
||||
|
||||
// runs returns the shell commands used to build the base container
// image for this openSUSE release: set up zypper repositories,
// install the kernel development toolchain, and pre-cache kernel
// package dependencies.
func (suse OpenSUSE) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// Mirror roots: EOL releases are only served from the
	// discontinued archive.
	main := "http://download.opensuse.org/"
	discontinued := "http://ftp.gwdg.de/pub/opensuse/discontinued/"

	var repourls []string

	if strings.HasPrefix(suse.release, "12") ||
		strings.HasPrefix(suse.release, "13") {

		dist := discontinued + "distribution/%s/repo/oss/"
		update := discontinued + "update/%s/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "42") {
		dist := discontinued + "distribution/leap/%s/repo/oss/suse/"
		update := discontinued + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "15") {
		dist := main + "distribution/leap/%s/repo/oss/"
		update := main + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)

		switch suse.release {
		case "15.3", "15.4", "15.5":
			// NOTE(review): 15.3+ gets an extra update repo
			// under update/leap/<ver>/sle/ — presumably the
			// SLE-shared binaries; confirm.
			sle := main + "update/leap/%s/sle/"
			repourls = append(repourls,
				fmt.Sprintf(sle, suse.release),
			)
		}
	}

	// Start from a clean slate: the stock image repo files may
	// point to mirrors that no longer exist for EOL releases.
	cmdf("rm /etc/zypp/repos.d/*")

	switch suse.release {
	case "12.1", "12.2":
		// Temporary repo pointing at the 12.3 tree, used below to
		// install dracut; priority is lowered so it does not
		// shadow the release's own packages.
		repourl := discontinued + "distribution/12.3/repo/oss/"
		cmdf(`echo -e `+
			`"[dracut]\n`+
			`name=dracut\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			// higher number is lower priority
			// default is 99
			`priority=100\n`+
			`baseurl=%s" > /etc/zypp/repos.d/dracut.repo`,
			repourl,
		)
	}

	// Register the release/update repositories collected above,
	// named by index.
	for i, repourl := range repourls {
		cmdf(`echo -e `+
			`"[%d]\n`+
			`name=%d\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			`baseurl=%s" > /etc/zypp/repos.d/%d.repo`,
			i, i, repourl, i,
		)
	}

	cmdf("zypper -n refresh")

	params := "--no-recommends --force-resolution"
	if !strings.HasPrefix(suse.release, "12") {
		// NOTE(review): --replacefiles is skipped on 12.x —
		// presumably unsupported by that zypper version; confirm.
		params += " --replacefiles"
	}

	cmdf("zypper -n update %s", params)

	cmdf("zypper --no-refresh -n install %s -t pattern devel_kernel", params)

	// Cache dependencies
	cmdf("zypper -n install %s kernel-default kernel-default-devel "+
		"&& zypper -n remove -U kernel-default kernel-default-devel",
		params)

	switch suse.release {
	case "12.1", "12.2":
		// Install dracut strictly from the temporary 12.3 repo
		// (-r dracut), then drop that repo again.
		cmdf("zypper -n install %s -r dracut dracut", params)
		cmdf("rm /etc/zypp/repos.d/dracut.repo")
	case "12.3":
		cmdf("zypper -n install %s dracut", params)
	}

	if !strings.HasPrefix(suse.release, "12") {
		cmdf("zypper --no-refresh -n install %s kmod which", params)
	}

	if strings.HasPrefix(suse.release, "13") {
		cmdf("zypper --no-refresh -n install %s kernel-firmware", params)
	}

	// Kernels are installed per-image later (see Install); keep the
	// base layer free of kernel files.
	cmdf("rm -rf /boot/*")
	cmdf("rm -rf /lib/modules/*")
	return
}
|
||||
|
||||
// Install installs the requested kernel-default version (and the
// matching -devel package when headers is true) inside the container,
// regenerates the initramfs with the drivers needed to boot under
// qemu, and copies kernel, modules and sources into the /target
// volumes.
func (suse OpenSUSE) Install(version string, headers bool) (err error) {
	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	installcmd := "zypper --no-refresh -n install "
	if !strings.HasPrefix(suse.release, "12") {
		// NOTE(review): --replacefiles is skipped on 12.x —
		// presumably unsupported there; confirm.
		installcmd += " --replacefiles"
	}
	installcmd += " --no-recommends --force-resolution --capability"
	cmdf("%s kernel-default=%s", installcmd, version)
	if headers {
		cmdf("%s kernel-default-devel=%s", installcmd, version)
	}

	// Custom dracut module "42workaround": hook into pre-mount and
	// modprobe the network/packet modules needed by the VM.
	cmdf("mkdir /usr/lib/dracut/modules.d/42workaround")
	wsetuppath := "/usr/lib/dracut/modules.d/42workaround/module-setup.sh"

	cmdf("echo 'check() { return 0; }' >> %s", wsetuppath)
	cmdf("echo 'depends() { return 0; }' >> %s", wsetuppath)
	cmdf(`echo 'install() { `+
		`inst_hook pre-mount 91 "$moddir/workaround.sh"; `+
		`}' >> %s`, wsetuppath)
	cmdf("echo 'installkernel() { "+
		"instmods af_packet e1000; "+
		"}' >> %s", wsetuppath)

	wpath := "/usr/lib/dracut/modules.d/42workaround/workaround.sh"

	cmdf("echo '#!/bin/sh' >> %s", wpath)
	cmdf("echo 'modprobe af_packet' >> %s", wpath)
	cmdf("echo 'modprobe e1000' >> %s", wpath)

	// Drivers to embed in the initramfs.
	modules := "ata_piix e1000 rfkill af_packet"
	if suse.release != "15.2" {
		// NOTE(review): 15.2 is special-cased to omit these
		// drivers; reason not visible here — confirm.
		modules += " libata ext4 sd_mod"
	}

	format := "dracut "
	format += "-a workaround "

	if strings.HasPrefix(suse.release, "12") {
		// NOTE(review): 12.x presumably ships a dracut without
		// --force-drivers, hence the different flags — confirm.
		format += "--no-hostonly --add-drivers '%s' "
	} else {
		format += "--force-drivers '%s' "
	}
	// Exactly one kernel is installed in this image, so
	// $(ls /lib/modules) expands to its version string.
	format += "-f /boot/initrd-$(ls /lib/modules) $(ls /lib/modules)"

	cmdf(format, modules)

	// Export the results to the host via the /target volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	// Redirect the usual volume destinations under /target so the
	// copies above land in the host-side kernel cache.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
func (suse OpenSUSE) RootFS() string {
|
||||
return fmt.Sprintf("out_of_tree_opensuse_%s.img",
|
||||
strings.Split(suse.release, ".")[0])
|
||||
}
|
|
@ -0,0 +1,222 @@
|
|||
package oraclelinux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// init registers all supported Oracle Linux releases in the global
// distro registry.
func init() {
	releases := []string{"6", "7", "8", "9"}

	for _, release := range releases {
		distro.Register(OracleLinux{release: release})
	}
}
|
||||
|
||||
// OracleLinux implements distro-specific operations (container
// build, package listing, kernel install) for Oracle Linux.
type OracleLinux struct {
	// release is the major Oracle Linux version, e.g. "8".
	release string
}
|
||||
|
||||
func (ol OracleLinux) Equal(d distro.Distro) bool {
|
||||
return ol.release == d.Release && distro.OracleLinux == d.ID
|
||||
}
|
||||
|
||||
func (ol OracleLinux) Distro() distro.Distro {
|
||||
return distro.Distro{ID: distro.OracleLinux, Release: ol.release}
|
||||
}
|
||||
|
||||
// Packages returns all binary kernel and kernel-uek package names
// (with versions) available for this Oracle Linux release, building
// the base container first if it does not exist yet.
func (ol OracleLinux) Packages() (pkgs []string, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("oraclelinux:"+ol.release,
			ol.envs(), ol.runs())
		if err != nil {
			return
		}
	}

	if ol.release == "8" {
		// Image for ol9 is required for some kernels
		// See notes in OracleLinux.Kernels()
		_, err = OracleLinux{release: "9"}.Packages()
		if err != nil {
			return
		}
	}

	// List versioned kernel/kernel-uek packages, excluding source
	// packages; the first column is the package name.
	cmd := "yum search kernel --showduplicates 2>/dev/null " +
		"| grep '^kernel-[0-9]\\|^kernel-uek-[0-9]' " +
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
|
||||
|
||||
// Kernels lists kernels available in the container image and applies
// per-version workarounds: build the newest uek kernels in the next
// release's container, disable SMAP for kernels known to crash with
// it, and blocklist kernels known to soft-lockup.
func (ol OracleLinux) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	kernels, err = c.Kernels()
	if err != nil {
		return
	}

	// Some kernels do not work with the smap enabled
	//
	// BUG: unable to handle kernel paging request at 00007fffc64b2fda
	// IP: [<ffffffff8127a9ed>] strnlen+0xd/0x40"
	// ...
	// Call Trace:
	// [<ffffffff81123bf8>] dtrace_psinfo_alloc+0x138/0x390
	// [<ffffffff8118b143>] do_execve_common.isra.24+0x3c3/0x460
	// [<ffffffff81554d70>] ? rest_init+0x80/0x80
	// [<ffffffff8118b1f8>] do_execve+0x18/0x20
	// [<ffffffff81554dc2>] kernel_init+0x52/0x180
	// [<ffffffff8157cd2c>] ret_from_fork+0x7c/0xb0
	//
	smapBlocklist := []string{
		"3.8.13-16",
		"3.8.13-26",
		"3.8.13-35",
		"3.8.13-44",
		"3.8.13-55",
		"3.8.13-68",
		"3.8.13-98",
	}

	// BUG: soft lockup - CPU#0 stuck for 61s!
	// Matches 2.6.32-300.32.2 .. 2.6.32-300.39.x.
	blocklistr := regexp.MustCompile(
		`2[.]6[.]32-300[.]3(2[.][2-3]|[3-9][.][0-9])`)

	for i, k := range kernels {
		// The latest uek kernels require gcc-11, which is
		// only present in el8 with scl load, so not so
		// convinient. It is possible to just build from
		// the next release container.
		if strings.Contains(k.KernelVersion, "5.15.0") {
			cnt := strings.Replace(k.ContainerName, "8", "9", -1)
			kernels[i].ContainerName = cnt
		}

		for _, ver := range smapBlocklist {
			if strings.Contains(k.KernelVersion, ver) {
				kernels[i].CPU.Flags = append(
					kernels[i].CPU.Flags, "smap=off",
				)
			}
		}

		if blocklistr.MatchString(k.KernelVersion) {
			kernels[i].Blocklisted = true
		}
	}

	return
}
|
||||
|
||||
func (ol OracleLinux) envs() (envs []string) {
|
||||
return
|
||||
}
|
||||
|
||||
// runs returns the shell commands that build the base container:
// enable all yum repositories, lift the installonly limit so many
// kernels can coexist, and install the development toolchain.
func (ol OracleLinux) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// Lexicographic compare is safe only because the registered
	// releases are single-digit strings ("6".."9").
	if ol.release < "6" {
		log.Fatal().Msgf("no support for pre-EL6")
	}

	cmdf("sed -i 's/enabled=0/enabled=1/' /etc/yum.repos.d/*")
	// Raise installonly_limit so old kernels are not purged when
	// many are installed in parallel ("|| true": one of the two
	// config files may not exist depending on the release).
	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf /etc/dnf/dnf.conf || true")
	cmdf("yum -y update")
	cmdf("yum -y groupinstall 'Development Tools'")

	packages := "linux-firmware grubby"
	if ol.release <= "7" {
		// NOTE(review): libdtrace-ctf is only installed on
		// EL6/EL7 — presumably unavailable later; confirm.
		packages += " libdtrace-ctf"
	}

	cmdf("yum -y install %s", packages)

	return
}
|
||||
|
||||
// Install installs pkgname (a kernel or kernel-uek package, plus the
// matching -devel package when headers is true), rebuilds the
// initramfs with the drivers needed to boot under qemu, and copies
// kernel, modules and sources into the /target volumes.
func (ol OracleLinux) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		if strings.Contains(pkgname, "uek") {
			// kernel-uek-<ver> -> kernel-uek-devel-<ver>
			headerspkg = strings.Replace(pkgname,
				"kernel-uek", "kernel-uek-devel", -1)
		} else {
			// kernel-<ver> -> kernel-devel-<ver>
			headerspkg = strings.Replace(pkgname,
				"kernel", "kernel-devel", -1)
		}
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("yum -y install %s %s", pkgname, headerspkg)

	// Strip the package-name prefix to get the bare version string
	// used in the initramfs filename.
	var version string
	if strings.Contains(pkgname, "uek") {
		version = strings.Replace(pkgname, "kernel-uek-", "", -1)
	} else {
		version = strings.Replace(pkgname, "kernel-", "", -1)
	}

	if ol.release <= "7" {
		// NOTE(review): EL6/EL7 use --add-drivers only —
		// presumably their dracut lacks --force-drivers; confirm.
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}

	// Export the results to the host via the /target volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	// Redirect the usual volume destinations under /target so the
	// copies above land in the host-side kernel cache.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
func (ol OracleLinux) RootFS() string {
|
||||
return fmt.Sprintf("out_of_tree_oraclelinux_%s.img", ol.release)
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package oraclelinux
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// TestOracleLinux exercises the Oracle Linux distro integration:
// registry equality plus a real package listing. The latter builds a
// container, so this is an integration test (requires Docker) rather
// than a unit test.
func TestOracleLinux(t *testing.T) {
	assert := assert.New(t)

	u := OracleLinux{release: "9"}

	assert.True(u.Equal(distro.Distro{Release: "9", ID: distro.OracleLinux}))

	assert.NotEmpty(u.Packages())
}
|
|
@ -0,0 +1,165 @@
|
|||
package ubuntu
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// init registers all supported Ubuntu LTS releases in the global
// distro registry.
func init() {
	releases := []string{
		"12.04",
		"14.04",
		"16.04",
		"18.04",
		"20.04",
		"22.04",
	}

	for _, release := range releases {
		distro.Register(Ubuntu{release: release})
	}
}
|
||||
|
||||
// Ubuntu implements distro-specific operations (container build,
// package listing, kernel install) for Ubuntu.
type Ubuntu struct {
	// release is the Ubuntu version, e.g. "22.04".
	release string
}
|
||||
|
||||
func (u Ubuntu) Equal(d distro.Distro) bool {
|
||||
return u.release == d.Release && distro.Ubuntu == d.ID
|
||||
}
|
||||
|
||||
func (u Ubuntu) Distro() distro.Distro {
|
||||
return distro.Distro{ID: distro.Ubuntu, Release: u.release}
|
||||
}
|
||||
|
||||
// Packages returns all linux-image-*-generic package names available
// in the apt repositories for this release, building the base
// container first if it does not exist yet.
func (u Ubuntu) Packages() (pkgs []string, err error) {
	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
		if err != nil {
			return
		}
	}

	// Only versioned generic kernel images; the first column of
	// apt-cache output is the package name.
	cmd := "apt-cache search " +
		"--names-only '^linux-image-[0-9\\.\\-]*-generic$' " +
		"| awk '{ print $1 }'"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
|
||||
|
||||
func (u Ubuntu) Kernels() (kernels []distro.KernelInfo, err error) {
|
||||
c, err := container.New(u.Distro())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return c.Kernels()
|
||||
}
|
||||
|
||||
func (u Ubuntu) envs() (envs []string) {
|
||||
envs = append(envs, "DEBIAN_FRONTEND=noninteractive")
|
||||
return
|
||||
}
|
||||
|
||||
// runs returns the shell commands that build the base container
// image: fix apt sources for EOL releases, install the toolchain,
// and pre-cache kernel package dependencies.
func (u Ubuntu) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// Lexicographic compare works because every registered release
	// string has the same "NN.NN" width.
	if u.release < "14.04" {
		// EOL releases are served from old-releases only.
		cmdf("sed -i 's/archive.ubuntu.com/old-releases.ubuntu.com/' " +
			"/etc/apt/sources.list")
	}

	cmdf("apt-get update")
	cmdf("apt-get install -y build-essential libelf-dev")
	cmdf("apt-get install -y wget git")

	if u.release == "12.04" {
		// NOTE(review): grub is stubbed out here so package
		// scripts that probe it succeed in the container —
		// presumed reason; confirm.
		cmdf("apt-get install -y grub")
		cmdf("cp /bin/true /usr/sbin/grub-probe")
		cmdf("mkdir -p /boot/grub")
		cmdf("touch /boot/grub/menu.lst")
	}

	if u.release < "14.04" {
		return
	}

	if u.release == "22.04" {
		cmdf("apt-get install -y gcc-12")
		return
	}

	cmdf("apt-get install -y libseccomp-dev")

	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.

	cmd := "export HEADERS=$(apt-cache search " +
		"--names-only '^linux-headers-[0-9\\.\\-]*-generic' " +
		"| awk '{ print $1 }' | head -n 1)"

	cmd += " KERNEL=$(echo $HEADERS | sed 's/headers/image/')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/headers/modules/')"

	cmd += " && apt-get install -y $HEADERS $KERNEL $MODULES"
	cmd += " && apt-get remove -y $HEADERS $KERNEL $MODULES"

	cmdf(cmd)

	return
}
|
||||
|
||||
// Install installs the given linux-image package (and the matching
// linux-headers package when headers is true) inside the container
// and copies kernel, modules and sources into the /target volumes.
func (u Ubuntu) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		// linux-image-<ver>-generic -> linux-headers-<ver>-generic
		headerspkg = strings.Replace(pkgname, "image", "headers", -1)
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("apt-get install -y %s %s", pkgname, headerspkg)
	// Export the results to the host via the /target volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	// Redirect the usual volume destinations under /target so the
	// copies above land in the host-side kernel cache.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
|
||||
|
||||
func (u Ubuntu) RootFS() string {
|
||||
return fmt.Sprintf("out_of_tree_ubuntu_%s.img",
|
||||
strings.Replace(u.release, ".", "__", -1))
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package ubuntu
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
// TestUbuntu exercises the Ubuntu distro integration: registry
// equality plus a real package listing. The latter builds a
// container, so this is an integration test (requires Docker) rather
// than a unit test.
func TestUbuntu(t *testing.T) {
	assert := assert.New(t)

	u := Ubuntu{release: "22.04"}

	assert.True(u.Equal(distro.Distro{Release: "22.04", ID: distro.Ubuntu}))

	assert.NotEmpty(u.Packages())
}
|
|
@ -0,0 +1 @@
|
|||
project = "out-of-tree"
|
|
@ -0,0 +1,30 @@
|
|||
out-of-tree
|
||||
===========
|
||||
|
||||
*out-of-tree* is the kernel {module, exploit} development tool.
|
||||
|
||||
*out-of-tree* was created with the purpose of decreasing the
complexity of the environment for developing, testing and debugging
Linux kernel exploits and out-of-tree kernel modules (that's why the
tool got the name "out-of-tree").
|
||||
|
||||
While I'm trying to keep this documentation up-to-date, some
information may be missing. Use ``out-of-tree --help-long`` to check
all the features.
|
||||
|
||||
If you found anything missed here, please make a pull request or send
|
||||
patches to patch@dumpstack.io.
|
||||
|
||||
If you need personal support, your company is interested in the
|
||||
project or you just want to share some thoughts -- feel free to write
|
||||
to root@dumpstack.io.
|
||||
|
||||
Contents
|
||||
========
|
||||
|
||||
:ref:`Keyword Index <genindex>`
|
||||
|
||||
.. toctree::
|
||||
|
||||
introduction.rst
|
||||
installation.rst
|
|
@ -0,0 +1,79 @@
|
|||
Installation (from source)
==========================
|
||||
|
||||
OS/Distro-specific
|
||||
==================
|
||||
|
||||
Ubuntu
|
||||
------
|
||||
|
||||
Install dependencies::
|
||||
|
||||
$ sudo snap install go --classic
|
||||
$ # Install docker: https://docs.docker.com/engine/install/ubuntu/
|
||||
$ sudo apt install qemu-system-x86 build-essential gdb
|
||||
|
||||
macOS
|
||||
-----
|
||||
|
||||
Install dependencies::
|
||||
|
||||
$ brew install go qemu
|
||||
$ brew cask install docker
|
||||
|
||||
NixOS
|
||||
-----
|
||||
|
||||
There's a minimal configuration that you need to apply::
|
||||
|
||||
#!nix
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
virtualisation.docker.enable = true;
|
||||
virtualisation.libvirtd.enable = true;
|
||||
environment.systemPackages = with pkgs; [
|
||||
go git
|
||||
];
|
||||
}
|
||||
|
||||
Gentoo
|
||||
------
|
||||
|
||||
Install dependencies::
|
||||
|
||||
$ sudo emerge app-emulation/qemu app-emulation/docker dev-lang/go
|
||||
|
||||
Fedora
|
||||
------
|
||||
|
||||
Install dependencies::
|
||||
|
||||
$ sudo dnf install go qemu moby-engine
|
||||
|
||||
Common
|
||||
======
|
||||
|
||||
Setup environment::
|
||||
|
||||
$ echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc
|
||||
$ source ~/.bashrc
|
||||
|
||||
Build *out-of-tree*::
|
||||
|
||||
$ git clone https://code.dumpstack.io/tools/out-of-tree
|
||||
$ cd out-of-tree
|
||||
$ CGO_ENABLED=1 go build -o ~/bin/out-of-tree
|
||||
|
||||
.. note::
|
||||
On a GNU/Linux you need to add your user to docker group if you want
|
||||
to use *out-of-tree* without sudo. Note that this has a **serious**
|
||||
security implications. Check *Docker* documentation for more
|
||||
information.
|
||||
|
||||
Test that everything works::
|
||||
|
||||
$ cd out-of-tree/examples/kernel-exploit
|
||||
$ out-of-tree kernel autogen --max=1
|
||||
$ out-of-tree pew --max=1
|
||||
|
||||
Enjoy!
|
|
@ -0,0 +1,109 @@
|
|||
Introduction
|
||||
============
|
||||
|
||||
*out-of-tree* is written in *Go*, it uses *Docker* for generating
|
||||
kernel/filesystem images and *Qemu* for virtualization.
|
||||
|
||||
It is also possible to generate kernels from the host system and to
use custom ones.
|
||||
|
||||
*out-of-tree* supports *GNU/Linux* (usually it is tested on NixOS and
the latest Ubuntu LTS) and *macOS*. Technically, all systems that are
supported by Go, Docker, and Qemu should work well. Please create an
issue if you notice any integration problem on your operating system.
|
||||
|
||||
All *Qemu* interaction is stateless.
|
||||
|
||||
*out-of-tree* requires metadata (``.out-of-tree.toml``) to work. TOML
(Tom's Obvious, Minimal Language) is used for the kernel
module/exploit description.
|
||||
|
||||
``.out-of-tree.toml`` is mandatory: you need to have it in the
current directory (usually the project of the kernel module/exploit)
or point to it with the ``--path`` flag.
|
||||
|
||||
Files
|
||||
-----
|
||||
|
||||
All data is stored in ``~/.out-of-tree/``.
|
||||
|
||||
- *db.sqlite* contains logs related to runs with ``out-of-tree pew``;
  debug mode (``out-of-tree debug``) does not store any data.
|
||||
|
||||
- *images* used for filesystem images (rootfs images that used for
|
||||
``qemu -hda ...``) that can be generated with the
|
||||
``tools/qemu-*-img/...``.
|
||||
|
||||
- *kernels* stores all kernel ``vmlinuz/initrd/config/...`` files that
  were generated previously with some *Docker magic*.
|
||||
|
||||
- *kernels.toml* contains metadata for generated kernels. It's not
|
||||
supposed to be edited by hands.
|
||||
|
||||
- *kernels.user.toml* is default path for custom kernels definition.
|
||||
|
||||
- *Ubuntu* (or *Centos*/*Debian*/...) is the Dockerfiles tree
|
||||
(DistroName/DistroVersion/Dockerfile). Each Dockerfile contains a
|
||||
base layer and incrementally updated list of kernels that must be
|
||||
installed.
|
||||
|
||||
Overview
|
||||
---------
|
||||
|
||||
*out-of-tree* creates a debugging environment based on **defined** kernels::
|
||||
|
||||
$ out-of-tree debug --kernel 'Ubuntu:4.15.0-58-generic'
|
||||
[*] KASLR SMEP SMAP
|
||||
[*] gdb is listening on tcp::1234
|
||||
[*] build result copied to /tmp/exploit
|
||||
|
||||
ssh -o StrictHostKeyChecking=no -p 29308 root@127.133.45.236
|
||||
gdb /usr/lib/debug/boot/vmlinux-4.15.0-58-generic -ex 'target remote tcp::1234'
|
||||
|
||||
out-of-tree> help
|
||||
help : print this help message
|
||||
log : print qemu log
|
||||
clog : print qemu log and cleanup buffer
|
||||
cleanup : cleanup qemu log buffer
|
||||
ssh : print arguments to ssh command
|
||||
quit : quit
|
||||
out-of-tree>
|
||||
|
||||
*out-of-tree* uses three stages for automated runs:
|
||||
|
||||
- Build
|
||||
|
||||
- Inside the docker container (default).
|
||||
- Binary version (de facto skip stage).
|
||||
- On host.
|
||||
|
||||
- Run
|
||||
|
||||
- Insmod for the kernel module.
|
||||
- This step is skipped for exploits.
|
||||
|
||||
- Test
|
||||
|
||||
- Run the test.sh script on the target machine.
|
||||
- Test script is run from *root* for the kernel module.
|
||||
- Test script is run from *user* for the kernel exploit.
|
||||
- Test script for the kernel module is fully custom (only return
|
||||
value is checked).
|
||||
- Test script for the kernel exploit receives two parameters:
|
||||
|
||||
- Path to exploit
|
||||
- Path to file that must be created with root privileges.
|
||||
|
||||
- If there's no test.sh script then default
|
||||
(``echo touch FILE | exploit``) one is used.
|
||||
|
||||
Security
|
||||
--------
|
||||
|
||||
*out-of-tree* is not supposed to be used on multi-user systems or with
|
||||
an untrusted input.
|
||||
|
||||
Meanwhile, all modern hypervisors are supporting nested
|
||||
virtualization, which means you can use it for isolating *out-of-tree*
|
||||
if you want to work with an untrusted input (e.g. with a mass-scale
|
||||
testing public proofs-of-concept).
|
|
@ -0,0 +1 @@
|
|||
logs
|
|
@ -1,33 +1,28 @@
|
|||
# out-of-tree configuration file
|
||||
# docs at https://out-of-tree.io
|
||||
name = "out-of-tree exploit example"
|
||||
name = "exploit_example"
|
||||
type = "exploit"
|
||||
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
release_mask = "4.4.0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*"
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
kernel = { regex = "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*" }
|
||||
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
release_mask = "4.8.0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*"
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
kernel = { regex = "4[.]8[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*" }
|
||||
|
||||
[[supported_kernels]]
|
||||
[[targets]]
|
||||
# Can be Ubuntu/CentOS/Debian/etc.
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
# regex for `uname -r`
|
||||
# See also: regex-golang.appspot.com
|
||||
# stupid way to generate: $ echo '4.4.0-('$(seq 44 | xargs echo | sed 's/ /|/g')')-.*'
|
||||
release_mask = "4.10.0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*"
|
||||
kernel = { regex = "4[.]10[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*" }
|
||||
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
release_mask = "4.11.0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*"
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
kernel = { regex = "4[.]11[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*" }
|
||||
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
release_mask = "4.13.0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*"
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
kernel = { regex = "4[.]13[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*" }
|
||||
|
|
|
@ -317,6 +317,7 @@ void redact(const char *fmt, ...) {
|
|||
va_start(args, fmt);
|
||||
if(doredact) {
|
||||
fprintf(stdout, "[!] ( ( R E D A C T E D ) )\n");
|
||||
va_end(args);
|
||||
return;
|
||||
}
|
||||
fprintf(stdout, "[*] ");
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# - KERNEL: kernel headers path
|
||||
# - TARGET: name of exploit binary that MUST be produced by makefile.
|
||||
# - $(TARGET)_test: name of test binary that MUST be produced by makefile
|
||||
# and it's will be runned on a LPE stage. TARGET_TEST MUST accept two argument:
|
||||
# and it's will be executed on a LPE stage. TARGET_TEST MUST accept two argument:
|
||||
# - Path to exploit binary
|
||||
# - File that MUST be created with exploit. It uses for test that exploit works
|
||||
# correctly.
|
||||
|
|
|
@ -12,3 +12,4 @@ GPATH
|
|||
GRTAGS
|
||||
GTAGS
|
||||
.cache.mk
|
||||
logs
|
|
@ -1,25 +1,26 @@
|
|||
# out-of-tree configuration file
|
||||
# docs at https://out-of-tree.io
|
||||
name = "out-of-tree module example"
|
||||
name = "module_example"
|
||||
type = "module"
|
||||
|
||||
[[supported_kernels]]
|
||||
[[targets]]
|
||||
# Can be Ubuntu/CentOS/Debian/etc.
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "16.04"
|
||||
distro = { id = "Ubuntu", release = "16.04" }
|
||||
# regex for `uname -r`
|
||||
# See also: regex-golang.appspot.com
|
||||
release_mask = "4.4.0-70-.*"
|
||||
kernel = { regex = "4[.]4[.]0-70-.*" }
|
||||
|
||||
# [[supported_kernels]] may be defined unlimited number of times
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "18.04"
|
||||
# [[targets]] may be defined unlimited number of times
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "18.04" }
|
||||
# Also you can use only one kernel
|
||||
release_mask = "4.15.0-(24|29)-generic"
|
||||
kernel = { regex = "4[.]15[.]0-(24|29)-generic" }
|
||||
|
||||
[[supported_kernels]]
|
||||
distro_type = "Ubuntu"
|
||||
distro_release = "18.04"
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "18.04" }
|
||||
# Also you can use only one kernel
|
||||
release_mask = "4.15.0-23-generic"
|
||||
kernel = { regex = "4[.]15[.]0-23-generic" }
|
||||
|
||||
[[targets]]
|
||||
distro = { id = "CentOS", release = "7" }
|
||||
kernel = { regex = "3[.]10[.]0-862.el7.x86_64" }
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
logs
|
|
@ -0,0 +1,11 @@
|
|||
# out-of-tree configuration file for the module preload example.
name = "preload_example"
type = "module"

[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
# Match any kernel of this distro release.
kernel = { regex = ".*" }

# Module(s) to load before the module under test.
[[preload]]
repo = "https://github.com/openwall/lkrg"
# A local path may be used instead of a repository:
#path = "/local/path/to/lkrg"
# Delay after loading the preloaded module.
timeout_after_load = "1s"
|
@ -0,0 +1,11 @@
|
|||
# Build directory of the currently running kernel's headers.
KERNEL := /lib/modules/$(shell uname -r)/build
# Name of the resulting kernel module ($(TARGET).ko).
TARGET := module

obj-m += $(TARGET).o
$(TARGET)-objs = module.o

all:
	make -C $(KERNEL) M=$(PWD) modules

clean:
	make -C $(KERNEL) M=$(PWD) clean
|
|
@ -0,0 +1,5 @@
|
|||
# out-of-tree kernel module preload example
|
||||
|
||||
See .out-of-tree.toml
|
||||
|
||||
Note that inserting the module should fail if lkrg is enabled in the preload list.
|
|
@ -0,0 +1,17 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
int init_module(void)
|
||||
{
|
||||
char *argv[] = { "/bin/sh", "--help", NULL };
|
||||
char *envp[] = { NULL };
|
||||
|
||||
/* trigger lkrg */
|
||||
return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
|
||||
}
|
||||
|
||||
/* Module exit point: init_module allocates nothing, so there is
 * nothing to release here. */
void cleanup_module(void)
{
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh

# Scan the kernel log for LKRG "BLOCK" messages; grep's exit status
# makes the test fail when nothing was blocked.
dmesg | grep BLOCK
|
|
@ -0,0 +1 @@
|
|||
logs
|
|
@ -0,0 +1,10 @@
|
|||
# out-of-tree configuration file
|
||||
# docs at https://out-of-tree.io
|
||||
name = "script_example"
|
||||
type = "script"
|
||||
|
||||
script = "script.sh"
|
||||
|
||||
[[targets]]
|
||||
distro = { id = "Ubuntu", release = "22.04" }
|
||||
kernel = { regex = ".*" }
|
|
@ -0,0 +1,3 @@
|
|||
# out-of-tree script example
|
||||
|
||||
See .out-of-tree.toml
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/sh

# Show which kernel the script is running against.
uname -a

# List config-related /proc entries to demonstrate the environment.
ls /proc | grep config
|
|
@ -0,0 +1,130 @@
|
|||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1705309234,
|
||||
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1694529238,
|
||||
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gomod2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1705314449,
|
||||
"narHash": "sha256-yfQQ67dLejP0FLK76LKHbkzcQqNIrux6MFe32MMFGNQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "gomod2nix",
|
||||
"rev": "30e3c3a9ec4ac8453282ca7f67fca9e1da12c3e6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "gomod2nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1658285632,
|
||||
"narHash": "sha256-zRS5S/hoeDGUbO+L95wXG9vJNwsSYcl93XiD0HQBXLk=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "5342fc6fb59d0595d26883c3cadff16ce58e44f3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "master",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1708296515,
|
||||
"narHash": "sha256-FyF489fYNAUy7b6dkYV6rGPyzp+4tThhr80KNAaF/yY=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b98a4e1746acceb92c509bc496ef3d0e5ad8d4aa",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"gomod2nix": "gomod2nix",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
{
  description = "kernel {module, exploit} development tool";

  # Flake inputs: nixpkgs (unstable channel), flake-utils for the
  # per-system boilerplate, and gomod2nix for building the Go module.
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.flake-utils.url = "github:numtide/flake-utils";
  inputs.gomod2nix.url = "github:nix-community/gomod2nix";

  outputs = { self, nixpkgs, flake-utils, gomod2nix }:
    (flake-utils.lib.eachDefaultSystem
      (system:
        let
          # nixpkgs instantiated with the gomod2nix overlay so the Go
          # builder is available to the package expression in ./.
          pkgs = import nixpkgs {
            inherit system;
            overlays = [ gomod2nix.overlays.default ];
          };

          # Use the flake's last-modified date as the build version.
          version = self.lastModifiedDate;
        in
        {
          packages.default = pkgs.callPackage ./. { inherit version; };
          devShells.default = import ./shell.nix { inherit pkgs; };
        })
    );
}
|
|
@ -0,0 +1,72 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
)
|
||||
|
||||
// CaseInsensitive check
|
||||
func CaseInsensitive(dir string) (yes bool, err error) {
|
||||
pathLowercase := filepath.Join(dir, "file")
|
||||
fLowercase, err := os.Create(pathLowercase)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fLowercase.Close()
|
||||
defer os.Remove(pathLowercase)
|
||||
|
||||
pathUppercase := filepath.Join(dir, "FILE")
|
||||
fUppercase, err := os.Create(pathUppercase)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fUppercase.Close()
|
||||
defer os.Remove(pathUppercase)
|
||||
|
||||
statLowercase, err := fLowercase.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
statUppercase, err := fUppercase.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
yes = os.SameFile(statLowercase, statUppercase)
|
||||
return
|
||||
}
|
||||
|
||||
// PathExists reports whether a stat on path succeeds.
func PathExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
|
||||
|
||||
// TempDir that exist relative to config directory
|
||||
func TempDir() (string, error) {
|
||||
return os.MkdirTemp(dotfiles.Dir("tmp"), "")
|
||||
}
|
||||
|
||||
func FindBySubstring(dir, substring string) (k string, err error) {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if strings.Contains(file.Name(), substring) {
|
||||
k = filepath.Join(dir, file.Name())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New("not found")
|
||||
return
|
||||
}
|
30
gen.go
30
gen.go
|
@ -1,30 +0,0 @@
|
|||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by an AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jollheef/out-of-tree/config"
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
// genConfig prints a skeleton artifact configuration of the given
// type to stdout, for the user to redirect into a config file and
// edit.
func genConfig(at config.ArtifactType) (err error) {
	// Placeholder artifact; the user replaces Name after generation.
	a := config.Artifact{
		Name: "Put name here",
		Type: at,
	}
	// Default supported kernel: any Ubuntu 18.04 release (".*" mask).
	// NOTE(review): unkeyed composite literal relies on KernelMask
	// field order — confirm it is (distro type, release, mask).
	a.SupportedKernels = append(a.SupportedKernels, config.KernelMask{
		config.Ubuntu, "18.04", ".*",
	})

	buf, err := toml.Marshal(&a)
	if err != nil {
		return
	}

	// Emit to stdout rather than writing a file; the caller decides
	// the destination.
	fmt.Print(string(buf))
	return
}
|
|
@ -0,0 +1,58 @@
|
|||
module code.dumpstack.io/tools/out-of-tree
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/alecthomas/kong v0.7.1
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/go-git/go-git/v5 v5.6.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/mattn/go-sqlite3 v1.14.16
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/naoina/toml v0.1.1
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/otiai10/copy v1.11.0
|
||||
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e
|
||||
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc
|
||||
github.com/remeh/sizedwaitgroup v1.0.0
|
||||
github.com/rs/zerolog v1.29.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/zcalusic/sysinfo v0.9.5
|
||||
golang.org/x/crypto v0.9.0
|
||||
golang.org/x/time v0.3.0
|
||||
gopkg.in/logrusorgru/aurora.v2 v2.0.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.2.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
|
||||
github.com/acomagu/bufpipe v1.0.4 // indirect
|
||||
github.com/boltdb/bolt v1.3.1 // indirect
|
||||
github.com/cloudflare/circl v1.1.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/go-git/gcfg v1.5.0 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.4.1 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/naoina/go-stringutil v0.1.0 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/sergi/go-diff v1.1.0 // indirect
|
||||
github.com/skeema/knownhosts v1.1.0 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.3.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0 // indirect
|
||||
)
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue