
refactor: move commands to cmd/

2024-02-17 22:38:43 +00:00
parent 1b3e23d188
commit 4e92950929
13 changed files with 66 additions and 60 deletions

cmd/container.go Normal file

@@ -0,0 +1,61 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os/exec"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
)
type ContainerCmd struct {
Filter string `help:"filter by name"`
List ContainerListCmd `cmd:"" help:"list containers"`
Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
}
func (cmd ContainerCmd) Containers() (names []string) {
images, err := container.Images()
if err != nil {
log.Fatal().Err(err).Msg("")
}
for _, img := range images {
if cmd.Filter != "" && !strings.Contains(img.Name, cmd.Filter) {
continue
}
names = append(names, img.Name)
}
return
}
type ContainerListCmd struct{}
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
for _, name := range containerCmd.Containers() {
fmt.Println(name)
}
return
}
type ContainerCleanupCmd struct{}
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
var output []byte
for _, name := range containerCmd.Containers() {
output, err = exec.Command(container.Runtime, "image", "rm", name).
CombinedOutput()
if err != nil {
log.Error().Err(err).Str("output", string(output)).Msg("")
return
}
}
return
}
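
Both subcommands funnel through Containers(), so the --filter flag behaves identically for listing and cleanup. A minimal usage sketch (illustrative only, not part of this commit; it uses only identifiers defined in this file, and the filter value is a made-up example):

// Print every image whose name contains "ubuntu".
c := ContainerCmd{Filter: "ubuntu"}
for _, name := range c.Containers() {
	fmt.Println(name)
}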

cmd/db.go Normal file

@@ -0,0 +1,380 @@
// Copyright 2019 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"database/sql"
"fmt"
"strconv"
"time"
_ "github.com/mattn/go-sqlite3"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Change on ANY database update
const currentDatabaseVersion = 3
const versionField = "db_version"
type logEntry struct {
ID int
Tag string
Timestamp time.Time
qemu.System
config.Artifact
distro.KernelInfo
phasesResult
}
func createLogTable(db *sql.DB) (err error) {
_, err = db.Exec(`
CREATE TABLE IF NOT EXISTS log (
id INTEGER PRIMARY KEY,
time DATETIME DEFAULT CURRENT_TIMESTAMP,
tag TEXT,
name TEXT,
type TEXT,
distro_type TEXT,
distro_release TEXT,
kernel_release TEXT,
internal_err TEXT,
build_output TEXT,
build_ok BOOLEAN,
run_output TEXT,
run_ok BOOLEAN,
test_output TEXT,
test_ok BOOLEAN,
qemu_stdout TEXT,
qemu_stderr TEXT,
kernel_panic BOOLEAN,
timeout_kill BOOLEAN
)`)
return
}
func createMetadataTable(db *sql.DB) (err error) {
_, err = db.Exec(`
CREATE TABLE IF NOT EXISTS metadata (
id INTEGER PRIMARY KEY,
key TEXT UNIQUE,
value TEXT
)`)
return
}
func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
sql := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
stmt, err := db.Prepare(sql)
if err != nil {
return
}
defer stmt.Close()
err = stmt.QueryRow(key).Scan(&exist)
return
}
func metaGetValue(db *sql.DB, key string) (value string, err error) {
stmt, err := db.Prepare("SELECT value FROM metadata " +
"WHERE key = $1")
if err != nil {
return
}
defer stmt.Close()
err = stmt.QueryRow(key).Scan(&value)
return
}
func metaSetValue(db *sql.DB, key, value string) (err error) {
stmt, err := db.Prepare("INSERT OR REPLACE INTO metadata " +
"(key, value) VALUES ($1, $2)")
if err != nil {
return
}
defer stmt.Close()
_, err = stmt.Exec(key, value)
return
}
func getVersion(db *sql.DB) (version int, err error) {
s, err := metaGetValue(db, versionField)
if err != nil {
return
}
version, err = strconv.Atoi(s)
return
}
func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
ki distro.KernelInfo, res *phasesResult, tag string) (err error) {
stmt, err := db.Prepare("INSERT INTO log (name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_output, build_ok, " +
"run_output, run_ok, " +
"test_output, test_ok, " +
"qemu_stdout, qemu_stderr, " +
"kernel_panic, timeout_kill) " +
"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, " +
"$10, $11, $12, $13, $14, $15, $16, $17);")
if err != nil {
return
}
defer stmt.Close()
_, err = stmt.Exec(
ka.Name, ka.Type, tag,
ki.Distro.ID, ki.Distro.Release, ki.KernelRelease,
res.InternalErrorString,
res.Build.Output, res.Build.Ok,
res.Run.Output, res.Run.Ok,
res.Test.Output, res.Test.Ok,
q.Stdout, q.Stderr,
q.KernelPanic, q.KilledByTimeout,
)
if err != nil {
return
}
return
}
func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log ORDER BY datetime(time) DESC " +
"LIMIT $1")
if err != nil {
return
}
defer stmt.Close()
rows, err := stmt.Query(num)
if err != nil {
return
}
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
}
return
}
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log WHERE name=$1 AND type=$2 " +
"ORDER BY datetime(time) DESC LIMIT $3")
if err != nil {
return
}
defer stmt.Close()
rows, err := stmt.Query(ka.Name, ka.Type, num)
if err != nil {
return
}
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
}
return
}
func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, " +
"build_output, run_output, test_output, " +
"qemu_stdout, qemu_stderr, " +
"kernel_panic, timeout_kill " +
"FROM log WHERE id=$1")
if err != nil {
return
}
defer stmt.Close()
var internalErr sql.NullString
err = stmt.QueryRow(id).Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
func getLastLog(db *sql.DB) (le logEntry, err error) {
var internalErr sql.NullString
err = db.QueryRow("SELECT MAX(id), time, name, type, tag, "+
"distro_type, distro_release, kernel_release, "+
"internal_err, "+
"build_ok, run_ok, test_ok, "+
"build_output, run_output, test_output, "+
"qemu_stdout, qemu_stderr, "+
"kernel_panic, timeout_kill "+
"FROM log").Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
func createSchema(db *sql.DB) (err error) {
err = createMetadataTable(db)
if err != nil {
return
}
err = createLogTable(db)
if err != nil {
return
}
return
}
func openDatabase(path string) (db *sql.DB, err error) {
db, err = sql.Open("sqlite3", path)
if err != nil {
return
}
db.SetMaxOpenConns(1)
exists, _ := metaChkValue(db, versionField)
if !exists {
err = createSchema(db)
if err != nil {
return
}
err = metaSetValue(db, versionField,
strconv.Itoa(currentDatabaseVersion))
return
}
version, err := getVersion(db)
if err != nil {
return
}
if version == 1 {
_, err = db.Exec(`ALTER TABLE log ADD tag TEXT`)
if err != nil {
return
}
err = metaSetValue(db, versionField, "2")
if err != nil {
return
}
version = 2
}
if version == 2 {
_, err = db.Exec(`ALTER TABLE log ADD internal_err TEXT`)
if err != nil {
return
}
err = metaSetValue(db, versionField, "3")
if err != nil {
return
}
version = 3
}
if version != currentDatabaseVersion {
err = fmt.Errorf("Database is not supported (%d instead of %d)",
version, currentDatabaseVersion)
return
}
return
}
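
openDatabase creates the schema on first use and migrates older databases up to currentDatabaseVersion before returning. A short usage sketch (illustrative only; the path is a made-up example, in practice it comes from the out-of-tree configuration):

db, err := openDatabase("/tmp/out-of-tree-example.db") // hypothetical path
if err != nil {
	panic(err)
}
defer db.Close()
// Last 10 runs, no tag filter.
les, err := getAllLogs(db, "", 10)
if err != nil {
	panic(err)
}
for _, le := range les {
	fmt.Println(le.ID, le.Distro.ID, le.KernelRelease, le.Test.Ok)
}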

cmd/debug.go Normal file

@@ -0,0 +1,286 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
type DebugCmd struct {
Kernel string `help:"regexp (first match)" required:""`
Gdb string `help:"gdb listen address" default:"tcp::1234"`
SshAddr string `help:"ssh address to listen" default:"127.0.0.1"`
SshPort int `help:"ssh port to listen" default:"50022"`
ArtifactConfig string `help:"path to artifact config" type:"path"`
Kaslr bool `help:"Enable KASLR"`
Smep bool `help:"Enable SMEP"`
Smap bool `help:"Enable SMAP"`
Kpti bool `help:"Enable KPTI"`
NoKaslr bool `help:"Disable KASLR"`
NoSmep bool `help:"Disable SMEP"`
NoSmap bool `help:"Disable SMAP"`
NoKpti bool `help:"Disable KPTI"`
}
// TODO: merge with pew.go
func (cmd *DebugCmd) Run(g *Globals) (err error) {
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Print(err)
}
var configPath string
if cmd.ArtifactConfig == "" {
configPath = g.WorkDir + "/.out-of-tree.toml"
} else {
configPath = cmd.ArtifactConfig
}
ka, err := config.ReadArtifactConfig(configPath)
if err != nil {
return
}
if ka.SourcePath == "" {
ka.SourcePath = g.WorkDir
}
ki, err := firstSupported(kcfg, ka, cmd.Kernel)
if err != nil {
return
}
kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
if err != nil {
return
}
err = q.SetSSHAddrPort(cmd.SshAddr, cmd.SshPort)
if err != nil {
return
}
if ka.Qemu.Cpus != 0 {
q.Cpus = ka.Qemu.Cpus
}
if ka.Qemu.Memory != 0 {
q.Memory = ka.Qemu.Memory
}
if ka.Docker.Timeout.Duration != 0 {
g.Config.Docker.Timeout.Duration = ka.Docker.Timeout.Duration
}
q.SetKASLR(false) // set KASLR to false by default because of gdb
q.SetSMEP(!ka.Mitigations.DisableSmep)
q.SetSMAP(!ka.Mitigations.DisableSmap)
q.SetKPTI(!ka.Mitigations.DisableKpti)
if cmd.Kaslr {
q.SetKASLR(true)
} else if cmd.NoKaslr {
q.SetKASLR(false)
}
if cmd.Smep {
q.SetSMEP(true)
} else if cmd.NoSmep {
q.SetSMEP(false)
}
if cmd.Smap {
q.SetSMAP(true)
} else if cmd.NoSmap {
q.SetSMAP(false)
}
if cmd.Kpti {
q.SetKPTI(true)
} else if cmd.NoKpti {
q.SetKPTI(false)
}
redgreen := func(name string, enabled bool) aurora.Value {
if enabled {
return aurora.BgGreen(aurora.Black(name))
}
return aurora.BgRed(aurora.White(name))
}
fmt.Printf("[*] %s %s %s %s\n",
redgreen("KASLR", q.GetKASLR()),
redgreen("SMEP", q.GetSMEP()),
redgreen("SMAP", q.GetSMAP()),
redgreen("KPTI", q.GetKPTI()))
fmt.Printf("[*] SMP: %d CPUs\n", q.Cpus)
fmt.Printf("[*] Memory: %d MB\n", q.Memory)
q.Debug(cmd.Gdb)
coloredGdbAddress := aurora.BgGreen(aurora.Black(cmd.Gdb))
fmt.Printf("[*] gdb is listening on %s\n", coloredGdbAddress)
err = q.Start()
if err != nil {
return
}
defer q.Stop()
tmp, err := fs.TempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)
err = q.WaitForSSH(time.Minute)
if err != nil {
return
}
if ka.StandardModules {
// Module depends on one of the standard modules
err = copyStandardModules(q, ki)
if err != nil {
log.Print(err)
return
}
}
err = preloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Print(err)
return
}
var buildDir, outFile, output, remoteFile string
if ka.Type == config.Script {
err = q.CopyFile("root", ka.Script, ka.Script)
if err != nil {
return
}
} else {
buildDir, outFile, output, err = build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Print(err, output)
return
}
remoteFile = "/tmp/" + strings.Replace(ka.Name, " ", "_", -1)
if ka.Type == config.KernelModule {
remoteFile += ".ko"
}
err = q.CopyFile("user", outFile, remoteFile)
if err != nil {
return
}
}
// Copy all test files to the remote machine
for _, f := range ka.TestFiles {
if f.Local[0] != '/' {
if buildDir != "" {
f.Local = buildDir + "/" + f.Local
}
}
err = q.CopyFile(f.User, f.Local, f.Remote)
if err != nil {
log.Print("error copy err:", err, f.Local, f.Remote)
return
}
}
coloredRemoteFile := aurora.BgGreen(aurora.Black(remoteFile))
fmt.Printf("[*] build result copied to %s\n", coloredRemoteFile)
fmt.Printf("\n%s\n", q.GetSSHCommand())
fmt.Printf("gdb %s -ex 'target remote %s'\n\n", ki.VmlinuxPath, cmd.Gdb)
// TODO set substitute-path /build/.../linux-... /path/to/linux-source
err = interactive(q)
return
}
func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
kernel string) (ki distro.KernelInfo, err error) {
km, err := kernelMask(kernel)
if err != nil {
return
}
ka.Targets = []config.Target{km}
for _, ki = range kcfg.Kernels {
var supported bool
supported, err = ka.Supported(ki)
if err != nil || supported {
return
}
}
err = errors.New("No supported kernel found")
return
}
func handleLine(q *qemu.System) (err error) {
fmt.Print("out-of-tree> ")
rawLine := "help"
fmt.Scanf("%s", &rawLine)
params := strings.Fields(rawLine)
cmd := params[0]
switch cmd {
case "h", "help":
fmt.Printf("help\t: print this help message\n")
fmt.Printf("log\t: print qemu log\n")
fmt.Printf("clog\t: print qemu log and cleanup buffer\n")
fmt.Printf("cleanup\t: cleanup qemu log buffer\n")
fmt.Printf("ssh\t: print arguments to ssh command\n")
fmt.Printf("quit\t: quit\n")
case "l", "log":
fmt.Println(q.Stdout)
case "cl", "clog":
fmt.Println(q.Stdout)
q.Stdout = ""
case "c", "cleanup":
q.Stdout = ""
case "s", "ssh":
fmt.Println(q.GetSSHCommand())
case "q", "quit":
return errors.New("end of session")
default:
fmt.Println("No such command")
}
return
}
func interactive(q *qemu.System) (err error) {
for {
err = handleLine(q)
if err != nil {
return
}
}
}

cmd/distro.go Normal file

@@ -0,0 +1,216 @@
package cmd
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"regexp"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/davecgh/go-spew/spew"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/distro/debian"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/fs"
)
type DistroCmd struct {
List DistroListCmd `cmd:"" help:"list available distros"`
Debian DebianCmd `cmd:"" hidden:""`
}
type DebianCmd struct {
Cache DebianCacheCmd `cmd:"" help:"populate cache"`
Fetch DebianFetchCmd `cmd:"" help:"download deb packages"`
Limit int `help:"limit amount of kernels to fetch"`
Regex string `help:"match deb pkg names by regex" default:".*"`
}
type DebianCacheCmd struct {
Path string `help:"path to cache"`
Refetch int `help:"days before refetch versions without deb package" default:"7"`
UpdateRelease bool `help:"update release data"`
UpdateKbuild bool `help:"update kbuild package"`
Dump bool `help:"dump cache"`
}
func (cmd *DebianCacheCmd) Run(dcmd *DebianCmd) (err error) {
if cmd.Path != "" {
debian.CachePath = cmd.Path
}
debian.RefetchDays = cmd.Refetch
log.Info().Msg("Fetching kernels...")
if dcmd.Limit == 0 {
dcmd.Limit = math.MaxInt32
}
mode := debian.NoMode
if cmd.UpdateRelease {
mode |= debian.UpdateRelease
}
if cmd.UpdateKbuild {
mode |= debian.UpdateKbuild
}
kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, mode)
if err != nil {
log.Error().Err(err).Msg("")
return
}
if cmd.Dump {
re, err := regexp.Compile(dcmd.Regex)
if err != nil {
log.Fatal().Err(err).Msg("regex")
}
for _, kernel := range kernels {
if !re.MatchString(kernel.Image.Deb.Name) {
continue
}
fmt.Println(spew.Sdump(kernel))
}
}
log.Info().Msg("Success")
return
}
type DebianFetchCmd struct {
Path string `help:"path to download directory" type:"existingdir" default:"./"`
IgnoreMirror bool `help:"ignore check if packages on the mirror"`
Max int `help:"do not download more than X" default:"100500"`
Threads int `help:"parallel download threads" default:"8"`
Timeout time.Duration `help:"timeout for each download" default:"1m"`
swg sizedwaitgroup.SizedWaitGroup
hasResults bool
}
func (cmd *DebianFetchCmd) fetch(pkg snapshot.Package) {
flog := log.With().
Str("pkg", pkg.Deb.Name).
Logger()
defer cmd.swg.Done()
if !cmd.IgnoreMirror {
flog.Debug().Msg("check mirror")
found, _ := cache.PackageURL(distro.Debian, pkg.Deb.URL)
if found {
flog.Debug().Msg("found on the mirror")
return
}
}
target := filepath.Join(cmd.Path, filepath.Base(pkg.Deb.URL))
if fs.PathExists(target) {
flog.Debug().Msg("already exists")
return
}
tmp, err := os.MkdirTemp(cmd.Path, "tmp-")
if err != nil {
flog.Fatal().Err(err).Msg("mkdir")
return
}
defer os.RemoveAll(tmp)
flog.Info().Msg("fetch")
flog.Debug().Msg(pkg.Deb.URL)
ctx, cancel := context.WithTimeout(context.Background(), cmd.Timeout)
defer cancel()
req, err := grab.NewRequest(tmp, pkg.Deb.URL)
if err != nil {
flog.Warn().Err(err).Msg("cannot create request")
return
}
req = req.WithContext(ctx)
resp := grab.DefaultClient.Do(req)
if err := resp.Err(); err != nil {
flog.Warn().Err(err).Msg("request cancelled")
return
}
err = os.Rename(resp.Filename, target)
if err != nil {
flog.Fatal().Err(err).Msg("mv")
}
cmd.hasResults = true
cmd.Max--
}
func (cmd *DebianFetchCmd) Run(dcmd *DebianCmd) (err error) {
re, err := regexp.Compile(dcmd.Regex)
if err != nil {
log.Fatal().Err(err).Msg("regex")
}
log.Info().Msg("will not download packages that exist on the mirror")
log.Info().Msg("use --ignore-mirror if you really need it")
if dcmd.Limit == 0 {
dcmd.Limit = math.MaxInt32
}
kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, debian.NoMode)
if err != nil {
log.Error().Err(err).Msg("")
return
}
var packages []snapshot.Package
for _, kernel := range kernels {
for _, pkg := range kernel.Packages() {
if !re.MatchString(pkg.Deb.Name) {
continue
}
packages = append(packages, pkg)
}
}
cmd.swg = sizedwaitgroup.New(cmd.Threads)
for _, pkg := range packages {
if cmd.Max <= 0 {
break
}
cmd.swg.Add()
go cmd.fetch(pkg)
}
cmd.swg.Wait()
if !cmd.hasResults {
log.Fatal().Msg("no packages found to download")
}
return
}
type DistroListCmd struct{}
func (cmd *DistroListCmd) Run() (err error) {
for _, d := range distro.List() {
fmt.Println(d.ID, d.Release)
}
return
}

cmd/gen.go Normal file

@@ -0,0 +1,57 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"github.com/naoina/toml"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
)
type GenCmd struct {
Type string `enum:"module,exploit" required:"" help:"module/exploit"`
}
func (cmd *GenCmd) Run(g *Globals) (err error) {
switch cmd.Type {
case "module":
err = genConfig(config.KernelModule)
case "exploit":
err = genConfig(config.KernelExploit)
}
return
}
func genConfig(at config.ArtifactType) (err error) {
a := config.Artifact{
Name: "Put name here",
Type: at,
}
a.Targets = append(a.Targets, config.Target{
Distro: distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
Kernel: config.Kernel{Regex: ".*"},
})
a.Targets = append(a.Targets, config.Target{
Distro: distro.Distro{ID: distro.Debian, Release: "8"},
Kernel: config.Kernel{Regex: ".*"},
})
a.Preload = append(a.Preload, config.PreloadModule{
Repo: "Repo name (e.g. https://github.com/openwall/lkrg)",
})
a.Patches = append(a.Patches, config.Patch{
Path: "/path/to/profiling.patch",
})
buf, err := toml.Marshal(&a)
if err != nil {
return
}
fmt.Print(string(buf))
return
}

cmd/globals.go Normal file

@@ -0,0 +1,15 @@
package cmd
import (
"net/url"
"code.dumpstack.io/tools/out-of-tree/config"
)
type Globals struct {
Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`
WorkDir string `help:"path to work directory" default:"./" type:"path"`
CacheURL url.URL
}
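
The cmd:"" / help:"" / default:"" struct tags used throughout this package follow the conventions of the kong CLI library (github.com/alecthomas/kong), and every subcommand's Run method accepts *Globals. The main package is not part of this diff; what follows is only a hedged sketch of how such a CLI is commonly wired with kong:

// Hypothetical wiring, not part of this commit.
package main

import (
	"github.com/alecthomas/kong"

	"code.dumpstack.io/tools/out-of-tree/cmd"
)

type CLI struct {
	cmd.Globals

	Container cmd.ContainerCmd `cmd:"" help:"manage containers"`
	Gen       cmd.GenCmd       `cmd:"" help:"generate a .out-of-tree.toml skeleton"`
}

func main() {
	cli := CLI{}
	// Globals.Config is a config.OutOfTree value, which may need a custom
	// kong mapper/loader in the real main package; omitted in this sketch.
	ctx := kong.Parse(&cli)
	// kong injects bindings by type, so Run(g *Globals) receives &cli.Globals.
	ctx.FatalIfErrorf(ctx.Run(&cli.Globals))
}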

cmd/images.go Normal file

@@ -0,0 +1,113 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
type ImageCmd struct {
List ImageListCmd `cmd:"" help:"list images"`
Edit ImageEditCmd `cmd:"" help:"edit image"`
}
type ImageListCmd struct{}
func (cmd *ImageListCmd) Run(g *Globals) (err error) {
entries, err := os.ReadDir(config.Dir("images"))
if err != nil {
return
}
for _, e := range entries {
fmt.Println(e.Name())
}
return
}
type ImageEditCmd struct {
Name string `help:"image name" required:""`
DryRun bool `help:"do nothing, just print commands"`
}
func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
image := filepath.Join(config.Dir("images"), cmd.Name)
if !fs.PathExists(image) {
fmt.Println("image does not exist")
}
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
return
}
if len(kcfg.Kernels) == 0 {
return errors.New("No kernels found")
}
ki := distro.KernelInfo{}
for _, k := range kcfg.Kernels {
if k.RootFS == image {
ki = k
break
}
}
kernel := qemu.Kernel{
KernelPath: ki.KernelPath,
InitrdPath: ki.InitrdPath,
}
q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
q.Mutable = true
if cmd.DryRun {
s := q.Executable()
for _, arg := range q.Args() {
if strings.Contains(arg, " ") ||
strings.Contains(arg, ",") {
s += fmt.Sprintf(` "%s"`, arg)
} else {
s += fmt.Sprintf(" %s", arg)
}
}
fmt.Println(s)
fmt.Println(q.GetSSHCommand())
return
}
err = q.Start()
if err != nil {
fmt.Println("Qemu start error:", err)
return
}
defer q.Stop()
fmt.Print("ssh command:\n\n\t")
fmt.Println(q.GetSSHCommand())
fmt.Print("\npress enter to stop")
fmt.Scanln()
q.Command("root", "poweroff")
for !q.Died {
time.Sleep(time.Second)
}
return
}

cmd/kernel.go Normal file

@@ -0,0 +1,387 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/naoina/toml"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/kernel"
)
type KernelCmd struct {
NoDownload bool `help:"do not download qemu image while kernel generation"`
UseHost bool `help:"also use host kernels"`
Force bool `help:"force reinstall kernel"`
NoHeaders bool `help:"do not install kernel headers"`
Shuffle bool `help:"randomize kernels installation order"`
Retries int `help:"amount of tries for each kernel" default:"2"`
Threads int `help:"threads for parallel installation" default:"1"`
Update bool `help:"update container"`
Max int `help:"maximum kernels to download" default:"100500"`
NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
ContainerTimeout time.Duration `help:"container timeout"`
List KernelListCmd `cmd:"" help:"list kernels"`
ListRemote KernelListRemoteCmd `cmd:"" help:"list remote kernels"`
Autogen KernelAutogenCmd `cmd:"" help:"generate kernels based on the current config"`
Genall KernelGenallCmd `cmd:"" help:"generate all kernels for distro"`
Install KernelInstallCmd `cmd:"" help:"install specific kernel"`
ConfigRegen KernelConfigRegenCmd `cmd:"" help:"regenerate config"`
shutdown bool
kcfg config.KernelConfig
stats struct {
overall int
success int
}
}
func (cmd KernelCmd) UpdateConfig() (err error) {
if cmd.stats.success != cmd.stats.overall {
log.Warn().Msgf("%d kernels failed to install",
cmd.stats.overall-cmd.stats.success)
}
log.Info().Msgf("updating kernels.toml")
kcfg := config.KernelConfig{}
if cmd.UseHost {
// Get host kernels
kcfg.Kernels, err = kernel.GenHostKernels(!cmd.NoDownload)
if err != nil {
return
}
}
for _, dist := range distro.List() {
var kernels []distro.KernelInfo
kernels, err = dist.Kernels()
if err != nil {
return
}
kcfg.Kernels = append(kcfg.Kernels, kernels...)
}
buf, err := toml.Marshal(&kcfg)
if err != nil {
return
}
err = os.WriteFile(config.File("kernels.toml"), buf, os.ModePerm)
if err != nil {
return
}
log.Info().Msgf("kernels.toml successfully updated")
return
}
func (cmd *KernelCmd) GenKernel(km config.Target, pkg string) {
flog := log.With().
Str("kernel", pkg).
Str("distro", km.Distro.String()).
Logger()
reinstall := false
for _, kinfo := range cmd.kcfg.Kernels {
if !km.Distro.Equal(kinfo.Distro) {
continue
}
var found bool
if kinfo.Distro.ID == distro.Debian { // FIXME
found = pkg == kinfo.Package
} else if kinfo.Distro.ID == distro.OpenSUSE {
found = strings.Contains(pkg, kinfo.KernelRelease)
} else {
found = strings.Contains(pkg, kinfo.KernelVersion)
}
if found {
if !cmd.Force {
flog.Info().Msg("already installed")
return
}
reinstall = true
break
}
}
if reinstall {
flog.Info().Msg("reinstall")
} else {
flog.Info().Msg("install")
}
cmd.stats.overall += 1
var attempt int
for {
attempt++
if cmd.shutdown {
return
}
err := km.Distro.Install(pkg, !cmd.NoHeaders)
if err == nil {
cmd.stats.success += 1
flog.Info().Msg("success")
break
} else if attempt >= cmd.Retries {
flog.Error().Err(err).Msg("install kernel")
flog.Debug().Msg("skip")
break
} else {
flog.Warn().Err(err).Msg("install kernel")
time.Sleep(time.Second)
flog.Info().Msg("retry")
}
}
}
func (cmd *KernelCmd) Generate(g *Globals, km config.Target) (err error) {
if cmd.Update {
container.UseCache = false
}
if cmd.NoPrune {
container.Prune = false
}
cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Debug().Err(err).Msg("read kernels config")
}
container.Commands = g.Config.Docker.Commands
container.Registry = g.Config.Docker.Registry
container.Timeout = g.Config.Docker.Timeout.Duration
if cmd.ContainerTimeout != 0 {
container.Timeout = cmd.ContainerTimeout
}
log.Info().Msgf("Generating for target %v", km)
_, err = kernel.GenRootfsImage(km.Distro.RootFS(), !cmd.NoDownload)
if err != nil || cmd.shutdown {
return
}
pkgs, err := kernel.MatchPackages(km)
if err != nil || cmd.shutdown {
return
}
if cmd.Shuffle {
pkgs = kernel.ShuffleStrings(pkgs)
}
swg := sizedwaitgroup.New(cmd.Threads)
for i, pkg := range pkgs {
if cmd.shutdown {
err = nil
return
}
swg.Add()
if cmd.shutdown {
err = nil
swg.Done()
return
}
if cmd.stats.success >= cmd.Max {
log.Print("Max is reached")
swg.Done()
break
}
log.Info().Msgf("%d/%d %s", i+1, len(pkgs), pkg)
go func(p string) {
defer swg.Done()
cmd.GenKernel(km, p)
}(pkg)
}
swg.Wait()
return
}
type KernelListCmd struct{}
func (cmd *KernelListCmd) Run(g *Globals) (err error) {
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Debug().Err(err).Msg("read kernel config")
}
if len(kcfg.Kernels) == 0 {
return errors.New("No kernels found")
}
for _, k := range kcfg.Kernels {
fmt.Println(k.Distro.ID, k.Distro.Release, k.KernelRelease)
}
return
}
type KernelListRemoteCmd struct {
Distro string `required:"" help:"distribution"`
Ver string `help:"distro version"`
}
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
if kernelCmd.Update {
container.UseCache = false
}
if kernelCmd.NoPrune {
container.Prune = false
}
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
km := config.Target{
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
Kernel: config.Kernel{Regex: ".*"},
}
_, err = kernel.GenRootfsImage(km.Distro.RootFS(), false)
if err != nil {
return
}
container.Registry = g.Config.Docker.Registry
container.Commands = g.Config.Docker.Commands
pkgs, err := kernel.MatchPackages(km)
// error check skipped on purpose
for _, k := range pkgs {
fmt.Println(k)
}
return
}
type KernelAutogenCmd struct{}
func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
ka, err := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
if err != nil {
return
}
kernel.SetSigintHandler(&kernelCmd.shutdown)
for _, sk := range ka.Targets {
if sk.Distro.Release == "" {
err = errors.New("Please set distro_release")
return
}
err = kernelCmd.Generate(g, sk)
if err != nil {
return
}
if kernelCmd.shutdown {
break
}
}
return kernelCmd.UpdateConfig()
}
type KernelGenallCmd struct {
Distro string `help:"distribution"`
Ver string `help:"distro version"`
}
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
kernel.SetSigintHandler(&kernelCmd.shutdown)
for _, dist := range distro.List() {
if kernelCmd.shutdown {
break
}
if distroType != distro.None && distroType != dist.ID {
continue
}
if cmd.Ver != "" && dist.Release != cmd.Ver {
continue
}
target := config.Target{
Distro: dist,
Kernel: config.Kernel{Regex: ".*"},
}
err = kernelCmd.Generate(g, target)
if err != nil {
return
}
}
return kernelCmd.UpdateConfig()
}
type KernelInstallCmd struct {
Distro string `required:"" help:"distribution"`
Ver string `required:"" help:"distro version"`
Kernel string `required:"" help:"kernel release mask"`
}
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
kernel.SetSigintHandler(&kernelCmd.shutdown)
km := config.Target{
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
Kernel: config.Kernel{Regex: cmd.Kernel},
}
err = kernelCmd.Generate(g, km)
if err != nil {
return
}
return kernelCmd.UpdateConfig()
}
type KernelConfigRegenCmd struct{}
func (cmd *KernelConfigRegenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
return kernelCmd.UpdateConfig()
}

cmd/log.go Normal file

@@ -0,0 +1,322 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"database/sql"
"encoding/json"
"fmt"
"math"
"os"
"github.com/olekukonko/tablewriter"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/config"
)
type LogCmd struct {
Query LogQueryCmd `cmd:"" help:"query logs"`
Dump LogDumpCmd `cmd:"" help:"show all info for log entry with ID"`
Json LogJsonCmd `cmd:"" help:"generate json statistics"`
Markdown LogMarkdownCmd `cmd:"" help:"generate markdown statistics"`
}
type LogQueryCmd struct {
Num int `help:"how much lines" default:"50"`
Rate bool `help:"show artifact success rate"`
Tag string `help:"filter tag"`
}
func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
db, err := openDatabase(g.Config.Database)
if err != nil {
panic(err)
}
defer db.Close()
var les []logEntry
ka, kaErr := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
if kaErr == nil {
log.Print(".out-of-tree.toml found, filter by artifact name")
les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
} else {
les, err = getAllLogs(db, cmd.Tag, cmd.Num)
}
if err != nil {
return
}
s := "\nS"
if cmd.Rate {
if kaErr != nil {
err = kaErr
return
}
s = fmt.Sprintf("{[%s] %s} Overall s", ka.Type, ka.Name)
les, err = getAllArtifactLogs(db, cmd.Tag, math.MaxInt64, ka)
if err != nil {
return
}
} else {
for _, l := range les {
logLogEntry(l)
}
}
success := 0
for _, l := range les {
if l.Test.Ok {
success++
}
}
overall := float64(success) / float64(len(les))
fmt.Printf("%success rate: %.04f (~%.0f%%)\n",
s, overall, overall*100)
return
}
type LogDumpCmd struct {
ID int `help:"id" default:"-1"`
}
func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
db, err := openDatabase(g.Config.Database)
if err != nil {
panic(err)
}
defer db.Close()
var l logEntry
if cmd.ID > 0 {
l, err = getLogByID(db, cmd.ID)
} else {
l, err = getLastLog(db)
}
if err != nil {
return
}
fmt.Println("ID:", l.ID)
fmt.Println("Date:", l.Timestamp.Format("2006-01-02 15:04"))
fmt.Println("Tag:", l.Tag)
fmt.Println()
fmt.Println("Type:", l.Type.String())
fmt.Println("Name:", l.Name)
fmt.Println()
fmt.Println("Distro:", l.Distro.ID.String(), l.Distro.Release)
fmt.Println("Kernel:", l.KernelRelease)
fmt.Println()
fmt.Println("Build ok:", l.Build.Ok)
if l.Type == config.KernelModule {
fmt.Println("Insmod ok:", l.Run.Ok)
}
fmt.Println("Test ok:", l.Test.Ok)
fmt.Println()
fmt.Printf("Build output:\n%s\n", l.Build.Output)
fmt.Println()
if l.Type == config.KernelModule {
fmt.Printf("Insmod output:\n%s\n", l.Run.Output)
fmt.Println()
}
fmt.Printf("Test output:\n%s\n", l.Test.Output)
fmt.Println()
fmt.Printf("Qemu stdout:\n%s\n", l.Stdout)
fmt.Println()
fmt.Printf("Qemu stderr:\n%s\n", l.Stderr)
fmt.Println()
return
}
type LogJsonCmd struct {
Tag string `required:"" help:"filter tag"`
}
func (cmd *LogJsonCmd) Run(g *Globals) (err error) {
db, err := openDatabase(g.Config.Database)
if err != nil {
panic(err)
}
defer db.Close()
distros, err := getStats(db, g.WorkDir, cmd.Tag)
if err != nil {
return
}
bytes, err := json.Marshal(&distros)
if err != nil {
return
}
fmt.Println(string(bytes))
return
}
type LogMarkdownCmd struct {
Tag string `required:"" help:"filter tag"`
}
func (cmd *LogMarkdownCmd) Run(g *Globals) (err error) {
db, err := openDatabase(g.Config.Database)
if err != nil {
panic(err)
}
defer db.Close()
distros, err := getStats(db, g.WorkDir, cmd.Tag)
if err != nil {
return
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Distro", "Release", "Kernel", "Reliability"})
table.SetBorders(tablewriter.Border{
Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
for distro, releases := range distros {
for release, kernels := range releases {
for kernel, stats := range kernels {
all := float64(stats.All)
ok := float64(stats.TestOK)
r := fmt.Sprintf("%6.02f%%", (ok/all)*100)
table.Append([]string{distro, release, kernel, r})
}
}
}
table.Render()
return
}
func center(s string, w int) string {
return fmt.Sprintf("%[1]*s", -w, fmt.Sprintf("%[1]*s", (w+len(s))/2, s))
}
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
name = center(name, 10)
if ok {
aurv = aurora.BgGreen(aurora.Black(name))
} else {
aurv = aurora.BgRed(aurora.White(aurora.Bold(name)))
}
return
}
func logLogEntry(l logEntry) {
distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
l.Distro.Release, l.KernelRelease)
artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)
timestamp := l.Timestamp.Format("2006-01-02 15:04")
var status aurora.Value
if l.InternalErrorString != "" {
status = genOkFailCentered("INTERNAL", false)
} else if l.Type == config.KernelExploit {
if l.Build.Ok {
status = genOkFailCentered("LPE", l.Test.Ok)
} else {
status = genOkFailCentered("BUILD", l.Build.Ok)
}
} else {
if l.Build.Ok {
if l.Run.Ok {
status = genOkFailCentered("TEST", l.Test.Ok)
} else {
status = genOkFailCentered("INSMOD", l.Run.Ok)
}
} else {
status = genOkFailCentered("BUILD", l.Build.Ok)
}
}
additional := ""
if l.KernelPanic {
additional = "(panic)"
} else if l.KilledByTimeout {
additional = "(timeout)"
}
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
additional)
fmt.Println(colored)
}
type runstat struct {
All, BuildOK, RunOK, TestOK, Timeout, Panic int
}
func getStats(db *sql.DB, path, tag string) (
distros map[string]map[string]map[string]runstat, err error) {
var les []logEntry
ka, kaErr := config.ReadArtifactConfig(path + "/.out-of-tree.toml")
if kaErr == nil {
les, err = getAllArtifactLogs(db, tag, -1, ka)
} else {
les, err = getAllLogs(db, tag, -1)
}
if err != nil {
return
}
distros = make(map[string]map[string]map[string]runstat)
for _, l := range les {
_, ok := distros[l.Distro.ID.String()]
if !ok {
distros[l.Distro.ID.String()] = make(map[string]map[string]runstat)
}
_, ok = distros[l.Distro.ID.String()][l.Distro.Release]
if !ok {
distros[l.Distro.ID.String()][l.Distro.Release] = make(map[string]runstat)
}
rs := distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease]
rs.All++
if l.Build.Ok {
rs.BuildOK++
}
if l.Run.Ok {
rs.RunOK++
}
if l.Test.Ok {
rs.TestOK++
}
if l.KernelPanic {
rs.Panic++
}
if l.KilledByTimeout {
rs.Timeout++
}
distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease] = rs
}
return
}
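
getStats returns a three-level map keyed as distros[distroID][release][kernelRelease], which is what the JSON and markdown commands above consume. An illustrative read (the keys below are made-up examples, not values from this diff):

rs := distros["Ubuntu"]["22.04"]["5.15.0-25-generic"] // hypothetical keys
fmt.Printf("build %d/%d, test %d/%d\n", rs.BuildOK, rs.All, rs.TestOK, rs.All)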

cmd/pack.go Normal file

@@ -0,0 +1,88 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io/ioutil"
"time"
"code.dumpstack.io/tools/out-of-tree/fs"
"github.com/rs/zerolog/log"
)
type PackCmd struct {
Autogen bool `help:"kernel autogeneration"`
UseHost bool `help:"also use host kernels"`
NoDownload bool `help:"do not download qemu image while kernel generation"`
ExploitRuns int64 `default:"4" help:"amount of runs of each exploit"`
KernelRuns int64 `default:"1" help:"amount of runs of each kernel"`
Max int `help:"download random kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"1"`
Threads int `help:"threads" default:"4"`
Tag string `help:"filter tag"`
Timeout time.Duration `help:"timeout after tool will not spawn new tests"`
QemuTimeout time.Duration `help:"timeout for qemu"`
DockerTimeout time.Duration `help:"timeout for docker"`
}
func (cmd *PackCmd) Run(g *Globals) (err error) {
tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
log.Print("Tag:", tag)
files, err := ioutil.ReadDir(g.WorkDir)
if err != nil {
return
}
for _, f := range files {
workPath := g.WorkDir + "/" + f.Name()
if !fs.PathExists(workPath + "/.out-of-tree.toml") {
continue
}
if cmd.Autogen {
autogen := KernelAutogenCmd{}
err = autogen.Run(
&KernelCmd{
NoDownload: cmd.NoDownload,
UseHost: cmd.UseHost,
Max: cmd.Max,
},
&Globals{
Config: g.Config,
WorkDir: workPath,
},
)
if err != nil {
return
}
}
log.Print(f.Name())
pew := PewCmd{
Max: cmd.KernelRuns,
Runs: cmd.ExploitRuns,
Threads: cmd.Threads,
Tag: tag,
Timeout: cmd.Timeout,
QemuTimeout: cmd.QemuTimeout,
DockerTimeout: cmd.DockerTimeout,
Dist: pathDevNull,
}
pew.Run(&Globals{
Config: g.Config,
WorkDir: workPath,
})
}
return
}

cmd/pew.go Normal file

@@ -0,0 +1,966 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"bufio"
"database/sql"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"strings"
"time"
"github.com/otiai10/copy"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
type LevelWriter struct {
io.Writer
Level zerolog.Level
}
func (lw *LevelWriter) WriteLevel(l zerolog.Level, p []byte) (n int, err error) {
if l >= lw.Level {
return lw.Writer.Write(p)
}
return len(p), nil
}
var ConsoleWriter, FileWriter LevelWriter
var LogLevel zerolog.Level
type PewCmd struct {
Max int64 `help:"test no more than X kernels" default:"100500"`
Runs int64 `help:"runs per each kernel" default:"1"`
Kernel string `help:"override kernel regex"`
RootFS string `help:"override rootfs image" type:"existingfile"`
Guess bool `help:"try all defined kernels"`
Shuffle bool `help:"randomize kernels test order"`
Binary string `help:"use binary, do not build"`
Test string `help:"override path for test"`
Dist string `help:"build result path" default:"/dev/null"`
Threads int `help:"threads" default:"1"`
Tag string `help:"log tagging"`
Timeout time.Duration `help:"timeout after tool will not spawn new tests"`
ArtifactConfig string `help:"path to artifact config" type:"path"`
QemuTimeout time.Duration `help:"timeout for qemu"`
QemuAfterStartTimeout time.Duration `help:"timeout after starting of the qemu vm before tests"`
DockerTimeout time.Duration `help:"timeout for docker"`
Threshold float64 `help:"reliability threshold for exit code" default:"1.00"`
IncludeInternalErrors bool `help:"count internal errors as part of the success rate"`
Endless bool `help:"endless tests"`
EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
EndlessStress string `help:"endless stress script" type:"existingfile"`
db *sql.DB
kcfg config.KernelConfig
timeoutDeadline time.Time
}
func (cmd *PewCmd) Run(g *Globals) (err error) {
cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Fatal().Err(err).Msg("read kernels config")
}
if cmd.Timeout != 0 {
log.Info().Msgf("Set global timeout to %s", cmd.Timeout)
cmd.timeoutDeadline = time.Now().Add(cmd.Timeout)
}
cmd.db, err = openDatabase(g.Config.Database)
if err != nil {
log.Fatal().Err(err).
Msgf("Cannot open database %s", g.Config.Database)
}
defer cmd.db.Close()
var configPath string
if cmd.ArtifactConfig == "" {
configPath = g.WorkDir + "/.out-of-tree.toml"
} else {
configPath = cmd.ArtifactConfig
}
ka, err := config.ReadArtifactConfig(configPath)
if err != nil {
return
}
if len(ka.Targets) == 0 || cmd.Guess {
log.Debug().Msg("will use all available targets")
for _, dist := range distro.List() {
ka.Targets = append(ka.Targets, config.Target{
Distro: dist,
Kernel: config.Kernel{
Regex: ".*",
},
})
}
}
if ka.SourcePath == "" {
ka.SourcePath = g.WorkDir
}
if cmd.Kernel != "" {
var km config.Target
km, err = kernelMask(cmd.Kernel)
if err != nil {
return
}
ka.Targets = []config.Target{km}
}
if cmd.QemuTimeout != 0 {
log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
} else {
cmd.QemuTimeout = g.Config.Qemu.Timeout.Duration
}
if cmd.DockerTimeout != 0 {
log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
} else {
cmd.DockerTimeout = g.Config.Docker.Timeout.Duration
}
if cmd.Tag == "" {
cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
}
log.Info().Str("tag", cmd.Tag).Msg("")
err = cmd.performCI(ka)
if err != nil {
return
}
if state.InternalErrors > 0 {
s := "not counted towards success rate"
if cmd.IncludeInternalErrors {
s = "included in success rate"
}
log.Warn().Msgf("%d internal errors "+
"(%s)", state.InternalErrors, s)
}
if cmd.IncludeInternalErrors {
state.Overall += float64(state.InternalErrors)
}
msg := fmt.Sprintf("Success rate: %.02f (%d/%d), Threshold: %.02f",
successRate(state),
int(state.Success), int(state.Overall),
cmd.Threshold)
if successRate(state) < cmd.Threshold {
log.Error().Msg(msg)
err = errors.New("reliability threshold not met")
} else {
log.Info().Msg(msg)
}
return
}
type runstate struct {
Overall, Success float64
InternalErrors int
}
var (
state runstate
)
func successRate(state runstate) float64 {
return state.Success / state.Overall
}
const pathDevNull = "/dev/null"
func sh(workdir, command string) (output string, err error) {
flog := log.With().
Str("workdir", workdir).
Str("command", command).
Logger()
cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)
flog.Debug().Msgf("%v", cmd)
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
cmd.Stderr = cmd.Stdout
err = cmd.Start()
if err != nil {
return
}
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
}
}()
err = cmd.Wait()
if err != nil {
e := fmt.Sprintf("%v %v output: %v", cmd, err, output)
err = errors.New(e)
}
return
}
func applyPatches(src string, ka config.Artifact) (err error) {
for i, patch := range ka.Patches {
name := fmt.Sprintf("patch_%02d", i)
path := src + "/" + name + ".diff"
if patch.Source != "" && patch.Path != "" {
err = errors.New("path and source are mutually exclusive")
return
} else if patch.Source != "" {
err = os.WriteFile(path, []byte(patch.Source), 0644)
if err != nil {
return
}
} else if patch.Path != "" {
err = copy.Copy(patch.Path, path)
if err != nil {
return
}
}
if patch.Source != "" || patch.Path != "" {
_, err = sh(src, "patch < "+path)
if err != nil {
return
}
}
if patch.Script != "" {
script := src + "/" + name + ".sh"
err = os.WriteFile(script, []byte(patch.Script), 0755)
if err != nil {
return
}
_, err = sh(src, script)
if err != nil {
return
}
}
}
return
}
func build(flog zerolog.Logger, tmp string, ka config.Artifact,
ki distro.KernelInfo, dockerTimeout time.Duration) (
outdir, outpath, output string, err error) {
target := strings.Replace(ka.Name, " ", "_", -1)
if target == "" {
target = fmt.Sprintf("%d", rand.Int())
}
outdir = tmp + "/source"
err = copy.Copy(ka.SourcePath, outdir)
if err != nil {
return
}
err = applyPatches(outdir, ka)
if err != nil {
return
}
outpath = outdir + "/" + target
if ka.Type == config.KernelModule {
outpath += ".ko"
}
if ki.KernelVersion == "" {
ki.KernelVersion = ki.KernelRelease
}
kernel := "/lib/modules/" + ki.KernelVersion + "/build"
if ki.KernelSource != "" {
kernel = ki.KernelSource
}
buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
if ka.Make.Target != "" {
buildCommand += " " + ka.Make.Target
}
if ki.ContainerName != "" {
var c container.Container
container.Timeout = dockerTimeout
c, err = container.NewFromKernelInfo(ki)
c.Log = flog
if err != nil {
log.Fatal().Err(err).Msg("container creation failure")
}
output, err = c.Run(outdir, []string{
buildCommand + " && chmod -R 777 /work",
})
} else {
cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
buildCommand)
log.Debug().Msgf("%v", cmd)
timer := time.AfterFunc(dockerTimeout, func() {
cmd.Process.Kill()
})
defer timer.Stop()
var raw []byte
raw, err = cmd.CombinedOutput()
if err != nil {
e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
err, buildCommand, string(raw))
err = errors.New(e)
return
}
output = string(raw)
}
return
}
func runScript(q *qemu.System, script string) (output string, err error) {
return q.Command("root", script)
}
func testKernelModule(q *qemu.System, ka config.Artifact,
test string) (output string, err error) {
output, err = q.Command("root", test)
// TODO generic checks for WARNING's and so on
return
}
func testKernelExploit(q *qemu.System, ka config.Artifact,
test, exploit string) (output string, err error) {
output, err = q.Command("user", "chmod +x "+exploit)
if err != nil {
return
}
randFilePath := fmt.Sprintf("/root/%d", rand.Int())
cmd := fmt.Sprintf("%s %s %s", test, exploit, randFilePath)
output, err = q.Command("user", cmd)
if err != nil {
return
}
_, err = q.Command("root", "stat "+randFilePath)
if err != nil {
return
}
return
}
func genOkFail(name string, ok bool) (aurv aurora.Value) {
s := " " + name
if name == "" {
s = ""
}
if ok {
s += " SUCCESS "
aurv = aurora.BgGreen(aurora.Black(s))
} else {
s += " FAILURE "
aurv = aurora.BgRed(aurora.White(aurora.Bold(s)))
}
return
}
type phasesResult struct {
BuildDir string
BuildArtifact string
Build, Run, Test struct {
Output string
Ok bool
}
InternalError error
InternalErrorString string
}
func copyFile(sourcePath, destinationPath string) (err error) {
sourceFile, err := os.Open(sourcePath)
if err != nil {
return
}
defer sourceFile.Close()
destinationFile, err := os.Create(destinationPath)
if err != nil {
return err
}
if _, err := io.Copy(destinationFile, sourceFile); err != nil {
destinationFile.Close()
return err
}
return destinationFile.Close()
}
func dumpResult(q *qemu.System, ka config.Artifact, ki distro.KernelInfo,
res *phasesResult, dist, tag, binary string, db *sql.DB) {
// TODO refactor
if res.InternalError != nil {
q.Log.Warn().Err(res.InternalError).
Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
Msg("internal")
res.InternalErrorString = res.InternalError.Error()
state.InternalErrors += 1
} else {
colored := ""
state.Overall += 1
if res.Test.Ok {
state.Success += 1
}
switch ka.Type {
case config.KernelExploit:
colored = aurora.Sprintf("%s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("LPE", res.Test.Ok))
case config.KernelModule:
colored = aurora.Sprintf("%s %s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("INSMOD", res.Run.Ok),
genOkFail("TEST", res.Test.Ok))
case config.Script:
colored = aurora.Sprintf("%s",
genOkFail("", res.Test.Ok))
}
additional := ""
if q.KernelPanic {
additional = "(panic)"
} else if q.KilledByTimeout {
additional = "(timeout)"
}
if additional != "" {
q.Log.Info().Msgf("%v %v", colored, additional)
} else {
q.Log.Info().Msgf("%v", colored)
}
}
err := addToLog(db, q, ka, ki, res, tag)
if err != nil {
q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
}
if binary == "" && dist != pathDevNull {
err = os.MkdirAll(dist, os.ModePerm)
if err != nil {
log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
}
path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
ki.Distro.Release, ki.KernelRelease)
if ka.Type != config.KernelExploit {
path += ".ko"
}
err = copyFile(res.BuildArtifact, path)
if err != nil {
log.Warn().Err(err).Msgf("copy file (%v)", ka)
}
}
}
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka config.Artifact,
res *phasesResult, remoteTest string) (err error) {
// Copy all test files to the remote machine
for _, f := range ka.TestFiles {
if f.Local[0] != '/' {
if res.BuildDir != "" {
f.Local = res.BuildDir + "/" + f.Local
}
}
err = q.CopyFile(f.User, f.Local, f.Remote)
if err != nil {
res.InternalError = err
slog.Error().Err(err).Msg("copy test file")
return
}
}
switch ka.Type {
case config.KernelModule:
res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
if err != nil {
slog.Error().Err(err).Msg(res.Run.Output)
// TODO errors.As
if strings.Contains(err.Error(), "connection refused") {
res.InternalError = err
}
return
}
res.Run.Ok = true
res.Test.Output, err = testKernelModule(q, ka, remoteTest)
if err != nil {
slog.Error().Err(err).Msg(res.Test.Output)
return
}
res.Test.Ok = true
case config.KernelExploit:
remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
if err != nil {
return
}
res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
remoteExploit)
if err != nil {
slog.Error().Err(err).Msg(res.Test.Output)
return
}
res.Run.Ok = true // not really used for exploits
res.Test.Ok = true
case config.Script:
res.Test.Output, err = runScript(q, remoteTest)
if err != nil {
slog.Error().Err(err).Msg(res.Test.Output)
return
}
slog.Info().Msgf("\n%v\n", res.Test.Output)
res.Run.Ok = true
res.Test.Ok = true
default:
slog.Fatal().Msg("Unsupported artifact type")
}
_, err = q.Command("root", "echo")
if err != nil {
slog.Error().Err(err).Msg("after-test ssh reconnect")
res.Test.Ok = false
return
}
return
}
func copyTest(q *qemu.System, testPath string, ka config.Artifact) (
remoteTest string, err error) {
remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
err = q.CopyFile("user", testPath, remoteTest)
if err != nil {
if ka.Type == config.KernelExploit {
q.Command("user",
"echo -e '#!/bin/sh\necho touch $2 | $1' "+
"> "+remoteTest+
" && chmod +x "+remoteTest)
} else {
q.Command("user", "echo '#!/bin/sh' "+
"> "+remoteTest+" && chmod +x "+remoteTest)
}
}
_, err = q.Command("root", "chmod +x "+remoteTest)
return
}
func copyStandardModules(q *qemu.System, ki distro.KernelInfo) (err error) {
_, err = q.Command("root", "mkdir -p /lib/modules/"+ki.KernelVersion)
if err != nil {
return
}
remotePath := "/lib/modules/" + ki.KernelVersion + "/"
err = q.CopyDirectory("root", ki.ModulesPath+"/kernel", remotePath+"/kernel")
if err != nil {
return
}
files, err := ioutil.ReadDir(ki.ModulesPath)
if err != nil {
return
}
for _, f := range files {
if f.Mode()&os.ModeSymlink == os.ModeSymlink {
continue
}
if !strings.HasPrefix(f.Name(), "modules") {
continue
}
err = q.CopyFile("root", ki.ModulesPath+"/"+f.Name(), remotePath)
}
return
}
func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
ka config.Artifact, ki distro.KernelInfo) {
defer swg.Done()
logdir := "logs/" + cmd.Tag
err := os.MkdirAll(logdir, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("mkdir %s", logdir)
return
}
logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
cmd.Tag,
ki.Distro.ID.String(),
ki.Distro.Release,
ki.KernelRelease,
)
f, err := os.Create(logfile)
if err != nil {
log.Error().Err(err).Msgf("create %s", logfile)
return
}
defer f.Close()
slog := zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
&zerolog.ConsoleWriter{
Out: f,
FieldsExclude: []string{
"distro_release",
"distro_type",
"kernel",
},
NoColor: true,
},
))
switch LogLevel {
case zerolog.TraceLevel, zerolog.DebugLevel:
slog = slog.With().Caller().Logger()
}
slog = slog.With().Timestamp().
Str("distro_type", ki.Distro.ID.String()).
Str("distro_release", ki.Distro.Release).
Str("kernel", ki.KernelRelease).
Logger()
slog.Info().Msg("start")
testStart := time.Now()
defer func() {
slog.Debug().Str("test_duration",
time.Now().Sub(testStart).String()).
Msg("")
}()
kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
if cmd.RootFS != "" {
ki.RootFS = cmd.RootFS
}
q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
if err != nil {
slog.Error().Err(err).Msg("qemu init")
return
}
q.Log = slog
q.Timeout = cmd.QemuTimeout
if ka.Qemu.Timeout.Duration != 0 {
q.Timeout = ka.Qemu.Timeout.Duration
}
if ka.Qemu.Cpus != 0 {
q.Cpus = ka.Qemu.Cpus
}
if ka.Qemu.Memory != 0 {
q.Memory = ka.Qemu.Memory
}
if ka.Docker.Timeout.Duration != 0 {
cmd.DockerTimeout = ka.Docker.Timeout.Duration
}
q.SetKASLR(!ka.Mitigations.DisableKaslr)
q.SetSMEP(!ka.Mitigations.DisableSmep)
q.SetSMAP(!ka.Mitigations.DisableSmap)
q.SetKPTI(!ka.Mitigations.DisableKpti)
if ki.CPU.Model != "" {
q.CPU.Model = ki.CPU.Model
}
if len(ki.CPU.Flags) != 0 {
q.CPU.Flags = ki.CPU.Flags
}
if cmd.Endless {
q.Timeout = 0
}
qemuStart := time.Now()
err = q.Start()
if err != nil {
slog.Error().Err(err).Msg("qemu start")
return
}
defer q.Stop()
slog.Debug().Msgf("wait %v", cmd.QemuAfterStartTimeout)
time.Sleep(cmd.QemuAfterStartTimeout)
go func() {
time.Sleep(time.Minute)
for !q.Died {
slog.Debug().Msg("still alive")
time.Sleep(time.Minute)
}
}()
tmp, err := fs.TempDir()
if err != nil {
slog.Error().Err(err).Msg("making tmp directory")
return
}
defer os.RemoveAll(tmp)
result := phasesResult{}
if !cmd.Endless {
defer dumpResult(q, ka, ki, &result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.db)
}
if ka.Type == config.Script {
result.Build.Ok = true
cmd.Test = ka.Script
} else if cmd.Binary == "" {
// TODO: build should return structure
start := time.Now()
result.BuildDir, result.BuildArtifact, result.Build.Output, err =
build(slog, tmp, ka, ki, cmd.DockerTimeout)
slog.Debug().Str("duration", time.Now().Sub(start).String()).
Msg("build done")
if err != nil {
log.Error().Err(err).Msg("build")
return
}
result.Build.Ok = true
} else {
result.BuildArtifact = cmd.Binary
result.Build.Ok = true
}
if cmd.Test == "" {
cmd.Test = result.BuildArtifact + "_test"
if !fs.PathExists(cmd.Test) {
slog.Debug().Msgf("%s does not exist", cmd.Test)
cmd.Test = tmp + "/source/" + "test.sh"
} else {
slog.Debug().Msgf("%s exist", cmd.Test)
}
}
err = q.WaitForSSH(cmd.QemuTimeout)
if err != nil {
result.InternalError = err
return
}
slog.Debug().Str("qemu_startup_duration",
time.Now().Sub(qemuStart).String()).
Msg("ssh is available")
remoteTest, err := copyTest(q, cmd.Test, ka)
if err != nil {
result.InternalError = err
slog.Error().Err(err).Msg("copy test script")
return
}
if ka.StandardModules {
// Module depends on one of the standard modules
start := time.Now()
err = copyStandardModules(q, ki)
if err != nil {
result.InternalError = err
slog.Error().Err(err).Msg("copy standard modules")
return
}
slog.Debug().Str("duration", time.Now().Sub(start).String()).
Msg("copy standard modules")
}
err = preloadModules(q, ka, ki, cmd.DockerTimeout)
if err != nil {
result.InternalError = err
slog.Error().Err(err).Msg("preload modules")
return
}
start := time.Now()
copyArtifactAndTest(slog, q, ka, &result, remoteTest)
slog.Debug().Str("duration", time.Now().Sub(start).String()).
Msgf("test completed (success: %v)", result.Test.Ok)
if !cmd.Endless {
return
}
dumpResult(q, ka, ki, &result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.db)
if !result.Build.Ok || !result.Run.Ok || !result.Test.Ok {
return
}
slog.Info().Msg("start endless tests")
if cmd.EndlessStress != "" {
slog.Debug().Msg("copy and run endless stress script")
err = q.CopyAndRunAsync("root", cmd.EndlessStress)
if err != nil {
q.Stop()
f.Sync()
slog.Fatal().Err(err).Msg("cannot copy/run stress")
return
}
}
for {
output, err := q.Command("root", remoteTest)
if err != nil {
q.Stop()
f.Sync()
slog.Fatal().Err(err).Msg(output)
return
}
slog.Debug().Msg(output)
slog.Info().Msg("test success")
slog.Debug().Msgf("wait %v", cmd.EndlessTimeout)
time.Sleep(cmd.EndlessTimeout)
}
}
func shuffleKernels(a []distro.KernelInfo) []distro.KernelInfo {
// FisherYates shuffle
for i := len(a) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
a[i], a[j] = a[j], a[i]
}
return a
}
func (cmd PewCmd) performCI(ka config.Artifact) (err error) {
found := false
max := cmd.Max
threadCounter := 0
swg := sizedwaitgroup.New(cmd.Threads)
if cmd.Shuffle {
cmd.kcfg.Kernels = shuffleKernels(cmd.kcfg.Kernels)
}
for _, kernel := range cmd.kcfg.Kernels {
if max <= 0 {
break
}
var supported bool
supported, err = ka.Supported(kernel)
if err != nil {
return
}
if kernel.Blocklisted {
log.Debug().Str("kernel", kernel.KernelVersion).
Msgf("skip (blocklisted)")
continue
}
if supported {
found = true
max--
for i := int64(0); i < cmd.Runs; i++ {
if !cmd.timeoutDeadline.IsZero() &&
time.Now().After(cmd.timeoutDeadline) {
break
}
swg.Add()
if threadCounter < cmd.Threads {
time.Sleep(time.Second)
threadCounter++
}
go cmd.testArtifact(&swg, ka, kernel)
}
}
}
swg.Wait()
if !found {
err = errors.New("No supported kernels found")
}
return
}
func kernelMask(kernel string) (km config.Target, err error) {
parts := strings.Split(kernel, ":")
if len(parts) != 2 {
err = errors.New("Kernel is not 'distroType:regex'")
return
}
dt, err := distro.NewID(parts[0])
if err != nil {
return
}
km = config.Target{
Distro: distro.Distro{ID: dt},
Kernel: config.Kernel{Regex: parts[1]},
}
return
}
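
kernelMask parses the --kernel override, which must be given in distroType:regex form. An illustrative call (the distro name and regex are examples, not values taken from this diff):

km, err := kernelMask(`ubuntu:^5\.15\..*`)
if err == nil {
	fmt.Println(km.Distro.ID, km.Kernel.Regex)
}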

cmd/preload.go Normal file

@@ -0,0 +1,164 @@
// Copyright 2020 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"crypto/sha1"
"encoding/hex"
"errors"
"os"
"path/filepath"
"time"
"github.com/go-git/go-git/v5"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
func preloadModules(q *qemu.System, ka config.Artifact, ki distro.KernelInfo,
dockerTimeout time.Duration) (err error) {
for _, pm := range ka.Preload {
err = preload(q, ki, pm, dockerTimeout)
if err != nil {
return
}
}
return
}
func preload(q *qemu.System, ki distro.KernelInfo, pm config.PreloadModule,
dockerTimeout time.Duration) (err error) {
var workPath, cache string
if pm.Path != "" {
log.Print("Use non-git path for preload module (no cache)")
workPath = pm.Path
} else if pm.Repo != "" {
workPath, cache, err = cloneOrPull(pm.Repo, ki)
if err != nil {
return
}
} else {
err = errors.New("No repo/path in preload entry")
return
}
err = buildAndInsmod(workPath, q, ki, dockerTimeout, cache)
if err != nil {
return
}
time.Sleep(pm.TimeoutAfterLoad.Duration)
return
}
func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
dockerTimeout time.Duration, cache string) (err error) {
tmp, err := fs.TempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)
var artifact string
if fs.PathExists(cache) {
artifact = cache
} else {
artifact, err = buildPreload(workPath, tmp, ki, dockerTimeout)
if err != nil {
return
}
if cache != "" {
err = copyFile(artifact, cache)
if err != nil {
return
}
}
}
output, err := q.CopyAndInsmod(artifact)
if err != nil {
log.Print(output)
return
}
return
}
func buildPreload(workPath, tmp string, ki distro.KernelInfo,
dockerTimeout time.Duration) (artifact string, err error) {
ka, err := config.ReadArtifactConfig(workPath + "/.out-of-tree.toml")
if err != nil {
log.Warn().Err(err).Msg("preload")
}
ka.SourcePath = workPath
km := config.Target{
Distro: ki.Distro,
Kernel: config.Kernel{Regex: ki.KernelRelease},
}
ka.Targets = []config.Target{km}
if ka.Docker.Timeout.Duration != 0 {
dockerTimeout = ka.Docker.Timeout.Duration
}
_, artifact, _, err = build(log.Logger, tmp, ka, ki, dockerTimeout)
return
}
func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
err error) {
base := config.Dir("preload")
workPath = filepath.Join(base, "/repos/", sha1sum(repo))
var r *git.Repository
if fs.PathExists(workPath) {
r, err = git.PlainOpen(workPath)
if err != nil {
return
}
var w *git.Worktree
w, err = r.Worktree()
if err != nil {
return
}
err = w.Pull(&git.PullOptions{})
if err != nil && err != git.NoErrAlreadyUpToDate {
log.Print(repo, "pull error:", err)
}
} else {
r, err = git.PlainClone(workPath, false, &git.CloneOptions{URL: repo})
if err != nil {
return
}
}
ref, err := r.Head()
if err != nil {
return
}
cachedir := filepath.Join(base, "/cache/")
os.MkdirAll(cachedir, 0700)
filename := sha1sum(repo + ki.KernelPath + ref.Hash().String())
cache = filepath.Join(cachedir, filename)
return
}
func sha1sum(data string) string {
h := sha1.Sum([]byte(data))
return hex.EncodeToString(h[:])
}