feat: initial daemon implementation

parent 820208d079
commit 0314b5ca93

api/api.go (new file, 202 lines)
@@ -0,0 +1,202 @@
package api

import (
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"reflect"

	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/distro"

	"github.com/davecgh/go-spew/spew"
	"github.com/google/uuid"
	"github.com/rs/zerolog/log"
)

var ErrInvalid = errors.New("")

type Status string

const (
	StatusNew     Status = "new"
	StatusWaiting Status = "waiting"
	StatusRunning Status = "running"
	StatusSuccess Status = "success"
	StatusFailure Status = "failure"
)

type Command string

const (
	RawMode Command = "rawmode"

	AddJob    Command = "add_job"
	ListJobs  Command = "list_jobs"
	JobLogs   Command = "job_logs"
	JobStatus Command = "job_status"

	AddRepo   Command = "add_repo"
	ListRepos Command = "list_repos"

	Kernels Command = "kernels"
)

type Job struct {
	ID   int64
	UUID string

	RepoName string
	Commit   string

	Params   string
	Artifact artifact.Artifact
	Target   distro.KernelInfo

	Status Status
}

func (job *Job) GenUUID() {
	job.UUID = uuid.New().String()
}

type Repo struct {
	ID   int64
	Name string
	Path string
}

type JobLog struct {
	Name string
	Text string
}

type Req struct {
	Command Command

	Type string
	Data []byte
}

func (r *Req) SetData(data any) {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))
	r.Data = Marshal(data)
}

func (r *Req) GetData(data any) (err error) {
	if len(r.Data) == 0 {
		return
	}

	t := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != t {
		err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
		return
	}

	log.Trace().Msgf("unmarshal %v", string(r.Data))
	err = json.Unmarshal(r.Data, &data)
	return
}

func (r Req) Encode(conn net.Conn) {
	log.Trace().Msgf("encode %v", spew.Sdump(r))
	err := json.NewEncoder(conn).Encode(&r)
	if err != nil {
		log.Fatal().Err(err).Msgf("encode %v", r)
	}
}

func (r *Req) Decode(conn net.Conn) (err error) {
	err = json.NewDecoder(conn).Decode(r)
	return
}

func (r Req) Marshal() (bytes []byte) {
	return Marshal(r)
}

func (Req) Unmarshal(data []byte) (r Req, err error) {
	err = json.Unmarshal(data, &r)
	log.Trace().Msgf("unmarshal %v", spew.Sdump(r))
	return
}

type Resp struct {
	UUID string

	Error string

	Err error `json:"-"`

	Type string
	Data []byte
}

func NewResp() (resp Resp) {
	resp.UUID = uuid.New().String()
	return
}

func (r *Resp) SetData(data any) {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))
	r.Data = Marshal(data)
}

func (r *Resp) GetData(data any) (err error) {
	if len(r.Data) == 0 {
		return
	}

	t := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != t {
		err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
		return
	}

	log.Trace().Msgf("unmarshal %v", string(r.Data))
	err = json.Unmarshal(r.Data, &data)
	return
}

func (r *Resp) Encode(conn net.Conn) {
	if r.Err != nil && r.Err != ErrInvalid && r.Error == "" {
		r.Error = fmt.Sprintf("%v", r.Err)
	}
	log.Trace().Msgf("encode %v", spew.Sdump(r))
	err := json.NewEncoder(conn).Encode(r)
	if err != nil {
		log.Fatal().Err(err).Msgf("encode %v", r)
	}
}

func (r *Resp) Decode(conn net.Conn) (err error) {
	err = json.NewDecoder(conn).Decode(r)
	r.Err = ErrInvalid
	return
}

func (r *Resp) Marshal() (bytes []byte) {
	if r.Err != nil && r.Err != ErrInvalid && r.Error == "" {
		r.Error = fmt.Sprintf("%v", r.Err)
	}

	return Marshal(r)
}

func (Resp) Unmarshal(data []byte) (r Resp, err error) {
	err = json.Unmarshal(data, &r)
	log.Trace().Msgf("unmarshal %v", spew.Sdump(r))
	r.Err = ErrInvalid
	return
}

func Marshal(data any) (bytes []byte) {
	bytes, err := json.Marshal(data)
	if err != nil {
		log.Fatal().Err(err).Msgf("marshal %v", data)
	}
	log.Trace().Msgf("marshal %v", string(bytes))
	return
}
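A minimal round-trip sketch of the envelope defined above (illustrative only, not part of this commit; the in-memory net.Pipe stand-in and the sample Job values are hypothetical). It encodes a Req on one end of a connection and decodes and unpacks it on the other, mirroring how client/daemon exchange type-tagged payloads:

package main

import (
	"net"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func main() {
	// Stand-in for the TLS connection between client and daemon.
	server, client := net.Pipe()
	defer server.Close()
	defer client.Close()

	go func() {
		req := api.Req{Command: api.AddJob}
		// SetData records the payload type ("*api.Job") next to the JSON.
		req.SetData(&api.Job{RepoName: "test"})
		req.Encode(client)
	}()

	var req api.Req
	if err := req.Decode(server); err != nil {
		panic(err)
	}

	// GetData refuses to unpack if the recorded type does not match.
	var job api.Job
	if err := req.GetData(&job); err != nil {
		panic(err)
	}
}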
api/api_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package api

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestReq(t *testing.T) {
	req := Req{}

	req.Command = ListRepos
	req.SetData(Job{ID: 999, RepoName: "test"})

	bytes := req.Marshal()

	req2, err := Req{}.Unmarshal(bytes)
	assert.Nil(t, err)

	assert.Equal(t, req, req2)

	job := Job{}
	err = req2.GetData(&job)
	assert.Nil(t, err)

	assert.Equal(t, req2.Type, "api.Job")
}

func TestResp(t *testing.T) {
	resp := Resp{}

	resp.Error = "abracadabra"
	resp.SetData([]Repo{Repo{}, Repo{}})

	bytes := resp.Marshal()

	resp2, err := Resp{}.Unmarshal(bytes)
	assert.Nil(t, err)

	assert.Equal(t, resp, resp2)

	var repos []Repo
	err = resp2.GetData(&repos)
	assert.Nil(t, err)

	assert.Equal(t, resp2.Type, "[]api.Repo")
}
artifact/artifact.go (new file, 436 lines)
@@ -0,0 +1,436 @@
package artifact

import (
	"errors"
	"fmt"
	"io"
	"os"
	"regexp"
	"strings"
	"time"

	"github.com/naoina/toml"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)

type Kernel struct {
	// TODO
	// Version string
	// From string
	// To string

	// prev. ReleaseMask
	Regex        string
	ExcludeRegex string
}

// Target defines the kernel
type Target struct {
	Distro distro.Distro

	Kernel Kernel
}

// DockerName returns a stable name for the docker container
func (km Target) DockerName() string {
	distro := strings.ToLower(km.Distro.ID.String())
	release := strings.Replace(km.Distro.Release, ".", "__", -1)
	return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
}

// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
	// Script for information gathering or automation
	Script
)

func (at ArtifactType) String() string {
	return [...]string{"module", "exploit", "script"}[at]
}

// UnmarshalTOML is for github.com/naoina/toml support
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	stype := strings.Trim(string(data), `"`)
	stypelower := strings.ToLower(stype)
	if strings.Contains(stypelower, "module") {
		*at = KernelModule
	} else if strings.Contains(stypelower, "exploit") {
		*at = KernelExploit
	} else if strings.Contains(stypelower, "script") {
		*at = Script
	} else {
		err = fmt.Errorf("type %s is unsupported", stype)
	}
	return
}

// MarshalTOML is for github.com/naoina/toml support
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	case Script:
		s = "script"
	default:
		err = fmt.Errorf("cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}

// Duration type with toml unmarshalling support
type Duration struct {
	time.Duration
}

// UnmarshalTOML for Duration
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	duration := strings.Replace(string(data), "\"", "", -1)
	d.Duration, err = time.ParseDuration(duration)
	return
}

// MarshalTOML for Duration
func (d Duration) MarshalTOML() (data []byte, err error) {
	data = []byte(`"` + d.Duration.String() + `"`)
	return
}

type PreloadModule struct {
	Repo             string
	Path             string
	TimeoutAfterLoad Duration
}

// Extra test files to copy over
type FileTransfer struct {
	User   string
	Local  string
	Remote string
}

type Patch struct {
	Path   string
	Source string
	Script string
}

// Artifact is for .out-of-tree.toml
type Artifact struct {
	Name       string
	Type       ArtifactType
	TestFiles  []FileTransfer
	SourcePath string
	Targets    []Target

	Script string

	Qemu struct {
		Cpus              int
		Memory            int
		Timeout           Duration
		AfterStartTimeout Duration
	}

	Docker struct {
		Timeout Duration
	}

	Mitigations struct {
		DisableSmep  bool
		DisableSmap  bool
		DisableKaslr bool
		DisableKpti  bool
	}

	Patches []Patch

	Make struct {
		Target string
	}

	StandardModules bool

	Preload []PreloadModule
}

// Read reads .out-of-tree.toml
func (Artifact) Read(path string) (ka Artifact, err error) {
	f, err := os.Open(path)
	if err != nil {
		return
	}
	defer f.Close()

	buf, err := io.ReadAll(f)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)

	if len(strings.Fields(ka.Name)) != 1 {
		err = errors.New("artifact name should not contain spaces")
	}
	return
}

func (ka Artifact) checkSupport(ki distro.KernelInfo, target Target) (
	supported bool, err error) {

	if target.Distro.Release == "" {
		if ki.Distro.ID != target.Distro.ID {
			return
		}
	} else {
		if !ki.Distro.Equal(target.Distro) {
			return
		}
	}

	r, err := regexp.Compile(target.Kernel.Regex)
	if err != nil {
		return
	}

	exr, err := regexp.Compile(target.Kernel.ExcludeRegex)
	if err != nil {
		return
	}

	if !r.MatchString(ki.KernelRelease) {
		return
	}

	if target.Kernel.ExcludeRegex != "" && exr.MatchString(ki.KernelRelease) {
		return
	}

	supported = true
	return
}

// Supported returns true if given kernel is supported by artifact
func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
	for _, km := range ka.Targets {
		supported, err = ka.checkSupport(ki, km)
		if supported {
			break
		}

	}
	return
}

func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
	endless bool, cBinary,
	cEndlessStress string, cEndlessTimeout time.Duration,
	dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
		result *Result)) {

	slog.Info().Msg("start")
	testStart := time.Now()
	defer func() {
		slog.Debug().Str("test_duration",
			time.Since(testStart).String()).
			Msg("")
	}()

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		slog.Error().Err(err).Msg("qemu init")
		return
	}
	q.Log = slog

	if ka.Qemu.Timeout.Duration != 0 {
		q.Timeout = ka.Qemu.Timeout.Duration
	}
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	q.SetKASLR(!ka.Mitigations.DisableKaslr)
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	if ki.CPU.Model != "" {
		q.CPU.Model = ki.CPU.Model
	}

	if len(ki.CPU.Flags) != 0 {
		q.CPU.Flags = ki.CPU.Flags
	}

	if endless {
		q.Timeout = 0
	}

	qemuStart := time.Now()

	slog.Debug().Msgf("qemu start %v", qemuStart)
	err = q.Start()
	if err != nil {
		slog.Error().Err(err).Msg("qemu start")
		return
	}
	defer q.Stop()

	slog.Debug().Msgf("wait %v", ka.Qemu.AfterStartTimeout)
	time.Sleep(ka.Qemu.AfterStartTimeout.Duration)

	go func() {
		time.Sleep(time.Minute)
		for !q.Died {
			slog.Debug().Msg("still alive")
			time.Sleep(time.Minute)
		}
	}()

	tmp, err := os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		slog.Error().Err(err).Msg("making tmp directory")
		return
	}
	defer os.RemoveAll(tmp)

	result := Result{}
	if !endless {
		defer dump(q, ka, ki, &result)
	}

	var cTest string

	if ka.Type == Script {
		result.Build.Ok = true
		cTest = ka.Script
	} else if cBinary == "" {
		// TODO: build should return structure
		start := time.Now()
		result.BuildDir, result.BuildArtifact, result.Build.Output, err =
			Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("build done")
		if err != nil {
			log.Error().Err(err).Msg("build")
			return
		}
		result.Build.Ok = true
	} else {
		result.BuildArtifact = cBinary
		result.Build.Ok = true
	}

	if cTest == "" {
		cTest = result.BuildArtifact + "_test"
		if _, err := os.Stat(cTest); err != nil {
			slog.Debug().Msgf("%s does not exist", cTest)
			cTest = tmp + "/source/" + "test.sh"
		} else {
			slog.Debug().Msgf("%s exist", cTest)
		}
	}

	if ka.Qemu.Timeout.Duration == 0 {
		ka.Qemu.Timeout.Duration = time.Minute
	}

	err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		return
	}
	slog.Debug().Str("qemu_startup_duration",
		time.Since(qemuStart).String()).
		Msg("ssh is available")

	remoteTest, err := copyTest(q, cTest, ka)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("copy test script")
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		start := time.Now()
		err = CopyStandardModules(q, ki)
		if err != nil {
			result.InternalError = err
			slog.Error().Err(err).Msg("copy standard modules")
			return
		}
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("copy standard modules")
	}

	err = PreloadModules(q, ka, ki, ka.Docker.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("preload modules")
		return
	}

	start := time.Now()
	copyArtifactAndTest(slog, q, ka, &result, remoteTest)
	slog.Debug().Str("duration", time.Since(start).String()).
		Msgf("test completed (success: %v)", result.Test.Ok)

	if !endless {
		return
	}

	dump(q, ka, ki, &result)

	if !result.Build.Ok || !result.Run.Ok || !result.Test.Ok {
		return
	}

	slog.Info().Msg("start endless tests")

	if cEndlessStress != "" {
		slog.Debug().Msg("copy and run endless stress script")
		err = q.CopyAndRunAsync("root", cEndlessStress)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg("cannot copy/run stress")
			return
		}
	}

	for {
		output, err := q.Command("root", remoteTest)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg(output)
			return
		}
		slog.Debug().Msg(output)

		slog.Info().Msg("test success")

		slog.Debug().Msgf("wait %v", cEndlessTimeout)
		time.Sleep(cEndlessTimeout)
	}
}
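A minimal sketch of how the types above are meant to be used (illustrative only, not part of this commit; the config path, kernel release string, and distro values are hypothetical, and only the KernelInfo fields that Supported() actually inspects are filled in):

package main

import (
	"fmt"

	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/distro"
)

func main() {
	// Read() rejects artifact names that contain spaces.
	ka, err := artifact.Artifact{}.Read("/path/to/.out-of-tree.toml")
	if err != nil {
		panic(err)
	}

	// Normally this comes from the generated kernels config.
	ki := distro.KernelInfo{
		Distro:        distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
		KernelRelease: "4.15.0-55-generic",
	}

	supported, err := ka.Supported(ki)
	if err != nil {
		panic(err)
	}
	fmt.Println("supported:", supported)
}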
@@ -1,8 +1,4 @@
-// Copyright 2018 Mikhail Klementev. All rights reserved.
-// Use of this source code is governed by a AGPLv3 license
-// (or later) that can be found in the LICENSE file.
-
-package config
+package artifact

import (
	"testing"
@@ -2,7 +2,7 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.

-package cmd
+package artifact

import (
	"crypto/sha1"
@@ -15,13 +15,12 @@ import (
	"github.com/go-git/go-git/v5"
	"github.com/rs/zerolog/log"

-	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"
-	"code.dumpstack.io/tools/out-of-tree/fs"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)

-func preloadModules(q *qemu.System, ka config.Artifact, ki distro.KernelInfo,
+func PreloadModules(q *qemu.System, ka Artifact, ki distro.KernelInfo,
	dockerTimeout time.Duration) (err error) {

	for _, pm := range ka.Preload {
@@ -33,7 +32,7 @@ func preloadModules(q *qemu.System, ka config.Artifact, ki distro.KernelInfo,
	return
}

-func preload(q *qemu.System, ki distro.KernelInfo, pm config.PreloadModule,
+func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
	dockerTimeout time.Duration) (err error) {

	var workPath, cache string
@@ -46,7 +45,8 @@ func preload(q *qemu.System, ki distro.KernelInfo, pm config.PreloadModule,
			return
		}
	} else {
-		errors.New("No repo/path in preload entry")
+		err = errors.New("no repo/path in preload entry")
+		return
	}

	err = buildAndInsmod(workPath, q, ki, dockerTimeout, cache)
@@ -61,29 +61,29 @@ func preload(q *qemu.System, ki distro.KernelInfo, pm config.PreloadModule,
func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
	dockerTimeout time.Duration, cache string) (err error) {

-	tmp, err := fs.TempDir()
+	tmp, err := tempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

-	var artifact string
-	if fs.PathExists(cache) {
-		artifact = cache
+	var af string
+	if pathExists(cache) {
+		af = cache
	} else {
-		artifact, err = buildPreload(workPath, tmp, ki, dockerTimeout)
+		af, err = buildPreload(workPath, tmp, ki, dockerTimeout)
		if err != nil {
			return
		}
		if cache != "" {
-			err = copyFile(artifact, cache)
+			err = CopyFile(af, cache)
			if err != nil {
				return
			}
		}
	}

-	output, err := q.CopyAndInsmod(artifact)
+	output, err := q.CopyAndInsmod(af)
	if err != nil {
		log.Print(output)
		return
@@ -92,37 +92,48 @@ func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
}

func buildPreload(workPath, tmp string, ki distro.KernelInfo,
-	dockerTimeout time.Duration) (artifact string, err error) {
+	dockerTimeout time.Duration) (af string, err error) {

-	ka, err := config.ReadArtifactConfig(workPath + "/.out-of-tree.toml")
+	ka, err := Artifact{}.Read(workPath + "/.out-of-tree.toml")
	if err != nil {
		log.Warn().Err(err).Msg("preload")
	}

	ka.SourcePath = workPath

-	km := config.Target{
+	km := Target{
		Distro: ki.Distro,
-		Kernel: config.Kernel{Regex: ki.KernelRelease},
+		Kernel: Kernel{Regex: ki.KernelRelease},
	}
-	ka.Targets = []config.Target{km}
+	ka.Targets = []Target{km}

	if ka.Docker.Timeout.Duration != 0 {
		dockerTimeout = ka.Docker.Timeout.Duration
	}

-	_, artifact, _, err = build(log.Logger, tmp, ka, ki, dockerTimeout)
+	_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
	return
}

+func pathExists(path string) bool {
+	if _, err := os.Stat(path); err != nil {
+		return false
+	}
+	return true
+}
+
+func tempDir() (string, error) {
+	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
+}
+
func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
	err error) {

-	base := config.Dir("preload")
+	base := dotfiles.Dir("preload")
	workPath = filepath.Join(base, "/repos/", sha1sum(repo))

	var r *git.Repository
-	if fs.PathExists(workPath) {
+	if pathExists(workPath) {
		r, err = git.PlainOpen(workPath)
		if err != nil {
			return
artifact/process.go (new file, 377 lines)
@@ -0,0 +1,377 @@
package artifact

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"math/rand"
	"os"
	"os/exec"
	"strings"
	"time"

	"github.com/otiai10/copy"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)

func sh(workdir, command string) (output string, err error) {
	flog := log.With().
		Str("workdir", workdir).
		Str("command", command).
		Logger()

	cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}

	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			output += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()

	if err != nil {
		err = fmt.Errorf("%v %v output: %v", cmd, err, output)
	}
	return
}

func applyPatches(src string, ka Artifact) (err error) {
	for i, patch := range ka.Patches {
		name := fmt.Sprintf("patch_%02d", i)

		path := src + "/" + name + ".diff"
		if patch.Source != "" && patch.Path != "" {
			err = errors.New("path and source are mutually exclusive")
			return
		} else if patch.Source != "" {
			err = os.WriteFile(path, []byte(patch.Source), 0644)
			if err != nil {
				return
			}
		} else if patch.Path != "" {
			err = copy.Copy(patch.Path, path)
			if err != nil {
				return
			}
		}

		if patch.Source != "" || patch.Path != "" {
			_, err = sh(src, "patch < "+path)
			if err != nil {
				return
			}
		}

		if patch.Script != "" {
			script := src + "/" + name + ".sh"
			err = os.WriteFile(script, []byte(patch.Script), 0755)
			if err != nil {
				return
			}
			_, err = sh(src, script)
			if err != nil {
				return
			}
		}
	}
	return
}

func Build(flog zerolog.Logger, tmp string, ka Artifact,
	ki distro.KernelInfo, dockerTimeout time.Duration) (
	outdir, outpath, output string, err error) {

	target := strings.Replace(ka.Name, " ", "_", -1)
	if target == "" {
		target = fmt.Sprintf("%d", rand.Int())
	}

	outdir = tmp + "/source"

	err = copy.Copy(ka.SourcePath, outdir)
	if err != nil {
		return
	}

	err = applyPatches(outdir, ka)
	if err != nil {
		return
	}

	outpath = outdir + "/" + target
	if ka.Type == KernelModule {
		outpath += ".ko"
	}

	if ki.KernelVersion == "" {
		ki.KernelVersion = ki.KernelRelease
	}

	kernel := "/lib/modules/" + ki.KernelVersion + "/build"
	if ki.KernelSource != "" {
		kernel = ki.KernelSource
	}

	buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
	if ka.Make.Target != "" {
		buildCommand += " " + ka.Make.Target
	}

	if ki.ContainerName != "" {
		var c container.Container
		container.Timeout = dockerTimeout
		c, err = container.NewFromKernelInfo(ki)
		c.Log = flog
		if err != nil {
			log.Fatal().Err(err).Msg("container creation failure")
		}

		output, err = c.Run(outdir, []string{
			buildCommand + " && chmod -R 777 /work",
		})
	} else {
		cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
			buildCommand)

		log.Debug().Msgf("%v", cmd)

		timer := time.AfterFunc(dockerTimeout, func() {
			cmd.Process.Kill()
		})
		defer timer.Stop()

		var raw []byte
		raw, err = cmd.CombinedOutput()
		if err != nil {
			e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
				err, buildCommand, string(raw))
			err = errors.New(e)
			return
		}

		output = string(raw)
	}
	return
}

func runScript(q *qemu.System, script string) (output string, err error) {
	return q.Command("root", script)
}

func testKernelModule(q *qemu.System, ka Artifact,
	test string) (output string, err error) {

	output, err = q.Command("root", test)
	// TODO generic checks for WARNING's and so on
	return
}

func testKernelExploit(q *qemu.System, ka Artifact,
	test, exploit string) (output string, err error) {

	output, err = q.Command("user", "chmod +x "+exploit)
	if err != nil {
		return
	}

	randFilePath := fmt.Sprintf("/root/%d", rand.Int())

	cmd := fmt.Sprintf("%s %s %s", test, exploit, randFilePath)
	output, err = q.Command("user", cmd)
	if err != nil {
		return
	}

	_, err = q.Command("root", "stat "+randFilePath)
	if err != nil {
		return
	}

	return
}

type Result struct {
	BuildDir      string
	BuildArtifact string
	Build, Run, Test struct {
		Output string
		Ok     bool
	}

	InternalError       error
	InternalErrorString string
}

func CopyFile(sourcePath, destinationPath string) (err error) {
	sourceFile, err := os.Open(sourcePath)
	if err != nil {
		return
	}
	defer sourceFile.Close()

	destinationFile, err := os.Create(destinationPath)
	if err != nil {
		return err
	}
	if _, err := io.Copy(destinationFile, sourceFile); err != nil {
		destinationFile.Close()
		return err
	}
	return destinationFile.Close()
}

func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
	res *Result, remoteTest string) (err error) {

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		if f.Local[0] != '/' {
			if res.BuildDir != "" {
				f.Local = res.BuildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			res.InternalError = err
			slog.Error().Err(err).Msg("copy test file")
			return
		}
	}

	switch ka.Type {
	case KernelModule:
		res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
		if err != nil {
			slog.Error().Err(err).Msg(res.Run.Output)
			// TODO errors.As
			if strings.Contains(err.Error(), "connection refused") {
				res.InternalError = err
			}
			return
		}
		res.Run.Ok = true

		res.Test.Output, err = testKernelModule(q, ka, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Test.Ok = true
	case KernelExploit:
		remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
		err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
		if err != nil {
			return
		}

		res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
			remoteExploit)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Run.Ok = true // not really used
		res.Test.Ok = true
	case Script:
		res.Test.Output, err = runScript(q, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		slog.Info().Msgf("\n%v\n", res.Test.Output)
		res.Run.Ok = true
		res.Test.Ok = true
	default:
		slog.Fatal().Msg("Unsupported artifact type")
	}

	_, err = q.Command("root", "echo")
	if err != nil {
		slog.Error().Err(err).Msg("after-test ssh reconnect")
		res.Test.Ok = false
		return
	}

	return
}

func copyTest(q *qemu.System, testPath string, ka Artifact) (
	remoteTest string, err error) {

	remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
	err = q.CopyFile("user", testPath, remoteTest)
	if err != nil {
		if ka.Type == KernelExploit {
			q.Command("user",
				"echo -e '#!/bin/sh\necho touch $2 | $1' "+
					"> "+remoteTest+
					" && chmod +x "+remoteTest)
		} else {
			q.Command("user", "echo '#!/bin/sh' "+
				"> "+remoteTest+" && chmod +x "+remoteTest)
		}
	}

	_, err = q.Command("root", "chmod +x "+remoteTest)
	return
}

func CopyStandardModules(q *qemu.System, ki distro.KernelInfo) (err error) {
	_, err = q.Command("root", "mkdir -p /lib/modules/"+ki.KernelVersion)
	if err != nil {
		return
	}

	remotePath := "/lib/modules/" + ki.KernelVersion + "/"

	err = q.CopyDirectory("root", ki.ModulesPath+"/kernel", remotePath+"/kernel")
	if err != nil {
		return
	}

	files, err := os.ReadDir(ki.ModulesPath)
	if err != nil {
		return
	}

	for _, de := range files {
		var fi fs.FileInfo
		fi, err = de.Info()
		if err != nil {
			continue
		}
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			continue
		}
		if !strings.HasPrefix(fi.Name(), "modules") {
			continue
		}
		err = q.CopyFile("root", ki.ModulesPath+"/"+fi.Name(), remotePath)
	}

	return
}
client/client.go (new file, 262 lines)
@@ -0,0 +1,262 @@
package client

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"os/exec"
	"strconv"
	"sync"

	"github.com/davecgh/go-spew/spew"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/api"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/fs"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)

type Client struct {
	RemoteAddr string
}

func (c Client) client() *tls.Conn {
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Fatal().Msgf("no {cert,key}.pem at %s",
			dotfiles.Dir("daemon"))
	}

	cert, err := tls.LoadX509KeyPair(
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	cacert, err := os.ReadFile(dotfiles.File("daemon/cert.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	certpool := x509.NewCertPool()
	certpool.AppendCertsFromPEM(cacert)

	tlscfg := &tls.Config{
		RootCAs:      certpool,
		Certificates: []tls.Certificate{cert},
	}

	conn, err := tls.Dial("tcp", c.RemoteAddr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	return conn // conn.Close()
}

func (c Client) request(cmd api.Command, data any) (resp api.Resp, err error) {
	req := api.Req{Command: cmd}
	if data != nil {
		req.SetData(data)
	}

	conn := c.client()
	defer conn.Close()

	req.Encode(conn)

	err = resp.Decode(conn)
	if err != nil {
		log.Fatal().Err(err).Msgf("request %v", req)
	}

	log.Debug().Msgf("resp: %v", resp)

	if resp.Error != "" {
		err = errors.New(resp.Error)
		log.Fatal().Err(err).Msg("")
	}

	return
}

func (c Client) Jobs() (jobs []api.Job, err error) {
	resp, _ := c.request(api.ListJobs, nil)

	err = resp.GetData(&jobs)
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	return
}

func (c Client) AddJob(job api.Job) (uuid string, err error) {
	resp, err := c.request(api.AddJob, &job)
	if err != nil {
		return
	}

	err = resp.GetData(&uuid)
	return
}

func (c Client) Repos() (repos []api.Repo, err error) {
	resp, _ := c.request(api.ListRepos, nil)

	log.Debug().Msgf("resp: %v", spew.Sdump(resp))

	err = resp.GetData(&repos)
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	return
}

type logWriter struct {
	tag string
}

func (lw logWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	log.Trace().Str("tag", lw.tag).Msgf("%v", strconv.Quote(string(p)))
	return
}

func (c Client) handler(cConn net.Conn) {
	defer cConn.Close()

	dConn := c.client()
	defer dConn.Close()

	req := api.Req{Command: api.RawMode}
	req.Encode(dConn)

	go io.Copy(cConn, io.TeeReader(dConn, logWriter{"recv"}))
	io.Copy(dConn, io.TeeReader(cConn, logWriter{"send"}))
}

var ErrRepoNotFound = errors.New("repo not found")

// GetRepo virtual API call
func (c Client) GetRepo(name string) (repo api.Repo, err error) {
	// TODO add API call

	repos, err := c.Repos()
	if err != nil {
		return
	}

	for _, r := range repos {
		if r.Name == name {
			repo = r
			return
		}
	}

	err = ErrRepoNotFound
	return
}

func (c Client) GitProxy(addr string, ready *sync.Mutex) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal().Err(err).Msg("git proxy listen")
	}
	defer l.Close()

	log.Debug().Msgf("git proxy listen on %v", addr)

	for {
		ready.Unlock()
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}
		log.Debug().Msgf("git proxy accept %s", conn.RemoteAddr())

		go c.handler(conn)
	}
}

func (c Client) PushRepo(repo api.Repo) (err error) {
	addr := qemu.GetFreeAddrPort()

	ready := &sync.Mutex{}

	ready.Lock()
	go c.GitProxy(addr, ready)

	ready.Lock()

	remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
	log.Debug().Msgf("git proxy remote: %v", remote)

	raw, err := exec.Command("git", "--work-tree", repo.Path, "push", remote).
		CombinedOutput()
	if err != nil {
		return
	}

	log.Info().Msgf("push repo %v\n%v", repo, string(raw))
	return
}

func (c Client) AddRepo(repo api.Repo) (err error) {
	_, err = c.request(api.AddRepo, &repo)
	if err != nil {
		return
	}

	log.Info().Msgf("add repo %v", repo)
	return
}

func (c Client) Kernels() (kernels []distro.KernelInfo, err error) {
	resp, err := c.request(api.Kernels, nil)
	if err != nil {
		return
	}

	err = resp.GetData(&kernels)
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	log.Info().Msgf("got %d kernels", len(kernels))
	return
}

func (c Client) JobStatus(uuid string) (st api.Status, err error) {
	resp, err := c.request(api.JobStatus, &uuid)
	if err != nil {
		return
	}

	err = resp.GetData(&st)
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	return
}

func (c Client) JobLogs(uuid string) (logs []api.JobLog, err error) {
	resp, err := c.request(api.JobLogs, &uuid)
	if err != nil {
		return
	}

	err = resp.GetData(&logs)
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	return
}
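A sketch of the intended client workflow (illustrative only, not part of this commit; the repo path, commit value, and polling interval are hypothetical, and a running daemon plus {cert,key}.pem under ~/.out-of-tree/daemon/ are assumed):

package main

import (
	"fmt"
	"time"

	"code.dumpstack.io/tools/out-of-tree/api"
	"code.dumpstack.io/tools/out-of-tree/client"
)

func main() {
	c := client.Client{RemoteAddr: "localhost:63527"}

	repo := api.Repo{Name: "test", Path: "/path/to/work/tree"}
	if err := c.AddRepo(repo); err != nil {
		panic(err)
	}
	// Push the local work tree through the TLS git proxy.
	if err := c.PushRepo(repo); err != nil {
		panic(err)
	}

	uuid, err := c.AddJob(api.Job{RepoName: repo.Name, Commit: "HEAD"})
	if err != nil {
		panic(err)
	}

	// Poll until the daemon reports a terminal state.
	for {
		st, err := c.JobStatus(uuid)
		if err != nil {
			panic(err)
		}
		fmt.Println(st)
		if st == api.StatusSuccess || st == api.StatusFailure {
			break
		}
		time.Sleep(time.Second)
	}
}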
cmd/daemon.go (new file, 120 lines)
@@ -0,0 +1,120 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.

package cmd

import (
	"encoding/json"
	"fmt"

	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/client"
	"code.dumpstack.io/tools/out-of-tree/daemon"
)

type DaemonCmd struct {
	Addr string `default:":63527"`

	Serve DaemonServeCmd `cmd:"" help:"start daemon"`

	Job  DaemonJobCmd  `cmd:"" aliases:"jobs" help:"manage jobs"`
	Repo DaemonRepoCmd `cmd:"" aliases:"repos" help:"manage repositories"`
}

type DaemonServeCmd struct{}

func (cmd *DaemonServeCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	d, err := daemon.Init(g.Config.Kernels)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	defer d.Kill()

	go d.Daemon()
	d.Listen(dm.Addr)
	return
}

type DaemonJobCmd struct {
	List   DaemonJobsListCmd   `cmd:"" help:"list jobs"`
	Status DaemonJobsStatusCmd `cmd:"" help:"show job status"`
	Log    DaemonJobsLogsCmd   `cmd:"" help:"job logs"`
}

type DaemonJobsListCmd struct{}

func (cmd *DaemonJobsListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	jobs, err := c.Jobs()
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	b, err := json.MarshalIndent(jobs, "", " ")
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	fmt.Println(string(b))
	return
}

type DaemonJobsStatusCmd struct {
	UUID string `arg:""`
}

func (cmd *DaemonJobsStatusCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	st, err := c.JobStatus(cmd.UUID)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	fmt.Println(st)
	return
}

type DaemonJobsLogsCmd struct {
	UUID string `arg:""`
}

func (cmd *DaemonJobsLogsCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	logs, err := c.JobLogs(cmd.UUID)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	for _, l := range logs {
		log.Info().Msg(l.Name)
		fmt.Println(l.Text)
	}
	return
}

type DaemonRepoCmd struct {
	List DaemonRepoListCmd `cmd:"" help:"list repos"`
}

type DaemonRepoListCmd struct{}

func (cmd *DaemonRepoListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	repos, err := c.Repos()
	if err != nil {
		return
	}

	b, err := json.MarshalIndent(repos, "", " ")
	if err != nil {
		log.Error().Err(err).Msg("")
	}

	fmt.Println(string(b))
	return
}
cmd/db.go (12 lines changed)
@@ -12,7 +12,7 @@ import (

	_ "github.com/mattn/go-sqlite3"

-	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)
@@ -28,9 +28,9 @@ type logEntry struct {
	Timestamp time.Time

	qemu.System
-	config.Artifact
+	artifact.Artifact
	distro.KernelInfo
-	phasesResult
+	artifact.Result
}

func createLogTable(db *sql.DB) (err error) {
@@ -123,8 +123,8 @@ func getVersion(db *sql.DB) (version int, err error) {
	return
}

-func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
-	ki distro.KernelInfo, res *phasesResult, tag string) (err error) {
+func addToLog(db *sql.DB, q *qemu.System, ka artifact.Artifact,
+	ki distro.KernelInfo, res *artifact.Result, tag string) (err error) {

	stmt, err := db.Prepare("INSERT INTO log (name, type, tag, " +
		"distro_type, distro_release, kernel_release, " +
@@ -201,7 +201,7 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
	return
}

-func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
+func getAllArtifactLogs(db *sql.DB, tag string, num int, ka artifact.Artifact) (
	les []logEntry, err error) {

	stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
cmd/debug.go (17 lines changed)
@@ -14,6 +14,7 @@ import (
	"github.com/rs/zerolog/log"
	"gopkg.in/logrusorgru/aurora.v2"

+	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/fs"
@@ -53,7 +54,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
	} else {
		configPath = cmd.ArtifactConfig
	}
-	ka, err := config.ReadArtifactConfig(configPath)
+	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}
@@ -158,14 +159,14 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {

	if ka.StandardModules {
		// Module depends on one of the standard modules
-		err = copyStandardModules(q, ki)
+		err = artifact.CopyStandardModules(q, ki)
		if err != nil {
			log.Print(err)
			return
		}
	}

-	err = preloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
+	err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
	if err != nil {
		log.Print(err)
		return
@@ -173,20 +174,20 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {

	var buildDir, outFile, output, remoteFile string

-	if ka.Type == config.Script {
+	if ka.Type == artifact.Script {
		err = q.CopyFile("root", ka.Script, ka.Script)
		if err != nil {
			return
		}
	} else {
-		buildDir, outFile, output, err = build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
+		buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
		if err != nil {
			log.Print(err, output)
			return
		}

		remoteFile = "/tmp/" + strings.Replace(ka.Name, " ", "_", -1)
-		if ka.Type == config.KernelModule {
+		if ka.Type == artifact.KernelModule {
			remoteFile += ".ko"
		}

@@ -222,7 +223,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
	return
}

-func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
+func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
	kernel string) (ki distro.KernelInfo, err error) {

	km, err := kernelMask(kernel)
@@ -230,7 +231,7 @@ func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
		return
	}

-	ka.Targets = []config.Target{km}
+	ka.Targets = []artifact.Target{km}

	for _, ki = range kcfg.Kernels {
		var supported bool
cmd/gen.go (22 lines changed)
@@ -9,7 +9,7 @@ import (

	"github.com/naoina/toml"

-	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/distro"
)

@@ -20,30 +20,30 @@ type GenCmd struct {
func (cmd *GenCmd) Run(g *Globals) (err error) {
	switch cmd.Type {
	case "module":
-		err = genConfig(config.KernelModule)
+		err = genConfig(artifact.KernelModule)
	case "exploit":
-		err = genConfig(config.KernelExploit)
+		err = genConfig(artifact.KernelExploit)
	}
	return
}

-func genConfig(at config.ArtifactType) (err error) {
-	a := config.Artifact{
+func genConfig(at artifact.ArtifactType) (err error) {
+	a := artifact.Artifact{
		Name: "Put name here",
		Type: at,
	}
-	a.Targets = append(a.Targets, config.Target{
+	a.Targets = append(a.Targets, artifact.Target{
		Distro: distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
-		Kernel: config.Kernel{Regex: ".*"},
+		Kernel: artifact.Kernel{Regex: ".*"},
	})
-	a.Targets = append(a.Targets, config.Target{
+	a.Targets = append(a.Targets, artifact.Target{
		Distro: distro.Distro{ID: distro.Debian, Release: "8"},
-		Kernel: config.Kernel{Regex: ".*"},
+		Kernel: artifact.Kernel{Regex: ".*"},
	})
-	a.Preload = append(a.Preload, config.PreloadModule{
+	a.Preload = append(a.Preload, artifact.PreloadModule{
		Repo: "Repo name (e.g. https://github.com/openwall/lkrg)",
	})
-	a.Patches = append(a.Patches, config.Patch{
+	a.Patches = append(a.Patches, artifact.Patch{
		Path: "/path/to/profiling.patch",
	})

@@ -9,7 +9,10 @@
type Globals struct {
	Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`

-	WorkDir string `help:"path to work directory" default:"./" type:"path"`
+	WorkDir string `help:"path to work directory" default:"./" type:"path" existingdir:""`

	CacheURL url.URL

+	Remote bool `help:"run at remote server"`
+	RemoteAddr string `default:"localhost:63527"`
}
@@ -13,6 +13,7 @@ import (
	"time"

	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/fs"
	"code.dumpstack.io/tools/out-of-tree/qemu"
@@ -26,7 +27,7 @@ type ImageCmd struct {
type ImageListCmd struct{}

func (cmd *ImageListCmd) Run(g *Globals) (err error) {
-	entries, err := os.ReadDir(config.Dir("images"))
+	entries, err := os.ReadDir(dotfiles.Dir("images"))
	if err != nil {
		return
	}
@@ -44,7 +45,7 @@ type ImageEditCmd struct {
}

func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
-	image := filepath.Join(config.Dir("images"), cmd.Name)
+	image := filepath.Join(dotfiles.Dir("images"), cmd.Name)
	if !fs.PathExists(image) {
		fmt.Println("image does not exist")
	}
@@ -15,7 +15,9 @@ import (
	"github.com/remeh/sizedwaitgroup"
	"github.com/rs/zerolog/log"

+	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/kernel"
@@ -83,7 +85,7 @@ func (cmd KernelCmd) UpdateConfig() (err error) {
		return
	}

-	err = os.WriteFile(config.File("kernels.toml"), buf, os.ModePerm)
+	err = os.WriteFile(dotfiles.File("kernels.toml"), buf, os.ModePerm)
	if err != nil {
		return
	}
@@ -92,7 +94,7 @@ func (cmd KernelCmd) UpdateConfig() (err error) {
	return
}

-func (cmd *KernelCmd) GenKernel(km config.Target, pkg string) {
+func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
	flog := log.With().
		Str("kernel", pkg).
		Str("distro", km.Distro.String()).
@@ -156,7 +158,7 @@ func (cmd *KernelCmd) GenKernel(km config.Target, pkg string) {
	}
}

-func (cmd *KernelCmd) Generate(g *Globals, km config.Target) (err error) {
+func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
	if cmd.Update {
		container.UseCache = false
	}
@@ -263,9 +265,9 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
		return
	}

-	km := config.Target{
+	km := artifact.Target{
		Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
-		Kernel: config.Kernel{Regex: ".*"},
+		Kernel: artifact.Kernel{Regex: ".*"},
	}

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), false)
@@ -289,7 +291,7 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
type KernelAutogenCmd struct{}

func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
-	ka, err := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
+	ka, err := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
	if err != nil {
		return
	}
@@ -340,9 +342,9 @@ func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
			continue
		}

-		target := config.Target{
+		target := artifact.Target{
			Distro: dist,
-			Kernel: config.Kernel{Regex: ".*"},
+			Kernel: artifact.Kernel{Regex: ".*"},
		}

		err = kernelCmd.Generate(g, target)
@@ -368,9 +370,9 @@ func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {

	kernel.SetSigintHandler(&kernelCmd.shutdown)

-	km := config.Target{
+	km := artifact.Target{
		Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
-		Kernel: config.Kernel{Regex: cmd.Kernel},
+		Kernel: artifact.Kernel{Regex: cmd.Kernel},
	}
	err = kernelCmd.Generate(g, km)
	if err != nil {
cmd/log.go (12 lines changed)
@@ -15,7 +15,7 @@ import (
	"github.com/rs/zerolog/log"
	"gopkg.in/logrusorgru/aurora.v2"

-	"code.dumpstack.io/tools/out-of-tree/config"
+	"code.dumpstack.io/tools/out-of-tree/artifact"
)

type LogCmd struct {
@@ -40,7 +40,7 @@ func (cmd *LogQueryCmd) Run(g *Globals) (err error) {

	var les []logEntry

-	ka, kaErr := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
+	ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
	if kaErr == nil {
		log.Print(".out-of-tree.toml found, filter by artifact name")
		les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
@@ -119,7 +119,7 @@ func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
	fmt.Println()

	fmt.Println("Build ok:", l.Build.Ok)
-	if l.Type == config.KernelModule {
+	if l.Type == artifact.KernelModule {
		fmt.Println("Insmod ok:", l.Run.Ok)
	}
	fmt.Println("Test ok:", l.Test.Ok)
@@ -128,7 +128,7 @@ func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
	fmt.Printf("Build output:\n%s\n", l.Build.Output)
	fmt.Println()

-	if l.Type == config.KernelModule {
+	if l.Type == artifact.KernelModule {
		fmt.Printf("Insmod output:\n%s\n", l.Run.Output)
		fmt.Println()
	}
@@ -232,7 +232,7 @@ func logLogEntry(l logEntry) {
	var status aurora.Value
	if l.InternalErrorString != "" {
		status = genOkFailCentered("INTERNAL", false)
-	} else if l.Type == config.KernelExploit {
+	} else if l.Type == artifact.KernelExploit {
		if l.Build.Ok {
			status = genOkFailCentered("LPE", l.Test.Ok)
		} else {
@@ -273,7 +273,7 @@ func getStats(db *sql.DB, path, tag string) (

	var les []logEntry

-	ka, kaErr := config.ReadArtifactConfig(path + "/.out-of-tree.toml")
+	ka, kaErr := artifact.Artifact{}.Read(path + "/.out-of-tree.toml")
	if kaErr == nil {
		les, err = getAllArtifactLogs(db, tag, -1, ka)
	} else {
cmd/pew.go (958 lines changed)
File diff suppressed because it is too large.

config/config.go (215 lines changed)
@@ -5,214 +5,14 @@
package config

import (
	"fmt"
	"io/ioutil"
	"io"
	"os"
	"regexp"
	"strings"
	"time"

	"code.dumpstack.io/tools/out-of-tree/distro"

	"github.com/naoina/toml"
)

type Kernel struct {
	// TODO
	// Version string
	// From string
	// To string

	// prev. ReleaseMask
	Regex        string
	ExcludeRegex string
}

// Target defines the kernel
type Target struct {
	Distro distro.Distro

	Kernel Kernel
}

// DockerName is returns stable name for docker container
func (km Target) DockerName() string {
	distro := strings.ToLower(km.Distro.ID.String())
	release := strings.Replace(km.Distro.Release, ".", "__", -1)
	return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
}

// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
	// Script for information gathering or automation
	Script
)

func (at ArtifactType) String() string {
	return [...]string{"module", "exploit", "script"}[at]
}

// UnmarshalTOML is for support github.com/naoina/toml
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	stype := strings.Trim(string(data), `"`)
	stypelower := strings.ToLower(stype)
	if strings.Contains(stypelower, "module") {
		*at = KernelModule
	} else if strings.Contains(stypelower, "exploit") {
		*at = KernelExploit
	} else if strings.Contains(stypelower, "script") {
		*at = Script
	} else {
		err = fmt.Errorf("Type %s is unsupported", stype)
	}
	return
}

// MarshalTOML is for support github.com/naoina/toml
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	case Script:
		s = "script"
	default:
		err = fmt.Errorf("Cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}

// Duration type with toml unmarshalling support
type Duration struct {
	time.Duration
}

// UnmarshalTOML for Duration
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	duration := strings.Replace(string(data), "\"", "", -1)
	d.Duration, err = time.ParseDuration(duration)
	return
}

// MarshalTOML for Duration
func (d Duration) MarshalTOML() (data []byte, err error) {
	data = []byte(`"` + d.Duration.String() + `"`)
	return
}

type PreloadModule struct {
	Repo             string
	Path             string
	TimeoutAfterLoad Duration
}

// Extra test files to copy over
type FileTransfer struct {
	User   string
	Local  string
	Remote string
}

type Patch struct {
	Path   string
	Source string
	Script string
}

// Artifact is for .out-of-tree.toml
type Artifact struct {
	Name       string
	Type       ArtifactType
	TestFiles  []FileTransfer
	SourcePath string
	Targets    []Target

	Script string

	Qemu struct {
		Cpus    int
		Memory  int
		Timeout Duration
	}

	Docker struct {
		Timeout Duration
	}

	Mitigations struct {
		DisableSmep  bool
		DisableSmap  bool
		DisableKaslr bool
		DisableKpti  bool
	}

	Patches []Patch

	Make struct {
		Target string
	}

	StandardModules bool

	Preload []PreloadModule
}

func (ka Artifact) checkSupport(ki distro.KernelInfo, target Target) (
	supported bool, err error) {

	if target.Distro.Release == "" {
		if ki.Distro.ID != target.Distro.ID {
			return
		}
	} else {
		if !ki.Distro.Equal(target.Distro) {
			return
		}
	}

	r, err := regexp.Compile(target.Kernel.Regex)
	if err != nil {
		return
	}

	exr, err := regexp.Compile(target.Kernel.ExcludeRegex)
	if err != nil {
		return
	}

	if !r.MatchString(ki.KernelRelease) {
		return
	}

	if target.Kernel.ExcludeRegex != "" && exr.MatchString(ki.KernelRelease) {
		return
	}

	supported = true
	return
}

// Supported returns true if given kernel is supported by artifact
func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
	for _, km := range ka.Targets {
		supported, err = ka.checkSupport(ki, km)
		if supported {
			break
		}

	}
	return
}

// KernelConfig is the ~/.out-of-tree/kernels.toml configuration description
type KernelConfig struct {
	Kernels []distro.KernelInfo
@@ -225,7 +25,7 @@ func readFileAll(path string) (buf []byte, err error) {
	}
	defer f.Close()

	buf, err = ioutil.ReadAll(f)
	buf, err = io.ReadAll(f)
	return
}

@@ -243,14 +43,3 @@ func ReadKernelConfig(path string) (kernelCfg KernelConfig, err error) {

	return
}

// ReadArtifactConfig is for read .out-of-tree.toml
func ReadArtifactConfig(path string) (ka Artifact, err error) {
	buf, err := readFileAll(path)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)
	return
}
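Note: the include/exclude regex check that checkSupport performs on a kernel release string (now carried by the artifact package) can be illustrated in isolation. This is a minimal sketch rather than the package's own API, and the release strings and regexes are made up.

package main

import (
	"fmt"
	"regexp"
)

// matches reports whether a kernel release satisfies an include regex and is
// not caught by an optional exclude regex, mirroring the logic shown above.
func matches(release, include, exclude string) (bool, error) {
	inc, err := regexp.Compile(include)
	if err != nil {
		return false, err
	}
	if !inc.MatchString(release) {
		return false, nil
	}
	if exclude != "" {
		exc, err := regexp.Compile(exclude)
		if err != nil {
			return false, err
		}
		if exc.MatchString(release) {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, _ := matches("5.15.0-91-generic", `^5\.15\..*-generic$`, `-azure$`)
	fmt.Println(ok) // true
}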
@@ -1,4 +1,4 @@
package config
package dotfiles

import (
	"os"
@@ -1,7 +1,6 @@
package config
package dotfiles

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
@@ -18,7 +17,7 @@ func TestDirectory(t *testing.T) {
}

func TestDir(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "out-of-tree_")
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		return
	}
@@ -64,7 +63,7 @@ func TestDir(t *testing.T) {
}

func TestFile(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "out-of-tree_")
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		return
	}
@@ -9,6 +9,8 @@ import (
	"os"
	"time"

	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"

	"github.com/alecthomas/kong"
@@ -16,11 +18,6 @@ import (
	"github.com/naoina/toml"
)

type DockerCommand struct {
	Distro  distro.Distro
	Command string
}

type OutOfTree struct {
	// Directory for all files if not explicitly specified
	Directory string
@@ -31,16 +28,16 @@ type OutOfTree struct {
	Database string

	Qemu struct {
		Timeout Duration
		Timeout artifact.Duration
	}

	Docker struct {
		Timeout Duration
		Timeout artifact.Duration
		Registry string

		// Commands that will be executed before
		// the base layer of Dockerfile
		Commands []DockerCommand
		Commands []distro.Command
	}
}

@@ -82,21 +79,21 @@ func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
	}

	if c.Directory != "" {
		Directory = c.Directory
		dotfiles.Directory = c.Directory
	} else {
		c.Directory = Dir("")
		c.Directory = dotfiles.Dir("")
	}

	if c.Kernels == "" {
		c.Kernels = File("kernels.toml")
		c.Kernels = dotfiles.File("kernels.toml")
	}

	if c.UserKernels == "" {
		c.UserKernels = File("kernels.user.toml")
		c.UserKernels = dotfiles.File("kernels.user.toml")
	}

	if c.Database == "" {
		c.Database = File("db.sqlite")
		c.Database = dotfiles.File("db.sqlite")
	}

	if c.Qemu.Timeout.Duration == 0 {
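For orientation, a minimal sketch of how a TOML block with the new artifact.Duration timeouts could be decoded. The struct here is a simplified stand-in for OutOfTree, the field values are placeholders, and the snippet assumes artifact.Duration keeps the TOML (un)marshalling shown in the removed config code.

package main

import (
	"fmt"

	"code.dumpstack.io/tools/out-of-tree/artifact"

	"github.com/naoina/toml"
)

// conf is a simplified stand-in for the OutOfTree struct above.
type conf struct {
	Qemu struct {
		Timeout artifact.Duration
	}
	Docker struct {
		Timeout  artifact.Duration
		Registry string
	}
}

func main() {
	data := []byte("[qemu]\ntimeout = \"4h\"\n\n[docker]\ntimeout = \"8h\"\n")

	var c conf
	if err := toml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Qemu.Timeout.Duration, c.Docker.Timeout.Duration)
}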
@@ -19,7 +19,7 @@ import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro"
)

@@ -29,7 +29,7 @@ var Registry = ""

var Timeout time.Duration

var Commands []config.DockerCommand
var Commands []distro.Command

var UseCache = true

@@ -123,17 +123,17 @@ func New(dist distro.Distro) (c Container, err error) {
	c.dist = dist

	c.Volumes = append(c.Volumes, Volume{
		Src:  config.Dir("volumes", c.name, "lib", "modules"),
		Src:  dotfiles.Dir("volumes", c.name, "lib", "modules"),
		Dest: "/lib/modules",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  config.Dir("volumes", c.name, "usr", "src"),
		Src:  dotfiles.Dir("volumes", c.name, "usr", "src"),
		Dest: "/usr/src",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  config.Dir("volumes", c.name, "boot"),
		Src:  dotfiles.Dir("volumes", c.name, "boot"),
		Dest: "/boot",
	})

@@ -194,7 +194,7 @@ func (c Container) Exist() (yes bool) {
}

func (c Container) Build(image string, envs, runs []string) (err error) {
	cdir := config.Dir("containers", c.name)
	cdir := dotfiles.Dir("containers", c.name)
	cfile := filepath.Join(cdir, "Dockerfile")

	cf := "FROM "
@@ -474,7 +474,7 @@ func (c Container) Kernels() (kernels []distro.KernelInfo, err error) {
			InitrdPath:  filepath.Join(boot, initrdFile),
			ModulesPath: filepath.Join(libmodules, krel.Name()),

			RootFS: config.File("images", c.dist.RootFS()),
			RootFS: dotfiles.File("images", c.dist.RootFS()),
		}

		kernels = append(kernels, ki)
@@ -483,7 +483,7 @@ func (c Container) Kernels() (kernels []distro.KernelInfo, err error) {
	for _, cmd := range []string{
		"find /boot -type f -exec chmod a+r {} \\;",
	} {
		_, err = c.Run(config.Dir("tmp"), []string{cmd})
		_, err = c.Run(dotfiles.Dir("tmp"), []string{cmd})
		if err != nil {
			return
		}
daemon/commands.go (new file, 275 lines)
@@ -0,0 +1,275 @@
package daemon

import (
	"database/sql"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"sync"

	"github.com/davecgh/go-spew/spew"
	"github.com/google/uuid"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/api"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/daemon/db"
)

type cmdenv struct {
	Conn net.Conn

	Log zerolog.Logger

	DB *sql.DB

	WG sync.WaitGroup

	KernelConfig string
}

func command(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	e.Log.Trace().Msgf("%v", spew.Sdump(req))
	defer e.Log.Trace().Msgf("%v", spew.Sdump(resp))

	e.WG.Add(1)
	defer e.WG.Done()

	e.Log.Debug().Msgf("%v", req.Command)

	switch req.Command {
	case api.RawMode:
		err = rawMode(req, e)
	case api.AddJob:
		err = addJob(req, resp, e)
	case api.ListJobs:
		err = listJobs(resp, e)
	case api.AddRepo:
		err = addRepo(req, resp, e)
	case api.ListRepos:
		err = listRepos(resp, e)
	case api.Kernels:
		err = kernels(resp, e)
	case api.JobStatus:
		err = jobStatus(req, resp, e)
	case api.JobLogs:
		err = jobLogs(req, resp, e)
	default:
		err = errors.New("unknown command")
	}

	resp.Err = err
	return
}

type logWriter struct {
	log zerolog.Logger
}

func (lw logWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	//lw.log.Trace().Msgf("%v", strconv.Quote(string(p)))
	return
}

func rawMode(req *api.Req, e cmdenv) (err error) {
	uuid := uuid.New().String()

	lwsend := logWriter{log.With().Str("uuid", uuid).Str("git", "send").Logger()}
	lwrecv := logWriter{log.With().Str("uuid", uuid).Str("git", "recv").Logger()}

	conn, err := net.Dial("tcp", ":9418")
	if err != nil {
		log.Error().Err(err).Msg("dial")
		return
	}

	go io.Copy(e.Conn, io.TeeReader(conn, lwrecv))
	io.Copy(conn, io.TeeReader(e.Conn, lwsend))

	return
}

func listJobs(resp *api.Resp, e cmdenv) (err error) {
	jobs, err := db.Jobs(e.DB)
	if err != nil {
		return
	}

	resp.SetData(&jobs)
	return
}

func addJob(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var job api.Job
	err = req.GetData(&job)
	if err != nil {
		return
	}

	job.GenUUID()

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	var found bool
	for _, r := range repos {
		if job.RepoName == r.Name {
			found = true
		}
	}
	if !found {
		err = errors.New("repo does not exist")
		return
	}

	if job.RepoName == "" {
		err = errors.New("repo name cannot be empty")
		return
	}

	if job.Commit == "" {
		err = errors.New("invalid commit")
		return
	}

	err = db.AddJob(e.DB, &job)
	if err != nil {
		return
	}

	resp.SetData(&job.UUID)
	return
}

func listRepos(resp *api.Resp, e cmdenv) (err error) {
	repos, err := db.Repos(e.DB)

	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}

	for i := range repos {
		repos[i].Path = dotfiles.Dir("daemon/repos",
			repos[i].Name)
	}

	log.Trace().Msgf("%v", spew.Sdump(repos))
	resp.SetData(&repos)
	return
}

func addRepo(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var repo api.Repo
	err = req.GetData(&repo)
	if err != nil {
		return
	}

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	for _, r := range repos {
		log.Debug().Msgf("%v, %v", r, repo.Name)
		if repo.Name == r.Name {
			err = fmt.Errorf("repo already exist")
			return
		}
	}

	cmd := exec.Command("git", "init", "--bare")

	cmd.Dir = dotfiles.Dir("daemon/repos", repo.Name)

	var out []byte
	out, err = cmd.Output()
	e.Log.Debug().Msgf("%v -> %v\n%v", cmd, err, string(out))
	if err != nil {
		return
	}

	err = db.AddRepo(e.DB, &repo)
	return
}

func kernels(resp *api.Resp, e cmdenv) (err error) {
	kcfg, err := config.ReadKernelConfig(e.KernelConfig)
	if err != nil {
		e.Log.Error().Err(err).Msg("read kernels config")
		return
	}

	e.Log.Info().Msgf("send back %d kernels", len(kcfg.Kernels))
	resp.SetData(&kcfg.Kernels)
	return
}

func jobLogs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	err = req.GetData(&uuid)
	if err != nil {
		return
	}

	logdir := filepath.Join(dotfiles.File("daemon/logs"), uuid)
	if _, err = os.Stat(logdir); err != nil {
		return
	}

	files, err := os.ReadDir(logdir)
	if err != nil {
		return
	}

	var logs []api.JobLog

	for _, f := range files {
		if f.IsDir() {
			continue
		}

		logfile := filepath.Join(logdir, f.Name())

		var buf []byte
		buf, err = os.ReadFile(logfile)
		if err != nil {
			return
		}

		logs = append(logs, api.JobLog{
			Name: f.Name(),
			Text: string(buf),
		})
	}

	resp.SetData(&logs)
	return
}

func jobStatus(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	err = req.GetData(&uuid)
	if err != nil {
		return
	}

	st, err := db.JobStatus(e.DB, uuid)
	if err != nil {
		return
	}
	resp.SetData(&st)
	return
}
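For orientation, a rough client-side sketch of talking to these handlers: one JSON-encoded api.Req goes out and one api.Resp comes back (raw mode excepted). The address is a placeholder, and certificate verification is skipped here only because the daemon generates a self-signed certificate; this is not a shipped client, just an assumption-laden example.

package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"log"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func main() {
	// Placeholder address; InsecureSkipVerify stands in for pinning the
	// cert.pem copied from the daemon host.
	conn, err := tls.Dial("tcp", "127.0.0.1:1234",
		&tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Send a single command and read the single response the handler encodes.
	req := api.Req{Command: api.ListJobs}
	if err := json.NewEncoder(conn).Encode(&req); err != nil {
		log.Fatal(err)
	}

	var resp api.Resp
	if err := json.NewDecoder(conn).Decode(&resp); err != nil {
		log.Fatal(err)
	}

	var jobs []api.Job
	if err := resp.GetData(&jobs); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d jobs\n", len(jobs))
}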
daemon/daemon.go (new file, 207 lines)
@@ -0,0 +1,207 @@
package daemon

import (
	"crypto/tls"
	"database/sql"
	"io"
	"net"
	"os/exec"
	"sync"
	"time"

	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/api"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/daemon/db"
	"code.dumpstack.io/tools/out-of-tree/fs"
)

type Daemon struct {
	db           *sql.DB
	kernelConfig string

	shutdown bool
	wg       sync.WaitGroup
}

func Init(kernelConfig string) (d *Daemon, err error) {
	d = &Daemon{}
	d.kernelConfig = kernelConfig
	d.wg.Add(1) // matches with db.Close()
	d.db, err = db.OpenDatabase(dotfiles.File("daemon/daemon.db"))
	if err != nil {
		log.Error().Err(err).Msg("cannot open daemon.db")
	}

	log.Info().Msgf("database %s", dotfiles.File("daemon/daemon.db"))
	return
}

func (d *Daemon) Kill() {
	d.shutdown = true

	d.db.Close()
	d.wg.Done()
}

func (d *Daemon) Daemon() {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	log.Info().Msg("start daemon loop")

	for !d.shutdown {
		d.wg.Add(1)

		jobs, err := db.Jobs(d.db)
		if err != nil {
			log.Error().Err(err).Msg("")
			time.Sleep(time.Minute)
			continue
		}

		for _, job := range jobs {
			err = newPjob(job, d.db).Process()
			if err != nil {
				log.Error().Err(err).Msgf("%v", job)
			}
		}

		d.wg.Done()
		time.Sleep(time.Second)
	}
}

func handler(conn net.Conn, e cmdenv) {
	defer conn.Close()

	resp := api.NewResp()

	e.Log = log.With().
		Str("resp_uuid", resp.UUID).
		Str("remote_addr", conn.RemoteAddr().String()).
		Logger()

	e.Log.Info().Msg("")

	var req api.Req

	defer func() {
		if req.Command != api.RawMode {
			resp.Encode(conn)
		} else {
			log.Debug().Msg("raw mode, not encode response")
		}
	}()

	err := req.Decode(conn)
	if err != nil {
		e.Log.Error().Err(err).Msg("cannot decode")
		return
	}

	err = command(&req, &resp, e)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}
}

func (d *Daemon) Listen(addr string) {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	go func() {
		repodir := dotfiles.Dir("daemon/repos")
		git := exec.Command("git", "daemon", "--port=9418", "--verbose",
			"--reuseaddr",
			"--export-all", "--base-path="+repodir,
			"--enable=receive-pack",
			"--enable=upload-pack",
			repodir)

		stdout, err := git.StdoutPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}

		go io.Copy(logWriter{log: log.Logger}, stdout)

		stderr, err := git.StderrPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}

		go io.Copy(logWriter{log: log.Logger}, stderr)

		log.Info().Msgf("start %v", git)
		git.Start()
		defer func() {
			log.Info().Msgf("stop %v", git)
		}()

		err = git.Wait()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
	}()

	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Info().Msg("No cert.pem, generating...")
		cmd := exec.Command("openssl",
			"req", "-batch", "-newkey", "rsa:2048",
			"-new", "-nodes", "-x509",
			"-subj", "/CN=*",
			"-addext", "subjectAltName = DNS:*",
			"-out", dotfiles.File("daemon/cert.pem"),
			"-keyout", dotfiles.File("daemon/key.pem"))

		out, err := cmd.Output()
		if err != nil {
			log.Error().Err(err).Msg(string(out))
			return
		}
	}

	log.Info().Msg("copy to client:")
	log.Info().Msgf("cert: %s, key: %s",
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))

	cert, err := tls.LoadX509KeyPair(dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("LoadX509KeyPair")
	}
	tlscfg := &tls.Config{Certificates: []tls.Certificate{cert}}

	l, err := tls.Listen("tcp", addr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("listen")
	}

	log.Info().Msgf("listen on %v", addr)

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}
		log.Info().Msgf("accept %s", conn.RemoteAddr())

		e := cmdenv{
			DB:           d.db,
			WG:           d.wg,
			Conn:         conn,
			KernelConfig: d.kernelConfig,
		}

		go handler(conn, e)
	}
}
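A minimal wiring sketch, assuming the kernels path and listen address are supplied elsewhere (the real entry point lives in the cmd package): Init opens the database, Daemon runs the job loop, and Listen serves the TLS API alongside the embedded git daemon.

package main

import (
	"log"

	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/daemon"
)

func main() {
	d, err := daemon.Init(dotfiles.File("kernels.toml"))
	if err != nil {
		log.Fatal(err)
	}
	defer d.Kill()

	go d.Daemon()     // background loop that processes queued jobs
	d.Listen(":1234") // placeholder address; blocks serving API connections
}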
daemon/daemon_test.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package daemon

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func init() {
	log.Logger = zerolog.New(zerolog.ConsoleWriter{
		Out:     os.Stdout,
		NoColor: true,
	})
}
daemon/db/db.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package db

import (
	"database/sql"
	"fmt"
	"strconv"

	_ "github.com/mattn/go-sqlite3"
)

// Change on ANY database update
const currentDatabaseVersion = 1

const versionField = "db_version"

func createMetadataTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS metadata (
		id    INTEGER PRIMARY KEY,
		key   TEXT UNIQUE,
		value TEXT
	)`)
	return
}

func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
	sql := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
	stmt, err := db.Prepare(sql)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&exist)
	return
}

func metaGetValue(db *sql.DB, key string) (value string, err error) {
	stmt, err := db.Prepare("SELECT value FROM metadata " +
		"WHERE key = $1")
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&value)
	return
}

func metaSetValue(db *sql.DB, key, value string) (err error) {
	stmt, err := db.Prepare("INSERT OR REPLACE INTO metadata " +
		"(key, value) VALUES ($1, $2)")
	if err != nil {
		return
	}
	defer stmt.Close()

	_, err = stmt.Exec(key, value)
	return
}

func getVersion(db *sql.DB) (version int, err error) {
	s, err := metaGetValue(db, versionField)
	if err != nil {
		return
	}

	version, err = strconv.Atoi(s)
	return
}

func createSchema(db *sql.DB) (err error) {
	err = createMetadataTable(db)
	if err != nil {
		return
	}

	err = createJobTable(db)
	if err != nil {
		return
	}

	err = createRepoTable(db)
	if err != nil {
		return
	}

	return
}

func OpenDatabase(path string) (db *sql.DB, err error) {
	db, err = sql.Open("sqlite3", path)
	if err != nil {
		return
	}

	db.SetMaxOpenConns(1)

	exists, _ := metaChkValue(db, versionField)
	if !exists {
		err = createSchema(db)
		if err != nil {
			return
		}

		err = metaSetValue(db, versionField,
			strconv.Itoa(currentDatabaseVersion))
		return
	}

	version, err := getVersion(db)
	if err != nil {
		return
	}

	if version != currentDatabaseVersion {
		err = fmt.Errorf("database is not supported (%d instead of %d)",
			version, currentDatabaseVersion)
		return
	}

	return
}
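The db_version row in the metadata table acts as a hard compatibility gate rather than a migration: opening a database written by a different schema version fails with an error instead of being upgraded in place. A hedged usage sketch (the path is a placeholder; the daemon itself passes dotfiles.File("daemon/daemon.db")):

package main

import (
	"log"

	"code.dumpstack.io/tools/out-of-tree/daemon/db"
)

func main() {
	// Creates the schema on first open; on later opens the stored version
	// must equal currentDatabaseVersion or an error is returned, e.g.
	// "database is not supported (2 instead of 1)".
	conn, err := db.OpenDatabase("/tmp/daemon.db")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}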
daemon/db/db_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package db

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOpenDatabase(t *testing.T) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	defer os.Remove(file.Name())

	db, err := OpenDatabase(file.Name())
	assert.Nil(t, err)
	db.Close()

	db, err = OpenDatabase(file.Name())
	assert.Nil(t, err)
	db.Close()
}
daemon/db/job.go (new file, 136 lines)
@@ -0,0 +1,136 @@
package db

import (
	"database/sql"
	"encoding/json"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func createJobTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS job (
		id       INTEGER PRIMARY KEY,
		uuid     TEXT,
		repo     TEXT,
		"commit" TEXT,
		params   TEXT,
		config   TEXT,
		target   TEXT,
		status   TEXT DEFAULT "new"
	)`)
	return
}

func AddJob(db *sql.DB, job *api.Job) (err error) {
	stmt, err := db.Prepare(`INSERT INTO job (uuid, repo, "commit", params, config, target) ` +
		`VALUES ($1, $2, $3, $4, $5, $6);`)
	if err != nil {
		return
	}

	defer stmt.Close()

	config := api.Marshal(job.Artifact)
	target := api.Marshal(job.Target)

	res, err := stmt.Exec(job.UUID, job.RepoName, job.Commit, job.Params,
		config, target,
	)
	if err != nil {
		return
	}

	job.ID, err = res.LastInsertId()
	return
}

func UpdateJob(db *sql.DB, job api.Job) (err error) {
	stmt, err := db.Prepare(`UPDATE job SET uuid=$1, repo=$2, "commit"=$3, params=$4, ` +
		`config=$5, target=$6, status=$7 WHERE id=$8`)
	if err != nil {
		return
	}
	defer stmt.Close()

	config := api.Marshal(job.Artifact)
	target := api.Marshal(job.Target)

	_, err = stmt.Exec(job.UUID, job.RepoName, job.Commit, job.Params,
		config, target,
		job.Status, job.ID)
	return
}

func Jobs(db *sql.DB) (jobs []api.Job, err error) {
	stmt, err := db.Prepare(`SELECT id, uuid, repo, "commit", params, config, target, status FROM job`)
	if err != nil {
		return
	}

	defer stmt.Close()

	rows, err := stmt.Query()
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var job api.Job
		var config, target []byte
		err = rows.Scan(&job.ID, &job.UUID, &job.RepoName, &job.Commit, &job.Params, &config, &target, &job.Status)
		if err != nil {
			return
		}

		err = json.Unmarshal(config, &job.Artifact)
		if err != nil {
			return
		}

		err = json.Unmarshal(target, &job.Target)
		if err != nil {
			return
		}

		jobs = append(jobs, job)
	}

	return
}

func Job(db *sql.DB, uuid string) (job api.Job, err error) {
	stmt, err := db.Prepare(`SELECT id, uuid, repo, "commit", ` +
		`params, config, target, status ` +
		`FROM job WHERE uuid=$1`)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(uuid).Scan(&job.ID, &job.UUID,
		&job.RepoName, &job.Commit, &job.Params,
		&job.Artifact, &job.Target, &job.Status)
	if err != nil {
		return
	}

	return
}

func JobStatus(db *sql.DB, uuid string) (st api.Status, err error) {
	stmt, err := db.Prepare(`SELECT status FROM job ` +
		`WHERE uuid=$1`)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(uuid).Scan(&st)
	if err != nil {
		return
	}

	return
}
daemon/db/job_test.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package db

import (
	"database/sql"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func testCreateJobTable(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(file.Name())

	db, err = sql.Open("sqlite3", file.Name())
	assert.Nil(t, err)
	// defer db.Close()

	db.SetMaxOpenConns(1)

	err = createJobTable(db)
	assert.Nil(t, err)

	return
}

func TestJobTable(t *testing.T) {
	file, db := testCreateJobTable(t)
	defer db.Close()
	defer os.Remove(file.Name())

	job := api.Job{
		RepoName: "testname",
		Commit:   "test",
		Params:   "none",
	}

	err := AddJob(db, &job)
	assert.Nil(t, err)

	job.Params = "changed"

	err = UpdateJob(db, job)
	assert.Nil(t, err)

	jobs, err := Jobs(db)
	assert.Nil(t, err)

	assert.Equal(t, 1, len(jobs))

	assert.Equal(t, job.Params, jobs[0].Params)
}
daemon/db/repo.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package db

import (
	"database/sql"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func createRepoTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS repo (
		id   INTEGER PRIMARY KEY,
		name TEXT UNIQUE
	)`)
	return
}

func AddRepo(db *sql.DB, repo *api.Repo) (err error) {
	stmt, err := db.Prepare(`INSERT INTO repo (name) ` +
		`VALUES ($1);`)
	if err != nil {
		return
	}

	defer stmt.Close()

	res, err := stmt.Exec(repo.Name)
	if err != nil {
		return
	}

	repo.ID, err = res.LastInsertId()
	return
}

func Repos(db *sql.DB) (repos []api.Repo, err error) {
	stmt, err := db.Prepare(`SELECT id, name FROM repo`)
	if err != nil {
		return
	}

	defer stmt.Close()

	rows, err := stmt.Query()
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var repo api.Repo
		err = rows.Scan(&repo.ID, &repo.Name)
		if err != nil {
			return
		}

		repos = append(repos, repo)
	}

	return
}
daemon/db/repo_test.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package db

import (
	"database/sql"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

	"code.dumpstack.io/tools/out-of-tree/api"
)

func testCreateRepoTable(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(tempDB.Name())

	db, err = sql.Open("sqlite3", file.Name())
	assert.Nil(t, err)
	// defer db.Close()

	db.SetMaxOpenConns(1)

	err = createRepoTable(db)
	assert.Nil(t, err)

	return
}

func TestRepoTable(t *testing.T) {
	file, db := testCreateRepoTable(t)
	defer db.Close()
	defer os.Remove(file.Name())

	repo := api.Repo{Name: "testname"}

	err := AddRepo(db, &repo)
	assert.Nil(t, err)

	repos, err := Repos(db)
	assert.Nil(t, err)

	assert.Equal(t, 1, len(repos))

	assert.Equal(t, repo, repos[0])
}
daemon/process.go (new file, 154 lines)
@@ -0,0 +1,154 @@
package daemon

import (
	"database/sql"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/api"
	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/daemon/db"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/qemu"
)

type pjob struct {
	job api.Job
	log zerolog.Logger
	db  *sql.DB
}

func newPjob(job api.Job, db *sql.DB) (pj pjob) {
	pj.job = job
	pj.db = db
	pj.log = log.With().Str("uuid", job.UUID).Logger()
	return
}

func (pj pjob) Update() (err error) {
	err = db.UpdateJob(pj.db, pj.job)
	if err != nil {
		pj.log.Error().Err(err).Msgf("update job %v", pj.job)
	}
	return
}

func (pj pjob) SetStatus(status api.Status) (err error) {
	pj.log.Info().Msgf(`%v -> %v`, pj.job.Status, status)
	pj.job.Status = status
	err = pj.Update()
	return
}

func (pj pjob) Process() (err error) {
	switch pj.job.Status {
	case api.StatusNew:
		pj.log.Info().Msgf(`%v`, pj.job.Status)
		pj.SetStatus(api.StatusWaiting)
		return

	case api.StatusWaiting:
		pj.SetStatus(api.StatusRunning)
		defer func() {
			if err != nil {
				pj.SetStatus(api.StatusFailure)
			} else {
				pj.SetStatus(api.StatusSuccess)
			}
		}()

		var tmp string
		tmp, err = os.MkdirTemp(dotfiles.Dir("tmp"), "")
		if err != nil {
			pj.log.Error().Err(err).Msg("mktemp")
			return
		}
		defer os.RemoveAll(tmp)

		tmprepo := filepath.Join(tmp, "repo")

		pj.log.Debug().Msgf("temp repo: %v", tmprepo)

		remote := fmt.Sprintf("git://localhost:9418/%s", pj.job.RepoName)

		pj.log.Debug().Msgf("remote: %v", remote)

		var raw []byte

		cmd := exec.Command("git", "clone", remote, tmprepo)

		raw, err = cmd.CombinedOutput()
		pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
		if err != nil {
			pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
			return
		}

		cmd = exec.Command("git", "checkout", pj.job.Commit)

		cmd.Dir = tmprepo

		raw, err = cmd.CombinedOutput()
		pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
		if err != nil {
			pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
			return
		}

		pj.job.Artifact.SourcePath = tmprepo

		var result *artifact.Result
		var dq *qemu.System

		pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
			func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
				res *artifact.Result) {

				result = res
				dq = q
			},
		)

		logdir := dotfiles.Dir("daemon/logs", pj.job.UUID)

		err = os.WriteFile(filepath.Join(logdir, "build.log"),
			[]byte(result.Build.Output), 0644)
		if err != nil {
			pj.log.Error().Err(err).Msg("")
		}

		err = os.WriteFile(filepath.Join(logdir, "run.log"),
			[]byte(result.Run.Output), 0644)
		if err != nil {
			pj.log.Error().Err(err).Msg("")
		}

		err = os.WriteFile(filepath.Join(logdir, "test.log"),
			[]byte(result.Test.Output), 0644)
		if err != nil {
			pj.log.Error().Err(err).Msg("")
		}

		err = os.WriteFile(filepath.Join(logdir, "qemu.log"),
			[]byte(dq.Stdout), 0644)
		if err != nil {
			pj.log.Error().Err(err).Msg("")
		}

		pj.log.Info().Msgf("build %v, run %v, test %v",
			result.Build.Ok, result.Run.Ok, result.Test.Ok)

		if !result.Test.Ok {
			err = errors.New("tests failed")
		}

	}
	return
}
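The waiting branch leans on Go's named error return: the deferred closure inspects err once the work has finished and records success or failure exactly once. A generic, self-contained sketch of that pattern (setStatus and doWork are hypothetical stand-ins, not functions from this repository):

package main

import (
	"errors"
	"fmt"
)

func setStatus(s string) { fmt.Println("status:", s) }

func doWork() error { return errors.New("tests failed") }

// process mirrors the waiting -> running -> success/failure transition above:
// whatever err holds when the function returns decides the final status.
func process() (err error) {
	setStatus("running")
	defer func() {
		if err != nil {
			setStatus("failure")
		} else {
			setStatus("success")
		}
	}()
	return doWork()
}

func main() { _ = process() }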
@@ -27,7 +27,7 @@ pkgs.buildGoApplication rec {

  postFixup = ''
    wrapProgram $out/bin/out-of-tree \
      --prefix PATH : "${lib.makeBinPath [ pkgs.qemu pkgs.podman ]}"
      --prefix PATH : "${lib.makeBinPath [ pkgs.qemu pkgs.podman pkgs.openssl ]}"
  '';

  meta = with lib; {
@@ -6,7 +6,7 @@ import (

	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
)
@@ -47,14 +47,12 @@ func (centos CentOS) Packages() (pkgs []string, err error) {
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(config.Dir("tmp"), []string{cmd})
	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	for _, pkg := range strings.Fields(output) {
		pkgs = append(pkgs, pkg)
	}
	pkgs = append(pkgs, strings.Fields(output)...)
	return
}

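The loop-to-append change repeated across several distro backends works because strings.Fields already returns a slice, so a single variadic append is enough. A self-contained illustration with arbitrary example data:

package main

import (
	"fmt"
	"strings"
)

func main() {
	output := "kernel-core kernel-devel kernel-headers" // arbitrary example data
	var pkgs []string
	// Spread the fields into append instead of looping element by element.
	pkgs = append(pkgs, strings.Fields(output)...)
	fmt.Println(pkgs) // [kernel-core kernel-devel kernel-headers]
}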
@@ -10,7 +10,7 @@ import (
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/cache"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
	"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
@@ -329,8 +329,8 @@ func (d Debian) Kernels() (kernels []distro.KernelInfo, err error) {
		return
	}

	cpath := config.Dir("volumes", c.Name())
	rootfs := config.File("images", c.Name()+".img")
	cpath := dotfiles.Dir("volumes", c.Name())
	rootfs := dotfiles.File("images", c.Name()+".img")

	files, err := os.ReadDir(cpath)
	if err != nil {
@@ -413,17 +413,17 @@ func (d Debian) volumes(pkgname string) (volumes []container.Volume) {
	pkgdir := filepath.Join("volumes", c.Name(), pkgname)

	volumes = append(volumes, container.Volume{
		Src:  config.Dir(pkgdir, "/lib/modules"),
		Src:  dotfiles.Dir(pkgdir, "/lib/modules"),
		Dest: "/lib/modules",
	})

	volumes = append(volumes, container.Volume{
		Src:  config.Dir(pkgdir, "/usr/src"),
		Src:  dotfiles.Dir(pkgdir, "/usr/src"),
		Dest: "/usr/src",
	})

	volumes = append(volumes, container.Volume{
		Src:  config.Dir(pkgdir, "/boot"),
		Src:  dotfiles.Dir(pkgdir, "/boot"),
		Dest: "/boot",
	})

@@ -518,7 +518,7 @@ func (d Debian) cleanup(pkgname string) {
		return
	}

	pkgdir := config.Dir(filepath.Join("volumes", c.Name(), pkgname))
	pkgdir := dotfiles.Dir(filepath.Join("volumes", c.Name(), pkgname))

	log.Debug().Msgf("cleanup %s", pkgdir)

@@ -11,7 +11,7 @@ import (
	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/cache"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
	"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/metasnap"
	"code.dumpstack.io/tools/out-of-tree/fs"
@@ -406,7 +406,7 @@ func GetKernelsWithLimit(limit int, mode GetKernelsMode) (kernels []DebianKernel
	err error) {

	if CachePath == "" {
		CachePath = config.File("debian.cache")
		CachePath = dotfiles.File("debian.cache")
		log.Debug().Msgf("Use default kernels cache path: %s", CachePath)

		if !fs.PathExists(CachePath) {
@@ -12,8 +12,6 @@ import (
	"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/mr"
)

const timeLayout = "20060102T150405Z"

const URL = "https://snapshot.debian.org"

func SourcePackageVersions(name string) (versions []string, err error) {
@@ -99,3 +99,8 @@ func (d Distro) RootFS() string {

	return ""
}

type Command struct {
	Distro  Distro
	Command string
}
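The new distro.Command type replaces config.DockerCommand for the commands run before the base layer of the generated Dockerfile. A hypothetical literal just to show the shape (only the field names come from the diff; the values are placeholders):

package main

import (
	"fmt"

	"code.dumpstack.io/tools/out-of-tree/distro"
)

func main() {
	cmds := []distro.Command{
		// Placeholder entry; a real one would name an existing distro/release.
		{Distro: distro.Distro{Release: "22.04"}, Command: "apt-get update"},
	}
	for _, c := range cmds {
		fmt.Println(c.Distro.Release, "->", c.Command)
	}
}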
@@ -83,10 +83,7 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
		return
	}

	for _, pkg := range strings.Fields(output) {
		pkgs = append(pkgs, pkg)
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}

@@ -7,7 +7,7 @@ import (

	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
)
@@ -57,15 +57,12 @@ func (ol OracleLinux) Packages() (pkgs []string, err error) {
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(config.Dir("tmp"), []string{cmd})
	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	for _, pkg := range strings.Fields(output) {
		pkgs = append(pkgs, pkg)
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}

@@ -4,7 +4,7 @@ import (
	"fmt"
	"strings"

	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/distro"
)
@@ -51,15 +51,12 @@ func (u Ubuntu) Packages() (pkgs []string, err error) {
		"--names-only '^linux-image-[0-9\\.\\-]*-generic$' " +
		"| awk '{ print $1 }'"

	output, err := c.Run(config.Dir("tmp"), []string{cmd})
	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	for _, pkg := range strings.Fields(output) {
		pkgs = append(pkgs, pkg)
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}

fs/fs.go (4 changes)
@@ -6,7 +6,7 @@ import (
	"path/filepath"
	"strings"

	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
)

// CaseInsensitive check
@@ -51,7 +51,7 @@ func PathExists(path string) bool {

// TempDir that exist relative to config directory
func TempDir() (string, error) {
	return os.MkdirTemp(config.Dir("tmp"), "")
	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}

func FindBySubstring(dir, substring string) (k string, err error) {
go.mod (6 changes)
@@ -1,6 +1,8 @@
module code.dumpstack.io/tools/out-of-tree

go 1.19
go 1.21

toolchain go1.21.6

require (
	github.com/Masterminds/semver v1.5.0
@@ -8,6 +10,7 @@ require (
	github.com/cavaliergopher/grab/v3 v3.0.1
	github.com/davecgh/go-spew v1.1.1
	github.com/go-git/go-git/v5 v5.6.1
	github.com/google/uuid v1.6.0
	github.com/mattn/go-sqlite3 v1.14.16
	github.com/mitchellh/go-homedir v1.1.0
	github.com/naoina/toml v0.1.1
@@ -52,5 +55,6 @@ require (
	golang.org/x/sys v0.8.0 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.3.0 // indirect
	gopkg.in/yaml.v3 v3.0.0 // indirect
)
go.sum (11 changes)
@@ -9,9 +9,11 @@ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0g
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
github.com/alecthomas/assert/v2 v2.1.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA=
github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
github.com/alecthomas/repr v0.1.0/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
@@ -44,7 +46,10 @@ github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -86,6 +91,7 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6
github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -168,6 +174,7 @@ golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -175,6 +182,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -194,8 +202,9 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYs
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -5,23 +5,21 @@
package kernel

import (
	"io/ioutil"
	"math/rand"
	"os"
	"os/signal"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"

	"github.com/rs/zerolog/log"

	"code.dumpstack.io/tools/out-of-tree/artifact"
	"code.dumpstack.io/tools/out-of-tree/cache"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/fs"
)

func MatchPackages(km config.Target) (packages []string, err error) {
func MatchPackages(km artifact.Target) (packages []string, err error) {
	pkgs, err := km.Distro.Packages()
	if err != nil {
		return
@@ -52,26 +50,8 @@ func MatchPackages(km config.Target) (packages []string, err error) {
	return
}

func vsyscallAvailable() (available bool, err error) {
	if runtime.GOOS != "linux" {
		// Docker for non-Linux systems is not using the host
		// kernel but uses kernel inside a virtual machine, so
		// it builds by the Docker team with vsyscall support.
		available = true
		return
	}

	buf, err := ioutil.ReadFile("/proc/self/maps")
	if err != nil {
		return
	}

	available = strings.Contains(string(buf), "[vsyscall]")
	return
}

func GenRootfsImage(imageFile string, download bool) (rootfs string, err error) {
	imagesPath := config.Dir("images")
	imagesPath := dotfiles.Dir("images")

	rootfs = filepath.Join(imagesPath, imageFile)
	if !fs.PathExists(rootfs) {
@@ -97,7 +77,7 @@ func SetSigintHandler(variable *bool) {
	signal.Notify(c, os.Interrupt)
	go func() {
		counter := 0
		for _ = range c {
		for range c {
			if counter == 0 {
				*variable = true
				log.Warn().Msg("shutdown requested, finishing work")
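SetSigintHandler counts interrupts so that a first Ctrl-C only requests a graceful stop. A standalone sketch of that idea; the escalation on a repeated press is an assumption (the rest of the function falls outside this hunk), and an atomic flag stands in for the *bool the real handler flips.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync/atomic"
	"time"
)

func main() {
	var shutdown atomic.Bool // stands in for the *bool flag used above

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		counter := 0
		for range c {
			if counter == 0 {
				shutdown.Store(true)
				fmt.Println("shutdown requested, finishing work")
			} else {
				os.Exit(1) // assumed escalation on a repeated Ctrl-C
			}
			counter++
		}
	}()

	for !shutdown.Load() {
		time.Sleep(100 * time.Millisecond) // pretend to work
	}
}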
main.go (8 changes)
@@ -26,7 +26,7 @@ import (

	"code.dumpstack.io/tools/out-of-tree/cache"
	"code.dumpstack.io/tools/out-of-tree/cmd"
	"code.dumpstack.io/tools/out-of-tree/config"
	"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
	"code.dumpstack.io/tools/out-of-tree/container"
	"code.dumpstack.io/tools/out-of-tree/fs"
)
@@ -44,6 +44,8 @@ type CLI struct {
	Container cmd.ContainerCmd `cmd:"" help:"manage containers"`
	Distro    cmd.DistroCmd    `cmd:"" help:"distro-related helpers"`

	Daemon cmd.DaemonCmd `cmd:"" help:"run daemon"`

	Version VersionFlag `name:"version" help:"print version information and quit"`

	LogLevel LogLevelFlag `enum:"trace,debug,info,warn,error" default:"info"`
@@ -132,7 +134,7 @@ func main() {
	}

	cmd.FileWriter = cmd.LevelWriter{Writer: &lumberjack.Logger{
		Filename: config.File("logs/out-of-tree.log"),
		Filename: dotfiles.File("logs/out-of-tree.log"),
	},
		Level: zerolog.TraceLevel,
	}
@@ -151,7 +153,7 @@ func main() {
		log.Debug().Msgf("%v", buildInfo.Settings)
	}

	path := config.Dir()
	path := dotfiles.Dir()
	yes, err := fs.CaseInsensitive(path)
	if err != nil {
		log.Fatal().Err(err).Msg(path)