From 9dfcb25d95470ae213df9dfbfc87bb92020d9710 Mon Sep 17 00:00:00 2001 From: Simon Let Date: Sun, 16 Oct 2022 20:07:59 +0200 Subject: [PATCH] checkpoint before continuing work after a while --- Makefile | 7 +- cmd/config-setup/main.go | 46 -- cmd/install-utils/backup.go | 5 + cmd/install-utils/main.go | 49 ++ cmd/install-utils/migrate.go | 36 + go.mod | 1 + go.sum | 2 + internal/cfg/cfg.go | 2 +- internal/datadir/datadir.go | 71 ++ internal/deviceid/deviceid.go | 18 + internal/histio/file.go | 56 ++ internal/histio/histio.go | 44 ++ internal/logger/logger.go | 9 +- internal/msg/msg.go | 19 +- internal/recconv/recconv.go | 9 + internal/recio/read.go | 158 ++++ internal/recio/recio.go | 13 + internal/recio/write.go | 46 ++ internal/recload/recload.go | 1 + internal/record/legacy.go | 88 +++ internal/record/record.go | 2 + internal/record/v1.go | 58 ++ internal/recordint/collect.go | 21 + internal/recordint/enriched.go | 51 ++ internal/recordint/flag.go | 9 + internal/recordint/indexed.go | 9 + internal/recordint/recordint.go | 2 + internal/recordint/searchapp.go | 40 + internal/records/records.go | 360 --------- internal/records/records_test.go | 66 -- internal/recutil/recutil.go | 94 +++ scripts/hooks.sh | 14 +- scripts/install.sh | 21 +- scripts/resh-evaluate-plot.py | 1218 ------------------------------ scripts/shellrc.sh | 17 +- scripts/util.sh | 35 +- 36 files changed, 914 insertions(+), 1783 deletions(-) delete mode 100644 cmd/config-setup/main.go create mode 100644 cmd/install-utils/backup.go create mode 100644 cmd/install-utils/main.go create mode 100644 cmd/install-utils/migrate.go create mode 100644 internal/datadir/datadir.go create mode 100644 internal/deviceid/deviceid.go create mode 100644 internal/histio/file.go create mode 100644 internal/histio/histio.go create mode 100644 internal/recconv/recconv.go create mode 100644 internal/recio/read.go create mode 100644 internal/recio/recio.go create mode 100644 internal/recio/write.go create mode 100644 internal/recload/recload.go create mode 100644 internal/record/legacy.go create mode 100644 internal/record/record.go create mode 100644 internal/record/v1.go create mode 100644 internal/recordint/collect.go create mode 100644 internal/recordint/enriched.go create mode 100644 internal/recordint/flag.go create mode 100644 internal/recordint/indexed.go create mode 100644 internal/recordint/recordint.go create mode 100644 internal/recordint/searchapp.go create mode 100644 internal/recutil/recutil.go delete mode 100755 scripts/resh-evaluate-plot.py diff --git a/Makefile b/Makefile index 9a30621..249cd43 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,9 @@ VERSION="${LATEST_TAG}-DEV" GOFLAGS=-ldflags "-X main.version=${VERSION} -X main.commit=${COMMIT} -X main.development=true" -build: submodules bin/resh-session-init bin/resh-collect bin/resh-postcollect bin/resh-daemon\ - bin/resh-control bin/resh-config bin/resh-cli bin/resh-config-setup +build: submodules bin/resh-session-init bin/resh-collect bin/resh-postcollect\ + bin/resh-daemon bin/resh-control bin/resh-config bin/resh-cli\ + bin/installutil install: build scripts/install.sh @@ -21,7 +22,7 @@ rebuild: make build clean: - rm -f bin/resh-* + rm -f bin/* uninstall: # Uninstalling ... 
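Note on the GOFLAGS above: the -X linker flags inject build metadata into the binaries. A minimal, hypothetical Go sketch of how such variables are consumed (the names must match the -X main.<name> targets exactly, and -X can only set package-level string variables, not bools):

package main

import "fmt"

// Populated at build time, e.g.:
//   go build -ldflags "-X main.version=v1.2.3 -X main.commit=abc1234 -X main.development=true"
var (
	version     string
	commit      string
	development string // -X only sets strings; convert to bool where needed
)

func main() {
	fmt.Printf("resh %s (commit %s, development=%s)\n", version, commit, development)
}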
diff --git a/cmd/config-setup/main.go b/cmd/config-setup/main.go deleted file mode 100644 index a1ed942..0000000 --- a/cmd/config-setup/main.go +++ /dev/null @@ -1,46 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/curusarn/resh/internal/cfg" - "github.com/curusarn/resh/internal/logger" - "go.uber.org/zap" -) - -// info passed during build -var version string -var commit string -var developement bool - -func main() { - errDo := doConfigSetup() - config, errCfg := cfg.New() - logger, _ := logger.New("config-setup", config.LogLevel, developement) - defer logger.Sync() // flushes buffer, if any - - if errDo != nil { - logger.Error("Config setup failed", zap.Error(errDo)) - // TODO: better error message for people - fmt.Fprintf(os.Stderr, "ERROR: %v\n", errDo) - } - if errCfg != nil { - logger.Error("Error while getting configuration", zap.Error(errCfg)) - } -} - -func doConfigSetup() error { - err := cfg.Touch() - if err != nil { - return fmt.Errorf("could not touch config file: %w", err) - } - changes, err := cfg.Migrate() - if err != nil { - return fmt.Errorf("could not migrate config file version: %v", err) - } - if changes { - fmt.Printf("Config file format has changed - your config was updated to reflect the changes.\n") - } - return nil -}
diff --git a/cmd/install-utils/backup.go b/cmd/install-utils/backup.go new file mode 100644 index 0000000..9148164 --- /dev/null +++ b/cmd/install-utils/backup.go @@ -0,0 +1,10 @@ +package main + +func backup() { + +} + +// TODO: implement rollback +func rollback() { + +}
diff --git a/cmd/install-utils/main.go b/cmd/install-utils/main.go new file mode 100644 index 0000000..4e83ec0 --- /dev/null +++ b/cmd/install-utils/main.go @@ -0,0 +1,49 @@ +package main + +import ( + "flag" + "fmt" + "os" +) + +// info passed during build +var version string +var commit string +var developement bool + +func main() { + var command string + flag.StringVar(&command, "command", "", "Utility to run") + flag.Parse() + + switch command { + case "backup": + backup() + case "rollback": + rollback() + case "migrate-config": + migrateConfig() + case "migrate-history": + migrateHistory() + case "help": + printUsage(os.Stdout) + default: + fmt.Fprintf(os.Stderr, "ERROR: Unknown command: %s\n", command) + printUsage(os.Stderr) + } +} + +func printUsage(f *os.File) { + usage := ` + Utils used during resh installation + + USAGE: ./install-utils COMMAND + COMMANDS: + backup backup resh installation and data + rollback restore resh installation and data from backup + migrate-config update config to reflect updates + migrate-history update history to reflect updates + help show this help + ` + fmt.Fprint(f, usage) +}
diff --git a/cmd/install-utils/migrate.go b/cmd/install-utils/migrate.go new file mode 100644 index 0000000..e0bd34e --- /dev/null +++ b/cmd/install-utils/migrate.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os" + + "github.com/curusarn/resh/internal/cfg" +) + +func migrateConfig() { + err := cfg.Touch() + if err != nil { + fmt.Fprintf(os.Stderr, "ERROR: Failed to touch config file: %v\n", err) + os.Exit(1) + } + changes, err := cfg.Migrate() + if err != nil { + fmt.Fprintf(os.Stderr, "ERROR: Failed to update config file: %v\n", err) + os.Exit(1) + } + if changes { + fmt.Printf("Config file format has changed since last update - your config was updated to reflect the changes.\n") + } +} + +func migrateHistory() { + // homeDir, err := os.UserHomeDir() + // if err != nil { + + // } + + // TODO: Find history in: + // - xdg_data/resh/history.reshjson + // - .resh_history.json + // - .resh/history.json +} diff --git 
a/go.mod b/go.mod index 4c3db28..c63b38f 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/mitchellh/go-ps v1.0.0 github.com/spf13/cobra v1.2.1 + github.com/whilp/git-urls v1.0.0 go.uber.org/zap v1.21.0 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 ) diff --git a/go.sum b/go.sum index c1e937d..2e9120d 100644 --- a/go.sum +++ b/go.sum @@ -246,6 +246,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= +github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/internal/cfg/cfg.go b/internal/cfg/cfg.go index 9c3ae7a..99f7c84 100644 --- a/internal/cfg/cfg.go +++ b/internal/cfg/cfg.go @@ -59,7 +59,7 @@ var defaults = Config{ BindControlR: true, Debug: false, - SesswatchPeriodSeconds: 120, + SesswatchPeriodSeconds: 600, SesshistInitHistorySize: 1000, } diff --git a/internal/datadir/datadir.go b/internal/datadir/datadir.go new file mode 100644 index 0000000..59011dd --- /dev/null +++ b/internal/datadir/datadir.go @@ -0,0 +1,71 @@ +package datadir + +import ( + "fmt" + "os" + "path" +) + +// You should not need this caching +// It messes with proper dependency injection +// Find another way + +// type dirCache struct { +// dir string +// err error +// +// cached bool +// } +// +// var cache dirCache +// +// func getPathNoCache() (string, error) { +// reshDir := "resh" +// xdgDir, found := os.LookupEnv("XDG_DATA_HOME") +// if found { +// return path.Join(xdgDir, reshDir), nil +// } +// homeDir, err := os.UserHomeDir() +// if err != nil { +// return "", fmt.Errorf("error while getting home dir: %w", err) +// } +// return path.Join(homeDir, ".local/share/", reshDir), nil +// } +// +// func GetPath() (string, error) { +// if !cache.cached { +// dir, err := getPathNoCache() +// cache = dirCache{ +// dir: dir, +// err: err, +// cached: true, +// } +// } +// return cache.dir, cache.err +// } + +func GetPath() (string, error) { + reshDir := "resh" + xdgDir, found := os.LookupEnv("XDG_DATA_HOME") + if found { + return path.Join(xdgDir, reshDir), nil + } + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("error while getting home dir: %w", err) + } + return path.Join(homeDir, ".local/share/", reshDir), nil +} + +func MakePath() (string, error) { + path, err := GetPath() + if err != nil { + return "", err + } + err = os.MkdirAll(path, 0755) + // skip "exists" error + if err != nil && !os.IsExist(err) { + return "", fmt.Errorf("error while creating directories: %w", err) + } + return path, nil +} diff --git a/internal/deviceid/deviceid.go b/internal/deviceid/deviceid.go new file mode 100644 index 0000000..79a080d --- /dev/null +++ b/internal/deviceid/deviceid.go @@ -0,0 +1,18 @@ +package deviceid + +import ( + "fmt" + "os" + "path" + "strings" +) + +func Get(dataDir string) (string, error) { + fname := "device-id" + dat, err := 
os.ReadFile(path.Join(dataDir, fname)) + if err != nil { + return "", fmt.Errorf("could not read file with device-id: %w", err) + } + id := strings.TrimRight(string(dat), "\n") + return id, nil +} diff --git a/internal/histio/file.go b/internal/histio/file.go new file mode 100644 index 0000000..5233717 --- /dev/null +++ b/internal/histio/file.go @@ -0,0 +1,56 @@ +package histio + +import ( + "fmt" + "os" + "sync" + + "github.com/curusarn/resh/internal/recio" + "github.com/curusarn/resh/internal/recordint" + "go.uber.org/zap" +) + +type histfile struct { + sugar *zap.SugaredLogger + // deviceID string + path string + + mu sync.RWMutex + data []recordint.Indexed + fileinfo os.FileInfo +} + +func newHistfile(sugar *zap.SugaredLogger, path string) *histfile { + return &histfile{ + sugar: sugar.With( + // FIXME: drop V1 once original histfile is gone + "component", "histfileV1", + "path", path, + ), + // deviceID: deviceID, + path: path, + } +} + +func (h *histfile) updateFromFile() error { + rio := recio.New(h.sugar) + // TODO: decide and handle errors + newData, _, err := rio.ReadFile(h.path) + if err != nil { + return fmt.Errorf("could not read history file: %w", err) + } + h.mu.Lock() + defer h.mu.Unlock() + h.data = newData + h.updateFileInfo() + return nil +} + +func (h *histfile) updateFileInfo() error { + info, err := os.Stat(h.path) + if err != nil { + return fmt.Errorf("history file not found: %w", err) + } + h.fileinfo = info + return nil +} diff --git a/internal/histio/histio.go b/internal/histio/histio.go new file mode 100644 index 0000000..3bc7c68 --- /dev/null +++ b/internal/histio/histio.go @@ -0,0 +1,44 @@ +package histio + +import ( + "path" + + "github.com/curusarn/resh/internal/record" + "github.com/curusarn/resh/internal/recordint" + "go.uber.org/zap" +) + +type Histio struct { + sugar *zap.SugaredLogger + histDir string + + thisDeviceID string + thisHistory *histfile + // TODO: remote histories + // moreHistories map[string]*histfile + + recordsToAppend chan record.V1 + recordsToFlag chan recordint.Flag +} + +func New(sugar *zap.SugaredLogger, dataDir, deviceID string) *Histio { + sugarHistio := sugar.With(zap.String("component", "histio")) + histDir := path.Join(dataDir, "history") + currPath := path.Join(histDir, deviceID) + // TODO: file extenstion for the history, yes or no? (.reshjson vs. ) + + // TODO: discover other history files, exclude current + + return &Histio{ + sugar: sugarHistio, + histDir: histDir, + + thisDeviceID: deviceID, + thisHistory: newHistfile(sugar, currPath), + // moreHistories: ... + } +} + +func (h *Histio) Append(r *record.V1) { + +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go index b031b77..3167412 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -2,20 +2,19 @@ package logger import ( "fmt" - "os" "path/filepath" + "github.com/curusarn/resh/internal/datadir" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) func New(executable string, level zapcore.Level, developement bool) (*zap.Logger, error) { - // TODO: consider getting log path from config ? 
- homeDir, err := os.UserHomeDir() + dataDir, err := datadir.GetPath() if err != nil { - return nil, fmt.Errorf("error while getting home dir: %w", err) + return nil, fmt.Errorf("error while getting resh data dir: %w", err) } - logPath := filepath.Join(homeDir, ".resh/log.json") + logPath := filepath.Join(dataDir, "log.json") loggerConfig := zap.NewProductionConfig() loggerConfig.OutputPaths = []string{logPath} loggerConfig.Level.SetLevel(level)
diff --git a/internal/msg/msg.go b/internal/msg/msg.go index cf8e3ea..06d87cf 100644 --- a/internal/msg/msg.go +++ b/internal/msg/msg.go @@ -1,27 +1,16 @@ package msg -import "github.com/curusarn/resh/internal/records" +import "github.com/curusarn/resh/internal/recordint" // CliMsg struct type CliMsg struct { - SessionID string `json:"sessionID"` - PWD string `json:"pwd"` + SessionID string + PWD string } // CliResponse struct type CliResponse struct { - CliRecords []records.CliRecord `json:"cliRecords"` -} - -// InspectMsg struct -type InspectMsg struct { - SessionID string `json:"sessionId"` - Count uint `json:"count"` -} - -// MultiResponse struct -type MultiResponse struct { - CmdLines []string `json:"cmdlines"` + Records []recordint.SearchApp } // StatusResponse struct
diff --git a/internal/recconv/recconv.go b/internal/recconv/recconv.go new file mode 100644 index 0000000..d0156c5 --- /dev/null +++ b/internal/recconv/recconv.go @@ -0,0 +1,9 @@ +package recconv + +import "github.com/curusarn/resh/internal/record" + +func LegacyToV1(r *record.Legacy) *record.V1 { + return &record.V1{ + // FIXME: fill in all the fields + } +}
diff --git a/internal/recio/read.go b/internal/recio/read.go new file mode 100644 index 0000000..7a7f183 --- /dev/null +++ b/internal/recio/read.go @@ -0,0 +1,158 @@ +package recio + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/curusarn/resh/internal/recconv" + "github.com/curusarn/resh/internal/record" + "github.com/curusarn/resh/internal/recordint" + "go.uber.org/zap" +) + +func (r *RecIO) ReadAndFixFile(fpath string, maxErrors int) ([]recordint.Indexed, error) { + recs, numErrs, err := r.ReadFile(fpath) + if err != nil { + return nil, err + } + if numErrs > maxErrors { + return nil, fmt.Errorf("encountered too many decoding errors") + } + if numErrs == 0 { + return recs, nil + } + + // TODO: check these error messages + r.sugar.Warnw("Some history records could not be decoded - fixing resh history file by dropping them", + "corruptedRecords", numErrs, + ) + fpathBak := fpath + ".bak" + r.sugar.Infow("Backing up current corrupted history file", + "backupFilename", fpathBak, + ) + // TODO: maybe use upstream copy function + err = copyFile(fpath, fpathBak) + if err != nil { + r.sugar.Errorw("Failed to create a backup history file - aborting fixing history file", + "backupFilename", fpathBak, + zap.Error(err), + ) + return recs, nil + } + r.sugar.Info("Writing resh history file without errors ...") + var recsV1 []record.V1 + for _, rec := range recs { + recsV1 = append(recsV1, rec.Rec) + } + err = r.WriteFile(fpath, recsV1) + if err != nil { + r.sugar.Errorw("Failed to write fixed history file - aborting fixing history file", + "filename", fpath, + zap.Error(err), + ) + } + return recs, nil +} + +func (r *RecIO) ReadFile(fpath string) ([]recordint.Indexed, int, error) { + var recs []recordint.Indexed + file, err := os.Open(fpath) + if err != nil { + return nil, 0, fmt.Errorf("failed to open history file: %w", err) + } + defer file.Close() + + reader := bufio.NewReader(file) + numErrs := 0 + var idx int + for { + var line string + line, err
= reader.ReadString('\n') + if err != nil { + break + } + idx++ + rec, err := r.decodeLine(line) + if err != nil { + numErrs++ + continue + } + recidx := recordint.Indexed{ + Rec: *rec, + // TODO: Is line index actually enough? + // Don't we want to count bytes because we will scan by number of bytes? + // hint: https://benjamincongdon.me/blog/2018/04/10/Counting-Scanned-Bytes-in-Go/ + Idx: idx, + } + recs = append(recs, recidx) + } + if err != io.EOF { + r.sugar.Error("Error while loading file", zap.Error(err)) + } + r.sugar.Infow("Loaded resh history records", + "recordCount", len(recs), + ) + return recs, numErrs, nil +} + +func copyFile(source, dest string) error { + from, err := os.Open(source) + if err != nil { + return err + } + defer from.Close() + + // This is equivalent to: os.OpenFile(dest, os.O_RDWR|os.O_CREATE, 0666) + to, err := os.Create(dest) + if err != nil { + return err + } + defer to.Close() + + _, err = io.Copy(to, from) + if err != nil { + return err + } + return nil +} + +func (r *RecIO) decodeLine(line string) (*record.V1, error) { + idx := strings.Index(line, "{") + if idx == -1 { + return nil, fmt.Errorf("no opening brace found") + } + schema := line[:idx] + jsn := line[idx:] + switch schema { + case "v1": + var rec record.V1 + err := decodeAnyRecord(jsn, &rec) + if err != nil { + return nil, err + } + return &rec, nil + case "": + var rec record.Legacy + err := decodeAnyRecord(jsn, &rec) + if err != nil { + return nil, err + } + return recconv.LegacyToV1(&rec), nil + default: + return nil, fmt.Errorf("unknown record schema/type '%s'", schema) + } +} + +// TODO: find out if we are losing performance because of the use of interface{} + +func decodeAnyRecord(jsn string, rec interface{}) error { + err := json.Unmarshal([]byte(jsn), &rec) + if err != nil { + return fmt.Errorf("failed to decode json: %w", err) + } + return nil +}
diff --git a/internal/recio/recio.go b/internal/recio/recio.go new file mode 100644 index 0000000..5ea986b --- /dev/null +++ b/internal/recio/recio.go @@ -0,0 +1,13 @@ +package recio + +import ( + "go.uber.org/zap" +) + +type RecIO struct { + sugar *zap.SugaredLogger +} + +func New(sugar *zap.SugaredLogger) RecIO { + return RecIO{sugar: sugar} +}
diff --git a/internal/recio/write.go b/internal/recio/write.go new file mode 100644 index 0000000..8bc5e06 --- /dev/null +++ b/internal/recio/write.go @@ -0,0 +1,47 @@ +package recio + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/curusarn/resh/internal/record" + "github.com/curusarn/resh/internal/recordint" +) + +// TODO: better errors +func (r *RecIO) WriteFile(fpath string, data []record.V1) error { + file, err := os.Create(fpath) + if err != nil { + return err + } + defer file.Close() + for _, rec := range data { + jsn, err := encodeV1Record(rec) + if err != nil { + return err + } + _, err = file.Write(jsn) + if err != nil { + return err + } + } + return nil +} + +func (r *RecIO) EditRecordFlagsInFile(fpath string, idx int, rec recordint.Flag) error { + // FIXME: implement + // open file "not as append" + // scan to the correct line + + return nil +} + +func encodeV1Record(rec record.V1) ([]byte, error) { + jsn, err := json.Marshal(rec) + if err != nil { + return nil, fmt.Errorf("failed to encode json: %w", err) + } + // prefix the line with the schema version that decodeLine expects + return append([]byte("v1"+string(jsn)), '\n'), nil +}
diff --git a/internal/recload/recload.go b/internal/recload/recload.go new file mode 100644 index 0000000..0db3e3c --- /dev/null +++ b/internal/recload/recload.go @@ -0,0 +1 @@ +package recload diff --git 
a/internal/record/legacy.go b/internal/record/legacy.go new file mode 100644 index 0000000..3b913fb --- /dev/null +++ b/internal/record/legacy.go @@ -0,0 +1,88 @@ +package record + +type Legacy struct { + // core + CmdLine string `json:"cmdLine"` + ExitCode int `json:"exitCode"` + Shell string `json:"shell"` + Uname string `json:"uname"` + SessionID string `json:"sessionId"` + RecordID string `json:"recordId"` + + // posix + Home string `json:"home"` + Lang string `json:"lang"` + LcAll string `json:"lcAll"` + Login string `json:"login"` + Pwd string `json:"pwd"` + PwdAfter string `json:"pwdAfter"` + ShellEnv string `json:"shellEnv"` + Term string `json:"term"` + + // non-posix"` + RealPwd string `json:"realPwd"` + RealPwdAfter string `json:"realPwdAfter"` + Pid int `json:"pid"` + SessionPID int `json:"sessionPid"` + Host string `json:"host"` + Hosttype string `json:"hosttype"` + Ostype string `json:"ostype"` + Machtype string `json:"machtype"` + Shlvl int `json:"shlvl"` + + // before after + TimezoneBefore string `json:"timezoneBefore"` + TimezoneAfter string `json:"timezoneAfter"` + + RealtimeBefore float64 `json:"realtimeBefore"` + RealtimeAfter float64 `json:"realtimeAfter"` + RealtimeBeforeLocal float64 `json:"realtimeBeforeLocal"` + RealtimeAfterLocal float64 `json:"realtimeAfterLocal"` + + RealtimeDuration float64 `json:"realtimeDuration"` + RealtimeSinceSessionStart float64 `json:"realtimeSinceSessionStart"` + RealtimeSinceBoot float64 `json:"realtimeSinceBoot"` + + GitDir string `json:"gitDir"` + GitRealDir string `json:"gitRealDir"` + GitOriginRemote string `json:"gitOriginRemote"` + GitDirAfter string `json:"gitDirAfter"` + GitRealDirAfter string `json:"gitRealDirAfter"` + GitOriginRemoteAfter string `json:"gitOriginRemoteAfter"` + MachineID string `json:"machineId"` + + OsReleaseID string `json:"osReleaseId"` + OsReleaseVersionID string `json:"osReleaseVersionId"` + OsReleaseIDLike string `json:"osReleaseIdLike"` + OsReleaseName string `json:"osReleaseName"` + OsReleasePrettyName string `json:"osReleasePrettyName"` + + ReshUUID string `json:"reshUuid"` + ReshVersion string `json:"reshVersion"` + ReshRevision string `json:"reshRevision"` + + // records come in two parts (collect and postcollect) + PartOne bool `json:"partOne,omitempty"` // false => part two + PartsMerged bool `json:"partsMerged"` + // special flag -> not an actual record but an session end + SessionExit bool `json:"sessionExit,omitempty"` + + // recall metadata + Recalled bool `json:"recalled"` + RecallHistno int `json:"recallHistno,omitempty"` + RecallStrategy string `json:"recallStrategy,omitempty"` + RecallActionsRaw string `json:"recallActionsRaw,omitempty"` + RecallActions []string `json:"recallActions,omitempty"` + RecallLastCmdLine string `json:"recallLastCmdLine"` + + // recall command + RecallPrefix string `json:"recallPrefix,omitempty"` + + // added by sanitizatizer + Sanitized bool `json:"sanitized,omitempty"` + CmdLength int `json:"cmdLength,omitempty"` + + // fields that are string here and int in older resh verisons + Cols interface{} `json:"cols"` + Lines interface{} `json:"lines"` +} diff --git a/internal/record/record.go b/internal/record/record.go new file mode 100644 index 0000000..07ad798 --- /dev/null +++ b/internal/record/record.go @@ -0,0 +1,2 @@ +// Package record provides record types that are used in resh history files +package record diff --git a/internal/record/v1.go b/internal/record/v1.go new file mode 100644 index 0000000..606988a --- /dev/null +++ b/internal/record/v1.go @@ -0,0 
+1,58 @@ +package record + +type V1 struct { + // flags + // deleted, favorite + // FIXME: is this the best way? .. what about string, separate fields, or something similar + Flags int `json:"flags"` + + DeviceID string `json:"deviceID"` + SessionID string `json:"sessionID"` + // can we have a shorter uuid for record + RecordID string `json:"recordID"` + + // cmdline, exitcode + CmdLine string `json:"cmdLine"` + ExitCode int `json:"exitCode"` + + // paths + Home string `json:"home"` + Pwd string `json:"pwd"` + RealPwd string `json:"realPwd"` + + // hostname + lognem (not sure if we actually need logname) + Logname string `json:"logname"` + Hostname string `json:"hostname"` + + // git info + // origin is the most important + GitOriginRemote string `json:"gitOriginRemote"` + // maybe branch could be useful - e.g. in monorepo ?? + GitBranch string `json:"gitBranch"` + + // what is this for ?? + // session watching needs this + // but I'm not sure if we need to save it + // records belong to sessions + // PID int `json:"pid"` + // needed for tracking of sessions but I think it shouldn't be part of V1 + SessionPID int `json:"sessionPID"` + + // needed to because records are merged with parts with same "SessionID + Shlvl" + // I don't think we need to save it + Shlvl int `json:"shlvl"` + + // time (before), duration of command + Time float64 `json:"time"` + Duration float64 `json:"duration"` + + // these look like internal stuff + + // records come in two parts (collect and postcollect) + PartOne bool `json:"partOne,omitempty"` // false => part two + PartsNotMerged bool `json:"partsNotMerged,omitempty"` + + // special flag -> not an actual record but an session end + // TODO: this shouldn't be part of serializable V1 record + SessionExit bool `json:"sessionExit,omitempty"` +} diff --git a/internal/recordint/collect.go b/internal/recordint/collect.go new file mode 100644 index 0000000..b1e62e7 --- /dev/null +++ b/internal/recordint/collect.go @@ -0,0 +1,21 @@ +package recordint + +import "github.com/curusarn/resh/internal/record" + +type Collect struct { + // record merging + SessionID string + Shlvl int + // session watching + SessionPID int + + Rec record.V1 +} + +type Postcollect struct { + // record merging + SessionID string + Shlvl int + // session watching + SessionPID int +} diff --git a/internal/recordint/enriched.go b/internal/recordint/enriched.go new file mode 100644 index 0000000..b2141a4 --- /dev/null +++ b/internal/recordint/enriched.go @@ -0,0 +1,51 @@ +package recordint + +import ( + "github.com/curusarn/resh/internal/record" + "github.com/curusarn/resh/internal/recutil" +) + +// TODO: This all seems excessive +// TODO: V1 should be converted directly to SearchApp record + +// EnrichedRecord - record enriched with additional data +type Enriched struct { + // TODO: think about if it really makes sense to have this based on V1 + record.V1 + + // TODO: drop some/all of this + // enriching fields - added "later" + Command string `json:"command"` + FirstWord string `json:"firstWord"` + Invalid bool `json:"invalid"` + SeqSessionID uint64 `json:"seqSessionId"` + LastRecordOfSession bool `json:"lastRecordOfSession"` + DebugThisRecord bool `json:"debugThisRecord"` + Errors []string `json:"errors"` + // SeqSessionID uint64 `json:"seqSessionId,omitempty"` +} + +// Enriched - returns enriched record +func NewEnrichedFromV1(r *record.V1) Enriched { + rec := Enriched{Record: r} + // normlize git remote + rec.GitOriginRemote = NormalizeGitRemote(rec.GitOriginRemote) + rec.GitOriginRemoteAfter = 
NormalizeGitRemote(rec.GitOriginRemoteAfter) + // Get command/first word from commandline + var err error + err = recutil.Validate(r) + if err != nil { + rec.Errors = append(rec.Errors, "Validate error:"+err.Error()) + // rec, _ := record.ToString() + // sugar.Println("Invalid command:", rec) + rec.Invalid = true + } + rec.Command, rec.FirstWord, err = GetCommandAndFirstWord(r.CmdLine) + if err != nil { + rec.Errors = append(rec.Errors, "GetCommandAndFirstWord error:"+err.Error()) + // rec, _ := record.ToString() + // sugar.Println("Invalid command:", rec) + rec.Invalid = true // should this be really invalid ? + } + return rec +} diff --git a/internal/recordint/flag.go b/internal/recordint/flag.go new file mode 100644 index 0000000..2eaff6a --- /dev/null +++ b/internal/recordint/flag.go @@ -0,0 +1,9 @@ +package recordint + +type Flag struct { + deviceID string + recordID string + + flagDeleted bool + flagFavourite bool +} diff --git a/internal/recordint/indexed.go b/internal/recordint/indexed.go new file mode 100644 index 0000000..6d21870 --- /dev/null +++ b/internal/recordint/indexed.go @@ -0,0 +1,9 @@ +package recordint + +import "github.com/curusarn/resh/internal/record" + +// Indexed record allows us to find records in history file in order to edit them +type Indexed struct { + Rec record.V1 + Idx int +} diff --git a/internal/recordint/recordint.go b/internal/recordint/recordint.go new file mode 100644 index 0000000..73457cd --- /dev/null +++ b/internal/recordint/recordint.go @@ -0,0 +1,2 @@ +// Package recordint provides internal record types that are passed between resh components +package recordint diff --git a/internal/recordint/searchapp.go b/internal/recordint/searchapp.go new file mode 100644 index 0000000..965ebce --- /dev/null +++ b/internal/recordint/searchapp.go @@ -0,0 +1,40 @@ +package recordint + +// SearchApp record used for sending records to RESH-CLI +type SearchApp struct { + IsRaw bool + SessionID string + DeviceID string + + CmdLine string + Host string + Pwd string + Home string // helps us to collapse /home/user to tilde + GitOriginRemote string + ExitCode int + + Time float64 +} + +// NewCliRecordFromCmdLine +func NewSearchAppFromCmdLine(cmdLine string) SearchApp { + return SearchApp{ + IsRaw: true, + CmdLine: cmdLine, + } +} + +// NewCliRecord from EnrichedRecord +func NewSearchApp(r *Enriched) SearchApp { + return SearchApp{ + IsRaw: false, + SessionID: r.SessionID, + CmdLine: r.CmdLine, + Host: r.Hostname, + Pwd: r.Pwd, + Home: r.Home, + GitOriginRemote: r.GitOriginRemote, + ExitCode: r.ExitCode, + Time: r.Time, + } +} diff --git a/internal/records/records.go b/internal/records/records.go index b1f9a85..1747547 100644 --- a/internal/records/records.go +++ b/internal/records/records.go @@ -3,16 +3,13 @@ package records import ( "bufio" "encoding/json" - "errors" "fmt" "io" - "math" "os" "strconv" "strings" "github.com/curusarn/resh/internal/histlist" - "github.com/mattn/go-shellwords" "go.uber.org/zap" ) @@ -134,61 +131,6 @@ type FallbackRecord struct { Lines int `json:"lines"` // notice the int type } -// SlimRecord used for recalling because unmarshalling record w/ 50+ fields is too slow -type SlimRecord struct { - SessionID string `json:"sessionId"` - RecallHistno int `json:"recallHistno,omitempty"` - RecallPrefix string `json:"recallPrefix,omitempty"` - - // extra recall - we might use these in the future - // Pwd string `json:"pwd"` - // RealPwd string `json:"realPwd"` - // GitDir string `json:"gitDir"` - // GitRealDir string `json:"gitRealDir"` - // 
GitOriginRemote string `json:"gitOriginRemote"` - -} - -// CliRecord used for sending records to RESH-CLI -type CliRecord struct { - IsRaw bool `json:"isRaw"` - SessionID string `json:"sessionId"` - - CmdLine string `json:"cmdLine"` - Host string `json:"host"` - Pwd string `json:"pwd"` - Home string `json:"home"` // helps us to collapse /home/user to tilde - GitOriginRemote string `json:"gitOriginRemote"` - ExitCode int `json:"exitCode"` - - RealtimeBefore float64 `json:"realtimeBefore"` - // RealtimeAfter float64 `json:"realtimeAfter"` - // RealtimeDuration float64 `json:"realtimeDuration"` -} - -// NewCliRecordFromCmdLine from EnrichedRecord -func NewCliRecordFromCmdLine(cmdLine string) CliRecord { - return CliRecord{ - IsRaw: true, - CmdLine: cmdLine, - } -} - -// NewCliRecord from EnrichedRecord -func NewCliRecord(r EnrichedRecord) CliRecord { - return CliRecord{ - IsRaw: false, - SessionID: r.SessionID, - CmdLine: r.CmdLine, - Host: r.Host, - Pwd: r.Pwd, - Home: r.Home, - GitOriginRemote: r.GitOriginRemote, - ExitCode: r.ExitCode, - RealtimeBefore: r.RealtimeBefore, - } -} - // Convert from FallbackRecord to Record func Convert(r *FallbackRecord) Record { return Record{ @@ -208,308 +150,6 @@ func (r EnrichedRecord) ToString() (string, error) { return string(jsonRec), nil } -// Enriched - returnd enriched record -func Enriched(r Record) EnrichedRecord { - record := EnrichedRecord{Record: r} - // normlize git remote - record.GitOriginRemote = NormalizeGitRemote(record.GitOriginRemote) - record.GitOriginRemoteAfter = NormalizeGitRemote(record.GitOriginRemoteAfter) - // Get command/first word from commandline - var err error - err = r.Validate() - if err != nil { - record.Errors = append(record.Errors, "Validate error:"+err.Error()) - // rec, _ := record.ToString() - // sugar.Println("Invalid command:", rec) - record.Invalid = true - } - record.Command, record.FirstWord, err = GetCommandAndFirstWord(r.CmdLine) - if err != nil { - record.Errors = append(record.Errors, "GetCommandAndFirstWord error:"+err.Error()) - // rec, _ := record.ToString() - // sugar.Println("Invalid command:", rec) - record.Invalid = true // should this be really invalid ? 
- } - return record -} - -// Merge two records (part1 - collect + part2 - postcollect) -func (r *Record) Merge(r2 Record) error { - if r.PartOne == false || r2.PartOne { - return errors.New("Expected part1 and part2 of the same record - usage: part1.Merge(part2)") - } - if r.SessionID != r2.SessionID { - return errors.New("Records to merge are not from the same sesion - r1:" + r.SessionID + " r2:" + r2.SessionID) - } - if r.CmdLine != r2.CmdLine { - return errors.New("Records to merge are not parts of the same records - r1:" + r.CmdLine + " r2:" + r2.CmdLine) - } - if r.RecordID != r2.RecordID { - return errors.New("Records to merge do not have the same ID - r1:" + r.RecordID + " r2:" + r2.RecordID) - } - // r.RealtimeBefore != r2.RealtimeBefore - can't be used because of bash-preexec runs when it's not supposed to - r.ExitCode = r2.ExitCode - r.PwdAfter = r2.PwdAfter - r.RealPwdAfter = r2.RealPwdAfter - r.GitDirAfter = r2.GitDirAfter - r.GitRealDirAfter = r2.GitRealDirAfter - r.RealtimeAfter = r2.RealtimeAfter - r.GitOriginRemoteAfter = r2.GitOriginRemoteAfter - r.TimezoneAfter = r2.TimezoneAfter - r.RealtimeAfterLocal = r2.RealtimeAfterLocal - r.RealtimeDuration = r2.RealtimeDuration - - r.PartsMerged = true - r.PartOne = false - return nil -} - -// Validate - returns error if the record is invalid -func (r *Record) Validate() error { - if r.CmdLine == "" { - return errors.New("There is no CmdLine") - } - if r.RealtimeBefore == 0 || r.RealtimeAfter == 0 { - return errors.New("There is no Time") - } - if r.RealtimeBeforeLocal == 0 || r.RealtimeAfterLocal == 0 { - return errors.New("There is no Local Time") - } - if r.RealPwd == "" || r.RealPwdAfter == "" { - return errors.New("There is no Real Pwd") - } - if r.Pwd == "" || r.PwdAfter == "" { - return errors.New("There is no Pwd") - } - - // TimezoneBefore - // TimezoneAfter - - // RealtimeDuration - // RealtimeSinceSessionStart - TODO: add later - // RealtimeSinceBoot - TODO: add later - - // device extras - // Host - // Hosttype - // Ostype - // Machtype - // OsReleaseID - // OsReleaseVersionID - // OsReleaseIDLike - // OsReleaseName - // OsReleasePrettyName - - // session extras - // Term - // Shlvl - - // static info - // Lang - // LcAll - - // meta - // ReshUUID - // ReshVersion - // ReshRevision - - // added by sanitizatizer - // Sanitized - // CmdLength - return nil -} - -// SetCmdLine sets cmdLine and related members -func (r *EnrichedRecord) SetCmdLine(cmdLine string) { - r.CmdLine = cmdLine - r.CmdLength = len(cmdLine) - r.ExitCode = 0 - var err error - r.Command, r.FirstWord, err = GetCommandAndFirstWord(cmdLine) - if err != nil { - r.Errors = append(r.Errors, "GetCommandAndFirstWord error:"+err.Error()) - // sugar.Println("Invalid command:", r.CmdLine) - r.Invalid = true - } -} - -// Stripped returns record stripped of all info that is not available during prediction -func Stripped(r EnrichedRecord) EnrichedRecord { - // clear the cmd itself - r.SetCmdLine("") - // replace after info with before info - r.PwdAfter = r.Pwd - r.RealPwdAfter = r.RealPwd - r.TimezoneAfter = r.TimezoneBefore - r.RealtimeAfter = r.RealtimeBefore - r.RealtimeAfterLocal = r.RealtimeBeforeLocal - // clear some more stuff - r.RealtimeDuration = 0 - r.LastRecordOfSession = false - return r -} - -// GetCommandAndFirstWord func -func GetCommandAndFirstWord(cmdLine string) (string, string, error) { - args, err := shellwords.Parse(cmdLine) - if err != nil { - // Println("shellwords Error:", err, " (cmdLine: <", cmdLine, "> )") - return "", "", err - } - if 
len(args) == 0 { - return "", "", nil - } - i := 0 - for true { - // commands in shell sometimes look like this `variable=something command argument otherArgument --option` - // to get the command we skip over tokens that contain '=' - if strings.ContainsRune(args[i], '=') && len(args) > i+1 { - i++ - continue - } - return args[i], args[0], nil - } - return "ERROR", "ERROR", errors.New("failed to retrieve first word of command") -} - -// NormalizeGitRemote func -func NormalizeGitRemote(gitRemote string) string { - if strings.HasSuffix(gitRemote, ".git") { - return gitRemote[:len(gitRemote)-4] - } - return gitRemote -} - -// DistParams is used to supply params to Enrichedrecords.DistanceTo() -type DistParams struct { - ExitCode float64 - MachineID float64 - SessionID float64 - Login float64 - Shell float64 - Pwd float64 - RealPwd float64 - Git float64 - Time float64 -} - -// DistanceTo another record -func (r *EnrichedRecord) DistanceTo(r2 EnrichedRecord, p DistParams) float64 { - var dist float64 - dist = 0 - - // lev distance or something? TODO later - // CmdLine - - // exit code - if r.ExitCode != r2.ExitCode { - if r.ExitCode == 0 || r2.ExitCode == 0 { - // one success + one error -> 1 - dist += 1 * p.ExitCode - } else { - // two different errors - dist += 0.5 * p.ExitCode - } - } - - // machine/device - if r.MachineID != r2.MachineID { - dist += 1 * p.MachineID - } - // Uname - - // session - if r.SessionID != r2.SessionID { - dist += 1 * p.SessionID - } - // Pid - add because of nested shells? - // SessionPid - - // user - if r.Login != r2.Login { - dist += 1 * p.Login - } - // Home - - // shell - if r.Shell != r2.Shell { - dist += 1 * p.Shell - } - // ShellEnv - - // pwd - if r.Pwd != r2.Pwd { - // TODO: compare using hierarchy - // TODO: make more important - dist += 1 * p.Pwd - } - if r.RealPwd != r2.RealPwd { - // TODO: -||- - dist += 1 * p.RealPwd - } - // PwdAfter - // RealPwdAfter - - // git - if r.GitDir != r2.GitDir { - dist += 1 * p.Git - } - if r.GitRealDir != r2.GitRealDir { - dist += 1 * p.Git - } - if r.GitOriginRemote != r2.GitOriginRemote { - dist += 1 * p.Git - } - - // time - // this can actually get negative for differences of less than one second which is fine - // distance grows by 1 with every order - distTime := math.Log10(math.Abs(r.RealtimeBefore-r2.RealtimeBefore)) * p.Time - if math.IsNaN(distTime) == false && math.IsInf(distTime, 0) == false { - dist += distTime - } - // RealtimeBeforeLocal - // RealtimeAfter - // RealtimeAfterLocal - - // TimezoneBefore - // TimezoneAfter - - // RealtimeDuration - // RealtimeSinceSessionStart - TODO: add later - // RealtimeSinceBoot - TODO: add later - - // device extras - // Host - // Hosttype - // Ostype - // Machtype - // OsReleaseID - // OsReleaseVersionID - // OsReleaseIDLike - // OsReleaseName - // OsReleasePrettyName - - // session extras - // Term - // Shlvl - - // static info - // Lang - // LcAll - - // meta - // ReshUUID - // ReshVersion - // ReshRevision - - // added by sanitizatizer - // Sanitized - // CmdLength - - return dist -} - // LoadFromFile loads records from 'fname' file func LoadFromFile(sugar *zap.SugaredLogger, fname string) []Record { const allowedErrors = 3 diff --git a/internal/records/records_test.go b/internal/records/records_test.go index 35c6920..9bcb69f 100644 --- a/internal/records/records_test.go +++ b/internal/records/records_test.go @@ -77,75 +77,9 @@ func TestValidate(t *testing.T) { } } -func TestSetCmdLine(t *testing.T) { - record := EnrichedRecord{} - cmdline := "cmd arg1 arg2" - 
record.SetCmdLine(cmdline) - if record.CmdLine != cmdline || record.Command != "cmd" || record.FirstWord != "cmd" { - t.Error() - } -} - -func TestStripped(t *testing.T) { - for _, rec := range GetTestEnrichedRecords() { - stripped := Stripped(rec) - - // there should be no cmdline - if stripped.CmdLine != "" || - stripped.FirstWord != "" || - stripped.Command != "" { - t.Error("Stripped() returned record w/ info about CmdLine, Command OR FirstWord") - } - // *after* fields should be overwritten by *before* fields - if stripped.PwdAfter != stripped.Pwd || - stripped.RealPwdAfter != stripped.RealPwd || - stripped.TimezoneAfter != stripped.TimezoneBefore || - stripped.RealtimeAfter != stripped.RealtimeBefore || - stripped.RealtimeAfterLocal != stripped.RealtimeBeforeLocal { - t.Error("Stripped() returned record w/ different *after* and *before* values - *after* fields should be overwritten by *before* fields") - } - // there should be no information about duration and session end - if stripped.RealtimeDuration != 0 || - stripped.LastRecordOfSession != false { - t.Error("Stripped() returned record with too much information") - } - } -} - func TestGetCommandAndFirstWord(t *testing.T) { cmd, stWord, err := GetCommandAndFirstWord("cmd arg1 arg2") if err != nil || cmd != "cmd" || stWord != "cmd" { t.Error("GetCommandAndFirstWord() returned wrong Command OR FirstWord") } } - -func TestDistanceTo(t *testing.T) { - paramsFull := DistParams{ - ExitCode: 1, - MachineID: 1, - SessionID: 1, - Login: 1, - Shell: 1, - Pwd: 1, - RealPwd: 1, - Git: 1, - Time: 1, - } - paramsZero := DistParams{} - var prevRec EnrichedRecord - for _, rec := range GetTestEnrichedRecords() { - dist := rec.DistanceTo(rec, paramsFull) - if dist != 0 { - t.Error("DistanceTo() itself should be always 0") - } - dist = rec.DistanceTo(prevRec, paramsFull) - if dist == 0 { - t.Error("DistanceTo() between two test records shouldn't be 0") - } - dist = rec.DistanceTo(prevRec, paramsZero) - if dist != 0 { - t.Error("DistanceTo() should be 0 when DistParams is all zeros") - } - prevRec = rec - } -} diff --git a/internal/recutil/recutil.go b/internal/recutil/recutil.go new file mode 100644 index 0000000..96d8baa --- /dev/null +++ b/internal/recutil/recutil.go @@ -0,0 +1,94 @@ +package recutil + +import ( + "errors" + "net/url" + "strings" + + "github.com/curusarn/resh/internal/record" + "github.com/mattn/go-shellwords" + giturls "github.com/whilp/git-urls" +) + +// NormalizeGitRemote helper +func NormalizeGitRemote(gitRemote string) string { + if strings.HasSuffix(gitRemote, ".git") { + gitRemote = gitRemote[:len(gitRemote)-4] + } + parsedURL, err := giturls.Parse(gitRemote) + if err != nil { + // TODO: log this error + return gitRemote + } + if parsedURL.User == nil || parsedURL.User.Username() == "" { + parsedURL.User = url.User("git") + } + // TODO: figure out what scheme we want + parsedURL.Scheme = "git+ssh" + return parsedURL.String() +} + +// Validate returns error if the record is invalid +func Validate(r *record.V1) error { + if r.CmdLine == "" { + return errors.New("There is no CmdLine") + } + if r.RealtimeBefore == 0 || r.RealtimeAfter == 0 { + return errors.New("There is no Time") + } + if r.RealtimeBeforeLocal == 0 || r.RealtimeAfterLocal == 0 { + return errors.New("There is no Local Time") + } + if r.RealPwd == "" || r.RealPwdAfter == "" { + return errors.New("There is no Real Pwd") + } + if r.Pwd == "" || r.PwdAfter == "" { + return errors.New("There is no Pwd") + } + return nil +} + +// Merge two records (part1 - collect + 
part2 - postcollect) +func Merge(r1 *record.V1, r2 *record.V1) error { + if r1.PartOne == false || r2.PartOne { + return errors.New("Expected part1 and part2 of the same record - usage: Merge(part1, part2)") + } + if r1.SessionID != r2.SessionID { + return errors.New("Records to merge are not from the same sesion - r1:" + r1.SessionID + " r2:" + r2.SessionID) + } + if r1.CmdLine != r2.CmdLine { + return errors.New("Records to merge are not parts of the same records - r1:" + r1.CmdLine + " r2:" + r2.CmdLine) + } + if r1.RecordID != r2.RecordID { + return errors.New("Records to merge do not have the same ID - r1:" + r1.RecordID + " r2:" + r2.RecordID) + } + r1.ExitCode = r2.ExitCode + r1.Duration = r2.Duration + + r1.PartsMerged = true + r1.PartOne = false + return nil +} + +// GetCommandAndFirstWord func +func GetCommandAndFirstWord(cmdLine string) (string, string, error) { + args, err := shellwords.Parse(cmdLine) + if err != nil { + // Println("shellwords Error:", err, " (cmdLine: <", cmdLine, "> )") + return "", "", err + } + if len(args) == 0 { + return "", "", nil + } + i := 0 + for true { + // commands in shell sometimes look like this `variable=something command argument otherArgument --option` + // to get the command we skip over tokens that contain '=' + if strings.ContainsRune(args[i], '=') && len(args) > i+1 { + i++ + continue + } + return args[i], args[0], nil + } + return "ERROR", "ERROR", errors.New("failed to retrieve first word of command") +} diff --git a/scripts/hooks.sh b/scripts/hooks.sh index 5d9ebe3..a734b7b 100644 --- a/scripts/hooks.sh +++ b/scripts/hooks.sh @@ -29,19 +29,7 @@ __resh_collect() { local __RESH_GIT_REMOTE; __RESH_GIT_REMOTE="$(git remote get-url origin 2>/dev/null)" local __RESH_GIT_REMOTE_EXIT_CODE=$? - if [ -n "${ZSH_VERSION-}" ]; then - # assume Zsh - local __RESH_PID="$$" # current pid - elif [ -n "${BASH_VERSION-}" ]; then - # assume Bash - if [ "${BASH_VERSINFO[0]}" -ge "4" ]; then - # $BASHPID is only available in bash4+ - # $$ is fairly similar so it should not be an issue - local __RESH_PID="$BASHPID" # current pid - else - local __RESH_PID="$$" # current pid - fi - fi + local __RESH_PID="$$" # current pid # time local __RESH_TZ_BEFORE; __RESH_TZ_BEFORE=$(date +%z) # __RESH_RT_BEFORE="$EPOCHREALTIME" diff --git a/scripts/install.sh b/scripts/install.sh index b07ac4d..8d5aba9 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -94,6 +94,19 @@ fi # read -r x echo +echo "Backing up previous installation" +# TODO: ~/.resh -> XDG_DATA/resh/rollback/ +# TODO: ~/XDG_DATA/resh/history.reshjson -> XDG_DATA/resh/rollback/ +# TODO: what about legacy history locations +# TODO: ~/XDG_DATA/resh/log.json -> XDG_DATA/resh/rollback/ + +echo "Cleaning up installation directory ..." +rm ~/.resh/bin/* 2>/dev/null ||: +rm ~/.resh/* 2>/dev/null 2>/dev/null ||: +# TODO: put this behind version condition +# backward compatibility: We have a new location for resh history file +[ ! -f ~/.resh/history.json ] || mv ~/.resh/history.json ~/.resh_history.json + echo "Creating directories ..." mkdir_if_not_exists() { @@ -122,15 +135,9 @@ bin/resh-control completion zsh > ~/.resh/zsh_completion.d/_reshctl echo "Copying more files ..." cp -f scripts/uuid.sh ~/.resh/bin/resh-uuid -rm ~/.resh/bin/resh-* ||: cp -f bin/resh-{daemon,control,collect,postcollect,session-init,config} ~/.resh/bin/ -cp -f scripts/resh-evaluate-plot.py ~/.resh/bin/ -cp -fr data/sanitizer ~/.resh/sanitizer_data - -# backward compatibility: We have a new location for resh history file -[ ! 
-f ~/.resh/history.json ] || mv ~/.resh/history.json ~/.resh_history.json -echo "Checking config file ..." +echo "Creating/updating config file ..." ./bin/resh-config-setup echo "Finishing up ..." diff --git a/scripts/resh-evaluate-plot.py b/scripts/resh-evaluate-plot.py deleted file mode 100755 index 89792cb..0000000 --- a/scripts/resh-evaluate-plot.py +++ /dev/null @@ -1,1218 +0,0 @@ -#!/usr/bin/env python3 - - -import traceback -import sys -import json -from collections import defaultdict -import numpy as np -from graphviz import Digraph -from datetime import datetime - -from matplotlib import rcParams -rcParams['font.family'] = 'serif' -# rcParams['font.serif'] = [''] - -import matplotlib.pyplot as plt -import matplotlib.path as mpath -import matplotlib.patches as mpatches - -PLOT_WIDTH = 10 # inches -PLOT_HEIGHT = 7 # inches - -PLOT_SIZE_zipf = 20 - -data = json.load(sys.stdin) - -DATA_records = [] -DATA_records_by_session = defaultdict(list) -DATA_records_by_user = defaultdict(list) -for user in data["UsersRecords"]: - if user["Devices"] is None: - continue - for device in user["Devices"]: - if device["Records"] is None: - continue - for record in device["Records"]: - if "invalid" in record and record["invalid"]: - continue - - DATA_records.append(record) - DATA_records_by_session[record["seqSessionId"]].append(record) - DATA_records_by_user[user["Name"] + ":" + device["Name"]].append(record) - -DATA_records = list(sorted(DATA_records, key=lambda x: x["realtimeAfterLocal"])) - -for pid, session in DATA_records_by_session.items(): - session = list(sorted(session, key=lambda x: x["realtimeAfterLocal"])) - -# TODO: this should be a cmdline option -async_draw = True - -# for strategy in data["Strategies"]: -# print(json.dumps(strategy)) - -def zipf(length): - return list(map(lambda x: 1/2**x, range(0, length))) - - -def trim(text, length, add_elipse=True): - if add_elipse and len(text) > length: - return text[:length-1] + "…" - return text[:length] - - -# Figure 3.1. The normalized command frequency, compared with Zipf. -def plot_cmdLineFrq_rank(plotSize=PLOT_SIZE_zipf, show_labels=False): - cmdLine_count = defaultdict(int) - for record in DATA_records: - cmdLine_count[record["cmdLine"]] += 1 - - tmp = sorted(cmdLine_count.items(), key=lambda x: x[1], reverse=True)[:plotSize] - cmdLineFrq = list(map(lambda x: x[1] / tmp[0][1], tmp)) - labels = list(map(lambda x: trim(x[0], 7), tmp)) - - ranks = range(1, len(cmdLineFrq)+1) - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.plot(ranks, zipf(len(ranks)), '-') - plt.plot(ranks, cmdLineFrq, 'o-') - plt.title("Commandline frequency / rank") - plt.ylabel("Normalized commandline frequency") - plt.xlabel("Commandline rank") - plt.legend(("Zipf", "Commandline"), loc="best") - if show_labels: - plt.xticks(ranks, labels, rotation=-60) - # TODO: make xticks integral - if async_draw: - plt.draw() - else: - plt.show() - - -# similar to ~ Figure 3.1. The normalized command frequency, compared with Zipf. 
-def plot_cmdFrq_rank(plotSize=PLOT_SIZE_zipf, show_labels=False): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command frequency / rank") - plt.ylabel("Normalized command frequency") - plt.xlabel("Command rank") - legend = [] - - - cmd_count = defaultdict(int) - len_records = 0 - for record in DATA_records: - cmd = record["command"] - if cmd == "": - continue - cmd_count[cmd] += 1 - len_records += 1 - - tmp = sorted(cmd_count.items(), key=lambda x: x[1], reverse=True)[:plotSize] - cmdFrq = list(map(lambda x: x[1] / tmp[0][1], tmp)) - labels = list(map(lambda x: trim(x[0], 7), tmp)) - - top100percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(1 * len(cmd_count))])) / len_records - top10percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(0.1 * len(cmd_count))])) / len_records - top20percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(0.2 * len(cmd_count))])) / len_records - print("% ALL: Top {} %% of cmds amounts for {} %% of all command lines".format(100, top100percent)) - print("% ALL: Top {} %% of cmds amounts for {} %% of all command lines".format(10, top10percent)) - print("% ALL: Top {} %% of cmds amounts for {} %% of all command lines".format(20, top20percent)) - ranks = range(1, len(cmdFrq)+1) - plt.plot(ranks, zipf(len(ranks)), '-') - legend.append("Zipf distribution") - plt.plot(ranks, cmdFrq, 'o-') - legend.append("All subjects") - - - for user in DATA_records_by_user.items(): - cmd_count = defaultdict(int) - len_records = 0 - name, records = user - for record in records: - cmd = record["command"] - if cmd == "": - continue - cmd_count[cmd] += 1 - len_records += 1 - - tmp = sorted(cmd_count.items(), key=lambda x: x[1], reverse=True)[:plotSize] - cmdFrq = list(map(lambda x: x[1] / tmp[0][1], tmp)) - labels = list(map(lambda x: trim(x[0], 7), tmp)) - - top100percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(1 * len(cmd_count))])) / len_records - top10percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(0.1 * len(cmd_count))])) / len_records - top20percent = 100 * sum(map(lambda x: x[1], list(cmd_count.items())[:int(0.2 * len(cmd_count))])) / len_records - print("% {}: Top {} %% of cmds amounts for {} %% of all command lines".format(name, 100, top100percent)) - print("% {}: Top {} %% of cmds amounts for {} %% of all command lines".format(name, 10, top10percent)) - print("% {}: Top {} %% of cmds amounts for {} %% of all command lines".format(name, 20, top20percent)) - ranks = range(1, len(cmdFrq)+1) - plt.plot(ranks, cmdFrq, 'o-') - legend.append("{} (sanitize!)".format(name)) - - plt.legend(legend, loc="best") - - if show_labels: - plt.xticks(ranks, labels, rotation=-60) - # TODO: make xticks integral - if async_draw: - plt.draw() - else: - plt.show() - -# Figure 3.2. Command vocabulary size vs. the number of command lines entered for four individuals. -def plot_cmdVocabularySize_cmdLinesEntered(): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command vocabulary size vs. 
the number of command lines entered") - plt.ylabel("Command vocabulary size") - plt.xlabel("# of command lines entered") - legend = [] - - # x_count = max(map(lambda x: len(x[1]), DATA_records_by_user.items())) - # x_values = range(0, x_count) - for user in DATA_records_by_user.items(): - new_cmds_after_1k = 0 - new_cmds_after_2k = 0 - new_cmds_after_3k = 0 - cmd_vocabulary = set() - y_cmd_count = [0] - name, records = user - for record in records: - cmd = record["command"] - if cmd == "": - continue - if cmd in cmd_vocabulary: - # repeat last value - y_cmd_count.append(y_cmd_count[-1]) - else: - cmd_vocabulary.add(cmd) - # append last value +1 - y_cmd_count.append(y_cmd_count[-1] + 1) - if len(y_cmd_count) > 1000: - new_cmds_after_1k+=1 - if len(y_cmd_count) > 2000: - new_cmds_after_2k+=1 - if len(y_cmd_count) > 3000: - new_cmds_after_3k+=1 - - if len(y_cmd_count) == 1000: - print("% {}: Cmd adoption rate at 1k (between 0 and 1k) cmdlines = {}".format(name ,len(cmd_vocabulary) / (len(y_cmd_count)))) - if len(y_cmd_count) == 2000: - print("% {}: Cmd adoption rate at 2k cmdlines = {}".format(name ,len(cmd_vocabulary) / (len(y_cmd_count)))) - print("% {}: Cmd adoption rate between 1k and 2k cmdlines = {}".format(name ,new_cmds_after_1k / (len(y_cmd_count) - 1000))) - if len(y_cmd_count) == 3000: - print("% {}: Cmd adoption rate between 2k and 3k cmdlines = {}".format(name ,new_cmds_after_2k / (len(y_cmd_count) - 2000))) - - print("% {}: New cmd adoption rate after 1k cmdlines = {}".format(name ,new_cmds_after_1k / (len(y_cmd_count) - 1000))) - print("% {}: New cmd adoption rate after 2k cmdlines = {}".format(name ,new_cmds_after_2k / (len(y_cmd_count) - 2000))) - print("% {}: New cmd adoption rate after 3k cmdlines = {}".format(name ,new_cmds_after_3k / (len(y_cmd_count) - 3000))) - x_cmds_entered = range(0, len(y_cmd_count)) - plt.plot(x_cmds_entered, y_cmd_count, '-') - legend.append(name + " (TODO: sanitize!)") - - # print(cmd_vocabulary) - - plt.legend(legend, loc="best") - - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_cmdVocabularySize_daily(): - SECONDS_IN_A_DAY = 86400 - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command vocabulary size in days") - plt.ylabel("Command vocabulary size") - plt.xlabel("Days") - legend = [] - - # x_count = max(map(lambda x: len(x[1]), DATA_records_by_user.items())) - # x_values = range(0, x_count) - for user in DATA_records_by_user.items(): - new_cmds_after_100 = 0 - new_cmds_after_200 = 0 - new_cmds_after_300 = 0 - cmd_vocabulary = set() - y_cmd_count = [0] - name, records = user - - cmd_fail_count = 0 - - if not len(records): - print("ERROR: no records for user {}".format(name)) - continue - - first_day = records[0]["realtimeAfter"] - this_day = first_day - - for record in records: - cmd = record["command"] - timestamp = record["realtimeAfter"] - - if cmd == "": - cmd_fail_count += 1 - continue - - if timestamp >= this_day + SECONDS_IN_A_DAY: - this_day += SECONDS_IN_A_DAY - while timestamp >= this_day + SECONDS_IN_A_DAY: - y_cmd_count.append(-10) - this_day += SECONDS_IN_A_DAY - - y_cmd_count.append(len(cmd_vocabulary)) - cmd_vocabulary = set() # wipes the vocabulary each day - - if len(y_cmd_count) > 100: - new_cmds_after_100+=1 - if len(y_cmd_count) > 200: - new_cmds_after_200+=1 - if len(y_cmd_count) > 300: - new_cmds_after_300+=1 - - if len(y_cmd_count) == 100: - print("% {}: Cmd adoption rate at 100 days (between 0 and 100 days) = {}".format(name, len(cmd_vocabulary) / (len(y_cmd_count)))) - if len(y_cmd_count) 
== 200: - print("% {}: Cmd adoption rate at 200 days days = {}".format(name, len(cmd_vocabulary) / (len(y_cmd_count)))) - print("% {}: Cmd adoption rate between 100 and 200 days = {}".format(name, new_cmds_after_100 / (len(y_cmd_count) - 100))) - if len(y_cmd_count) == 300: - print("% {}: Cmd adoption rate between 200 and 300 days = {}".format(name, new_cmds_after_200 / (len(y_cmd_count) - 200))) - - if cmd not in cmd_vocabulary: - cmd_vocabulary.add(cmd) - - - print("% {}: New cmd adoption rate after 100 days = {}".format(name, new_cmds_after_100 / (len(y_cmd_count) - 100))) - print("% {}: New cmd adoption rate after 200 days = {}".format(name, new_cmds_after_200 / (len(y_cmd_count) - 200))) - print("% {}: New cmd adoption rate after 300 days = {}".format(name, new_cmds_after_300 / (len(y_cmd_count) - 300))) - print("% {}: cmd_fail_count = {}".format(name, cmd_fail_count)) - x_cmds_entered = range(0, len(y_cmd_count)) - plt.plot(x_cmds_entered, y_cmd_count, 'o', markersize=2) - legend.append(name + " (TODO: sanitize!)") - - # print(cmd_vocabulary) - - plt.legend(legend, loc="best") - plt.ylim(bottom=-5) - - if async_draw: - plt.draw() - else: - plt.show() - - -def matplotlib_escape(ss): - ss = ss.replace('$', '\\$') - return ss - - -def plot_cmdUsage_in_time(sort_cmds=False, num_cmds=None): - SECONDS_IN_A_DAY = 86400 - tab_colors = ("tab:blue", "tab:orange", "tab:green", "tab:red", "tab:purple", "tab:brown", "tab:pink", "tab:gray") - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command use in time") - plt.ylabel("Commands") - plt.xlabel("Days") - legend_patches = [] - - cmd_ids = {} - y_labels = [] - - all_x_values = [] - all_y_values = [] - all_s_values = [] # size - all_c_values = [] # color - - x_values = [] - y_values = [] - s_values = [] # size - c_values = [] # color - - if sort_cmds: - cmd_count = defaultdict(int) - for user in DATA_records_by_user.items(): - name, records = user - for record in records: - cmd = record["command"] - cmd_count[cmd] += 1 - - sorted_cmds = map(lambda x: x[0], sorted(cmd_count.items(), key=lambda x: x[1], reverse=True)) - - for cmd in sorted_cmds: - cmd_ids[cmd] = len(cmd_ids) - y_labels.append(matplotlib_escape(cmd)) - - - for user_idx, user in enumerate(DATA_records_by_user.items()): - name, records = user - - if not len(records): - print("ERROR: no records for user {}".format(name)) - continue - - - first_day = records[0]["realtimeAfter"] - this_day = first_day - day_no = 0 - today_cmds = defaultdict(int) - - for record in records: - cmd = record["command"] - timestamp = record["realtimeAfter"] - - if cmd == "": - print("NOTICE: Empty cmd for {}".format(record["cmdLine"])) - continue - - if timestamp >= this_day + SECONDS_IN_A_DAY: - for item in today_cmds.items(): - cmd, count = item - cmd_id = cmd_ids[cmd] - # skip commands with high ids - if num_cmds is not None and cmd_id >= num_cmds: - continue - - x_values.append(day_no) - y_values.append(cmd_id) - s_values.append(count) - c_values.append(tab_colors[user_idx]) - - today_cmds = defaultdict(int) - - this_day += SECONDS_IN_A_DAY - day_no += 1 - while timestamp >= this_day + SECONDS_IN_A_DAY: - this_day += SECONDS_IN_A_DAY - day_no += 1 - - if cmd not in cmd_ids: - cmd_ids[cmd] = len(cmd_ids) - y_labels.append(matplotlib_escape(cmd)) - - today_cmds[cmd] += 1 - - all_x_values.extend(x_values) - all_y_values.extend(y_values) - all_s_values.extend(s_values) - all_c_values.extend(c_values) - x_values = [] - y_values = [] - s_values = [] - c_values = [] - 
legend_patches.append(mpatches.Patch(color=tab_colors[user_idx], label="{} ({}) (TODO: sanitize!)".format(name, user_idx))) - - if num_cmds is not None and len(y_labels) > num_cmds: - y_labels = y_labels[:num_cmds] - plt.yticks(ticks=range(0, len(y_labels)), labels=y_labels, fontsize=6) - plt.scatter(all_x_values, all_y_values, s=all_s_values, c=all_c_values, marker='o') - plt.legend(handles=legend_patches, loc="best") - - if async_draw: - plt.draw() - else: - plt.show() - - -# Figure 5.6. Command line vocabulary size vs. the number of commands entered for four typical individuals. -def plot_cmdVocabularySize_time(): - SECONDS_IN_A_DAY = 86400 - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command vocabulary size growth in time") - plt.ylabel("Command vocabulary size") - plt.xlabel("Days") - legend = [] - - # x_count = max(map(lambda x: len(x[1]), DATA_records_by_user.items())) - # x_values = range(0, x_count) - for user in DATA_records_by_user.items(): - new_cmds_after_100 = 0 - new_cmds_after_200 = 0 - new_cmds_after_300 = 0 - cmd_vocabulary = set() - y_cmd_count = [0] - name, records = user - - cmd_fail_count = 0 - - if not len(records): - print("ERROR: no records for user {}".format(name)) - continue - - first_day = records[0]["realtimeAfter"] - this_day = first_day - - for record in records: - cmd = record["command"] - timestamp = record["realtimeAfter"] - - if cmd == "": - cmd_fail_count += 1 - continue - - if timestamp >= this_day + SECONDS_IN_A_DAY: - this_day += SECONDS_IN_A_DAY - while timestamp >= this_day + SECONDS_IN_A_DAY: - y_cmd_count.append(-10) - this_day += SECONDS_IN_A_DAY - - y_cmd_count.append(len(cmd_vocabulary)) - - if len(y_cmd_count) > 100: - new_cmds_after_100+=1 - if len(y_cmd_count) > 200: - new_cmds_after_200+=1 - if len(y_cmd_count) > 300: - new_cmds_after_300+=1 - - if len(y_cmd_count) == 100: - print("% {}: Cmd adoption rate at 100 days (between 0 and 100 days) = {}".format(name, len(cmd_vocabulary) / (len(y_cmd_count)))) - if len(y_cmd_count) == 200: - print("% {}: Cmd adoption rate at 200 days days = {}".format(name, len(cmd_vocabulary) / (len(y_cmd_count)))) - print("% {}: Cmd adoption rate between 100 and 200 days = {}".format(name, new_cmds_after_100 / (len(y_cmd_count) - 100))) - if len(y_cmd_count) == 300: - print("% {}: Cmd adoption rate between 200 and 300 days = {}".format(name, new_cmds_after_200 / (len(y_cmd_count) - 200))) - - if cmd not in cmd_vocabulary: - cmd_vocabulary.add(cmd) - - - print("% {}: New cmd adoption rate after 100 days = {}".format(name, new_cmds_after_100 / (len(y_cmd_count) - 100))) - print("% {}: New cmd adoption rate after 200 days = {}".format(name, new_cmds_after_200 / (len(y_cmd_count) - 200))) - print("% {}: New cmd adoption rate after 300 days = {}".format(name, new_cmds_after_300 / (len(y_cmd_count) - 300))) - print("% {}: cmd_fail_count = {}".format(name, cmd_fail_count)) - x_cmds_entered = range(0, len(y_cmd_count)) - plt.plot(x_cmds_entered, y_cmd_count, 'o', markersize=2) - legend.append(name + " (TODO: sanitize!)") - - # print(cmd_vocabulary) - - plt.legend(legend, loc="best") - plt.ylim(bottom=0) - - if async_draw: - plt.draw() - else: - plt.show() - - -# Figure 5.6. Command line vocabulary size vs. the number of commands entered for four typical individuals. -def plot_cmdLineVocabularySize_cmdLinesEntered(): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Command line vocabulary size vs. 
the number of command lines entered") - plt.ylabel("Command line vocabulary size") - plt.xlabel("# of command lines entered") - legend = [] - - for user in DATA_records_by_user.items(): - cmdLine_vocabulary = set() - y_cmdLine_count = [0] - name, records = user - for record in records: - cmdLine = record["cmdLine"] - if cmdLine in cmdLine_vocabulary: - # repeat last value - y_cmdLine_count.append(y_cmdLine_count[-1]) - else: - cmdLine_vocabulary.add(cmdLine) - # append last value +1 - y_cmdLine_count.append(y_cmdLine_count[-1] + 1) - - # print(cmdLine_vocabulary) - x_cmdLines_entered = range(0, len(y_cmdLine_count)) - plt.plot(x_cmdLines_entered, y_cmdLine_count, '-') - legend.append(name + " (TODO: sanitize!)") - - plt.legend(legend, loc="best") - - if async_draw: - plt.draw() - else: - plt.show() - -# Figure 3.3. Sequential structure of UNIX command usage, from Figure 4 in Hanson et al. (1984). -# Ball diameters are proportional to stationary probability. Lines indicate significant dependencies, -# solid ones being more probable (p < .0001) and dashed ones less probable (.005 < p < .0001). -def graph_cmdSequences(node_count=33, edge_minValue=0.05, view_graph=True): - START_CMD = "_start_" - END_CMD = "_end_" - cmd_count = defaultdict(int) - cmdSeq_count = defaultdict(lambda: defaultdict(int)) - cmd_id = dict() - x = 0 - cmd_id[START_CMD] = str(x) - x += 1 - cmd_id[END_CMD] = str(x) - for pid, session in DATA_records_by_session.items(): - cmd_count[START_CMD] += 1 - prev_cmd = START_CMD - for record in session: - cmd = record["command"] - if cmd == "": - continue - cmdSeq_count[prev_cmd][cmd] += 1 - cmd_count[cmd] += 1 - if cmd not in cmd_id: - x += 1 - cmd_id[cmd] = str(x) - prev_cmd = cmd - # end the session - cmdSeq_count[prev_cmd][END_CMD] += 1 - cmd_count[END_CMD] += 1 - - - # get `node_count` of largest nodes - sorted_cmd_count = sorted(cmd_count.items(), key=lambda x: x[1], reverse=True) - print(sorted_cmd_count) - cmds_to_graph = list(map(lambda x: x[0], sorted_cmd_count))[:node_count] - - # use 3 biggest nodes as a reference point for scaling - biggest_node = cmd_count[cmds_to_graph[0]] - nd_biggest_node = cmd_count[cmds_to_graph[1]] - rd_biggest_node = cmd_count[cmds_to_graph[1]] - count2scale_coef = 3 / (biggest_node + nd_biggest_node + rd_biggest_node) - - # scaling constant - # affects node size and node label - base_scaling_factor = 21 - # extra scaling for experiments - not really useful imho - # affects everything nodes, edges, node labels, treshold for turning label into xlabel, xlabel size, ... 
- extra_scaling_factor = 1.0 - for x in range(0, 10): - # graphviz is not the most reliable piece of software - # -> retry on fail but scale nodes down by 1% - scaling_factor = base_scaling_factor * (1 - x * 0.01) - - # overlap: scale -> solve overlap by scaling the graph - # overlap_shrink -> try to shrink the graph a bit after you are done - # splines -> don't draw edges over nodes - # sep: 2.5 -> assume that nodes are 2.5 inches larger - graph_attr={'overlap':'scale', 'overlap_shrink':'true', - 'splines':'true', 'sep':'0.25'} - graph = Digraph(name='command_sequentiality', engine='neato', graph_attr=graph_attr) - - # iterate over all nodes - for cmd in cmds_to_graph: - seq = cmdSeq_count[cmd] - count = cmd_count[cmd] - - # iterate over all "following" commands (for each node) - for seq_entry in seq.items(): - cmd2, seq_count = seq_entry - relative_seq_count = seq_count / count - - # check if "follow" command is supposed to be in the graph - if cmd2 not in cmds_to_graph: - continue - # check if the edge value is high enough - if relative_seq_count < edge_minValue: - continue - - # create starting node and end node for the edge - # duplicates don't matter - for id_, cmd_ in ((cmd_id[cmd], cmd), (cmd_id[cmd2], cmd2)): - count_ = cmd_count[cmd_] - scale_ = count_ * count2scale_coef * scaling_factor * extra_scaling_factor - width_ = 0.08 * scale_ - fontsize_ = 8.5 * scale_ / (len(cmd_) + 3) - - width_ = str(width_) - if fontsize_ < 12 * extra_scaling_factor: - graph.node(id_, ' ', shape='circle', fixedsize='true', fontname='monospace bold', - width=width_, fontsize=str(12 * extra_scaling_factor), forcelabels='true', xlabel=cmd_) - else: - fontsize_ = str(fontsize_) - graph.node(id_, cmd_, shape='circle', fixedsize='true', fontname='monospace bold', - width=width_, fontsize=fontsize_, forcelabels='true', labelloc='c') - - # value of the edge (percentage) 1.0 is max - scale_ = seq_count / cmd_count[cmd] - penwidth_ = str((0.5 + 4.5 * scale_) * extra_scaling_factor) - #penwidth_bold_ = str(8 * scale_) - # if scale_ > 0.5: - # graph.edge(cmd_id[cmd], cmd_id[cmd2], constraint='true', splines='curved', - # penwidth=penwidth_, style='bold', arrowhead='diamond') - # elif scale_ > 0.2: - if scale_ > 0.3: - scale_ = str(int(scale_ * 100)/100) - graph.edge(cmd_id[cmd], cmd_id[cmd2], constraint='true', splines='curved', - penwidth=penwidth_, forcelables='true', label=scale_) - elif scale_ > 0.2: - graph.edge(cmd_id[cmd], cmd_id[cmd2], constraint='true', splines='curved', - penwidth=penwidth_, style='dashed') - # elif scale_ > 0.1: - else: - graph.edge(cmd_id[cmd], cmd_id[cmd2], constraint='false', splines='curved', - penwidth=penwidth_, style='dotted', arrowhead='empty') - - # graphviz sometimes fails - see above - try: - # graph.view() - graph.render('/tmp/resh-graph-command_sequence-nodeCount_{}-edgeMinVal_{}.gv'.format(node_count, edge_minValue), view=view_graph) - break - except Exception as e: - trace = traceback.format_exc() - print("GRAPHVIZ EXCEPTION: <{}>\nGRAPHVIZ TRACE: <{}>".format(str(e), trace)) - - -def plot_strategies_matches(plot_size=50, selected_strategies=[], show_strat_title=True, force_strat_title=None): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Matches at distance <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel('%' + " of matches") - plt.xlabel("Distance") - legend = [] - x_values = range(1, plot_size+1) - saved_matches_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # 
strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches = [0] * plot_size - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for match in strategy["Matches"]: - dataPoint_count += 1 - - if not match["Match"]: - continue - - chars = match["CharsRecalled"] - charsRecalled_total += chars - matches_total += 1 - - dist = match["Distance"] - if dist > plot_size: - continue - - matches[dist-1] += 1 - charsRecalled[dist-1] += chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_matches_total = matches_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - acc = 0 - matches_cumulative = [] - for x in matches: - acc += x - matches_cumulative.append(acc) - # matches_cumulative.append(matches_total) - matches_percent = list(map(lambda x: 100 * x / dataPoint_count, matches_cumulative)) - - plt.plot(x_values, matches_percent, 'o-') - if force_strat_title is not None: - legend.append(force_strat_title) - else: - legend.append(strategy_title) - - - assert(saved_matches_total is not None) - assert(saved_dataPoint_count is not None) - max_values = [100 * saved_matches_total / saved_dataPoint_count] * len(x_values) - print("% >>> Avg recurrence rate = {}".format(max_values[0])) - plt.plot(x_values, max_values, 'r-') - legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - plt.ylim(bottom=0) - if show_strat_title: - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_strategies_charsRecalled(plot_size=50, selected_strategies=[]): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Average characters recalled at distance <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel("Average characters recalled") - plt.xlabel("Distance") - x_values = range(1, plot_size+1) - legend = [] - saved_charsRecalled_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches = [0] * plot_size - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for match in strategy["Matches"]: - dataPoint_count += 1 - - if not match["Match"]: - continue - - chars = match["CharsRecalled"] - charsRecalled_total += chars - matches_total += 1 - - dist = match["Distance"] - if dist > plot_size: - continue - - matches[dist-1] += 1 - charsRecalled[dist-1] += chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_charsRecalled_total = charsRecalled_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - acc = 0 - charsRecalled_cumulative = [] - for x in charsRecalled: - acc += x - charsRecalled_cumulative.append(acc) - charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled_cumulative)) - - plt.plot(x_values, charsRecalled_average, 'o-') - legend.append(strategy_title) - - assert(saved_charsRecalled_total is not None) - assert(saved_dataPoint_count is not None) - max_values = [saved_charsRecalled_total / saved_dataPoint_count] * len(x_values) - print("% >>> 
Max avg recalled characters = {}".format(max_values[0])) - plt.plot(x_values, max_values, 'r-') - legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - plt.ylim(bottom=0) - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_strategies_charsRecalled_prefix(plot_size=50, selected_strategies=[]): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Average characters recalled at distance (including prefix matches) <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel("Average characters recalled (including prefix matches)") - plt.xlabel("Distance") - x_values = range(1, plot_size+1) - legend = [] - saved_charsRecalled_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for multiMatch in strategy["PrefixMatches"]: - dataPoint_count += 1 - - if not multiMatch["Match"]: - continue - matches_total += 1 - - last_charsRecalled = 0 - for match in multiMatch["Entries"]: - - chars = match["CharsRecalled"] - charsIncrease = chars - last_charsRecalled - assert(charsIncrease > 0) - charsRecalled_total += charsIncrease - - dist = match["Distance"] - if dist > plot_size: - continue - - charsRecalled[dist-1] += charsIncrease - last_charsRecalled = chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_charsRecalled_total = charsRecalled_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - acc = 0 - charsRecalled_cumulative = [] - for x in charsRecalled: - acc += x - charsRecalled_cumulative.append(acc) - charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled_cumulative)) - - plt.plot(x_values, charsRecalled_average, 'o-') - legend.append(strategy_title) - - assert(saved_charsRecalled_total is not None) - assert(saved_dataPoint_count is not None) - max_values = [saved_charsRecalled_total / saved_dataPoint_count] * len(x_values) - print("% >>> Max avg recalled characters (including prefix matches) = {}".format(max_values[0])) - plt.plot(x_values, max_values, 'r-') - legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - plt.ylim(bottom=0) - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_strategies_matches_noncummulative(plot_size=50, selected_strategies=["recent (bash-like)"], show_strat_title=False, force_strat_title=None): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Matches at distance (noncumulative) <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel('%' + " of matches") - plt.xlabel("Distance") - legend = [] - x_values = range(1, plot_size+1) - saved_matches_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches = [0] * plot_size - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for match in strategy["Matches"]: - dataPoint_count += 1 - - if not match["Match"]: - continue - - 
chars = match["CharsRecalled"] - charsRecalled_total += chars - matches_total += 1 - - dist = match["Distance"] - if dist > plot_size: - continue - - matches[dist-1] += 1 - charsRecalled[dist-1] += chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_matches_total = matches_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - # acc = 0 - # matches_cumulative = [] - # for x in matches: - # acc += x - # matches_cumulative.append(acc) - # # matches_cumulative.append(matches_total) - matches_percent = list(map(lambda x: 100 * x / dataPoint_count, matches)) - - plt.plot(x_values, matches_percent, 'o-') - if force_strat_title is not None: - legend.append(force_strat_title) - else: - legend.append(strategy_title) - - assert(saved_matches_total is not None) - assert(saved_dataPoint_count is not None) - # max_values = [100 * saved_matches_total / saved_dataPoint_count] * len(x_values) - # print("% >>> Avg recurrence rate = {}".format(max_values[0])) - # plt.plot(x_values, max_values, 'r-') - # legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - # plt.ylim(bottom=0) - if show_strat_title: - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_strategies_charsRecalled_noncummulative(plot_size=50, selected_strategies=["recent (bash-like)"], show_strat_title=False): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Average characters recalled at distance (noncumulative) <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel("Average characters recalled") - plt.xlabel("Distance") - x_values = range(1, plot_size+1) - legend = [] - saved_charsRecalled_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches = [0] * plot_size - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for match in strategy["Matches"]: - dataPoint_count += 1 - - if not match["Match"]: - continue - - chars = match["CharsRecalled"] - charsRecalled_total += chars - matches_total += 1 - - dist = match["Distance"] - if dist > plot_size: - continue - - matches[dist-1] += 1 - charsRecalled[dist-1] += chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_charsRecalled_total = charsRecalled_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - # acc = 0 - # charsRecalled_cumulative = [] - # for x in charsRecalled: - # acc += x - # charsRecalled_cumulative.append(acc) - # charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled_cumulative)) - charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled)) - - plt.plot(x_values, charsRecalled_average, 'o-') - legend.append(strategy_title) - - assert(saved_charsRecalled_total is not None) - assert(saved_dataPoint_count is not None) - # max_values = [saved_charsRecalled_total / saved_dataPoint_count] * len(x_values) - # print("% >>> Max avg recalled characters = {}".format(max_values[0])) - # plt.plot(x_values, max_values, 'r-') - # 
legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - # plt.ylim(bottom=0) - if show_strat_title: - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def plot_strategies_charsRecalled_prefix_noncummulative(plot_size=50, selected_strategies=["recent (bash-like)"], show_strat_title=False): - plt.figure(figsize=(PLOT_WIDTH, PLOT_HEIGHT)) - plt.title("Average characters recalled at distance (including prefix matches) (noncummulative) <{}>".format(datetime.now().strftime('%H:%M:%S'))) - plt.ylabel("Average characters recalled (including prefix matches)") - plt.xlabel("Distance") - x_values = range(1, plot_size+1) - legend = [] - saved_charsRecalled_total = None - saved_dataPoint_count = None - for strategy in data["Strategies"]: - strategy_title = strategy["Title"] - # strategy_description = strategy["Description"] - - dataPoint_count = 0 - matches_total = 0 - charsRecalled = [0] * plot_size - charsRecalled_total = 0 - - for multiMatch in strategy["PrefixMatches"]: - dataPoint_count += 1 - - if not multiMatch["Match"]: - continue - matches_total += 1 - - last_charsRecalled = 0 - for match in multiMatch["Entries"]: - - chars = match["CharsRecalled"] - charsIncrease = chars - last_charsRecalled - assert(charsIncrease > 0) - charsRecalled_total += charsIncrease - - dist = match["Distance"] - if dist > plot_size: - continue - - charsRecalled[dist-1] += charsIncrease - last_charsRecalled = chars - - # recent is very simple strategy so we will believe - # that there is no bug in it and we can use it to determine total - if strategy_title == "recent": - saved_charsRecalled_total = charsRecalled_total - saved_dataPoint_count = dataPoint_count - - if len(selected_strategies) and strategy_title not in selected_strategies: - continue - - # acc = 0 - # charsRecalled_cumulative = [] - # for x in charsRecalled: - # acc += x - # charsRecalled_cumulative.append(acc) - # charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled_cumulative)) - charsRecalled_average = list(map(lambda x: x / dataPoint_count, charsRecalled)) - - plt.plot(x_values, charsRecalled_average, 'o-') - legend.append(strategy_title) - - assert(saved_charsRecalled_total is not None) - assert(saved_dataPoint_count is not None) - # max_values = [saved_charsRecalled_total / saved_dataPoint_count] * len(x_values) - # print("% >>> Max avg recalled characters (including prefix matches) = {}".format(max_values[0])) - # plt.plot(x_values, max_values, 'r-') - # legend.append("maximum possible") - - x_ticks = list(range(1, plot_size+1, 2)) - x_labels = x_ticks[:] - plt.xticks(x_ticks, x_labels) - # plt.ylim(bottom=0) - if show_strat_title: - plt.legend(legend, loc="best") - if async_draw: - plt.draw() - else: - plt.show() - - -def print_top_cmds(num_cmds=20): - cmd_count = defaultdict(int) - cmd_total = 0 - for pid, session in DATA_records_by_session.items(): - for record in session: - cmd = record["command"] - if cmd == "": - continue - cmd_count[cmd] += 1 - cmd_total += 1 - - # get `node_count` of largest nodes - sorted_cmd_count = list(sorted(cmd_count.items(), key=lambda x: x[1], reverse=True)) - print("\n\n% All subjects: Top commands") - for cmd, count in sorted_cmd_count[:num_cmds]: - print("{} {}".format(cmd, count)) - # print(sorted_cmd_count) - # cmds_to_graph = list(map(lambda x: x[0], sorted_cmd_count))[:cmd_count] - - -def print_top_cmds_by_user(num_cmds=20): - for user in 
DATA_records_by_user.items(): - name, records = user - cmd_count = defaultdict(int) - cmd_total = 0 - for record in records: - cmd = record["command"] - if cmd == "": - continue - cmd_count[cmd] += 1 - cmd_total += 1 - - # get `node_count` of largest nodes - sorted_cmd_count = list(sorted(cmd_count.items(), key=lambda x: x[1], reverse=True)) - print("\n\n% {}: Top commands".format(name)) - for cmd, count in sorted_cmd_count[:num_cmds]: - print("{} {}".format(cmd, count)) - # print(sorted_cmd_count) - # cmds_to_graph = list(map(lambda x: x[0], sorted_cmd_count))[:cmd_count] - - -def print_avg_cmdline_length(): - cmd_len_total = 0 - cmd_total = 0 - for pid, session in DATA_records_by_session.items(): - for record in session: - cmd = record["cmdLine"] - if cmd == "": - continue - cmd_len_total += len(cmd) - cmd_total += 1 - - print("% ALL avg cmdline = {}".format(cmd_len_total / cmd_total)) - # print(sorted_cmd_count) - # cmds_to_graph = list(map(lambda x: x[0], sorted_cmd_count))[:cmd_count] - - -# plot_cmdLineFrq_rank() -# plot_cmdFrq_rank() -print_top_cmds(30) -print_top_cmds_by_user(30) -# print_avg_cmdline_length() -# -# plot_cmdLineVocabularySize_cmdLinesEntered() -plot_cmdVocabularySize_cmdLinesEntered() -plot_cmdVocabularySize_time() -# plot_cmdVocabularySize_daily() -plot_cmdUsage_in_time(num_cmds=100) -plot_cmdUsage_in_time(sort_cmds=True, num_cmds=100) -# -recent_strats=("recent", "recent (bash-like)") -recurrence_strat=("recent (bash-like)",) -# plot_strategies_matches(20) -# plot_strategies_charsRecalled(20) -# plot_strategies_charsRecalled_prefix(20) -# plot_strategies_charsRecalled_noncummulative(20, selected_strategies=recent_strats) -# plot_strategies_matches_noncummulative(20) -# plot_strategies_charsRecalled_noncummulative(20) -# plot_strategies_charsRecalled_prefix_noncummulative(20) -# plot_strategies_matches(20, selected_strategies=recurrence_strat, show_strat_title=True, force_strat_title="recurrence rate") -# plot_strategies_matches_noncummulative(20, selected_strategies=recurrence_strat, show_strat_title=True, force_strat_title="recurrence rate") - -# graph_cmdSequences(node_count=33, edge_minValue=0.048) - -# graph_cmdSequences(node_count=28, edge_minValue=0.06) - -# new improved -# for n in range(40, 43): -# for e in range(94, 106, 2): -# e *= 0.001 -# graph_cmdSequences(node_count=n, edge_minValue=e, view_graph=False) - -#for n in range(29, 35): -# for e in range(44, 56, 2): -# e *= 0.001 -# graph_cmdSequences(node_count=n, edge_minValue=e, view_graph=False) - -# be careful and check if labels fit the display - -if async_draw: - plt.show() diff --git a/scripts/shellrc.sh b/scripts/shellrc.sh index 311ed2a..2bc84ee 100644 --- a/scripts/shellrc.sh +++ b/scripts/shellrc.sh @@ -47,24 +47,9 @@ __RESH_TERM="$TERM" # non-posix __RESH_RT_SESSION=$(__resh_get_epochrealtime) -__RESH_OSTYPE="$OSTYPE" +__RESH_OSTYPE="$OSTYPE" __RESH_MACHTYPE="$MACHTYPE" -if [ $__RESH_LINUX -eq 1 ]; then - __RESH_OS_RELEASE_ID=$(. /etc/os-release; echo "$ID") - __RESH_OS_RELEASE_VERSION_ID=$(. /etc/os-release; echo "$VERSION_ID") - __RESH_OS_RELEASE_ID_LIKE=$(. /etc/os-release; echo "$ID_LIKE") - __RESH_OS_RELEASE_NAME=$(. /etc/os-release; echo "$NAME") - __RESH_OS_RELEASE_PRETTY_NAME=$(. 
/etc/os-release; echo "$PRETTY_NAME")
-    __RESH_RT_SESS_SINCE_BOOT=$(cut -d' ' -f1 /proc/uptime)
-elif [ $__RESH_MACOS -eq 1 ]; then
-    __RESH_OS_RELEASE_ID="macos"
-    __RESH_OS_RELEASE_VERSION_ID=$(sw_vers -productVersion 2>/dev/null)
-    __RESH_OS_RELEASE_NAME="macOS"
-    __RESH_OS_RELEASE_PRETTY_NAME="Mac OS X"
-    __RESH_RT_SESS_SINCE_BOOT=$(sysctl -n kern.boottime | awk '{print $4}' | sed 's/,//g')
-fi
-
 # shellcheck disable=2155
 export __RESH_VERSION=$(resh-collect -version)
 # shellcheck disable=2155
diff --git a/scripts/util.sh b/scripts/util.sh
index b434ecc..7383e57 100644
--- a/scripts/util.sh
+++ b/scripts/util.sh
@@ -68,18 +68,10 @@ __resh_bash_completion_init() {
     # skip completion init if they are not
     _get_comp_words_by_ref >/dev/null 2>/dev/null
     [[ $? == 127 ]] && return
-    local bash_completion_dir=~/.resh/bash_completion.d
-    if [[ -d $bash_completion_dir && -r $bash_completion_dir && \
-        -x $bash_completion_dir ]]; then
-        for i in $(LC_ALL=C command ls "$bash_completion_dir"); do
-            i=$bash_completion_dir/$i
-            # shellcheck disable=SC2154
-            # shellcheck source=/dev/null
-            [[ -f "$i" && -r "$i" ]] && . "$i"
-        done
-    fi
+    . ~/.resh/bash_completion.d/_reshctl
 }
 
+# TODO: redo this
 __resh_zsh_completion_init() {
     # NOTE: this is hacky - each completion needs to be added individually
     # TODO: fix later
@@ -137,7 +129,6 @@ __resh_session_init() {
         fi
     fi
     if [ "$__RESH_VERSION" = "$(resh-session-init -version)" ] && [ "$__RESH_REVISION" = "$(resh-session-init -revision)" ]; then
-        local fpath_last_run="$__RESH_XDG_CACHE_HOME/session_init_last_run_out.txt"
         resh-session-init -requireVersion "$__RESH_VERSION" \
                     -requireRevision "$__RESH_REVISION" \
                     -shell "$__RESH_SHELL" \
@@ -166,26 +157,6 @@ __resh_session_init() {
                     -osReleaseVersionId "$__RESH_OS_RELEASE_VERSION_ID" \
                     -osReleaseIdLike "$__RESH_OS_RELEASE_ID_LIKE" \
                     -osReleaseName "$__RESH_OS_RELEASE_NAME" \
-                    -osReleasePrettyName "$__RESH_OS_RELEASE_PRETTY_NAME" \
-                    >| "$fpath_last_run" 2>&1 || echo "resh-session-init ERROR: $(head -n 1 $fpath_last_run)"
+                    -osReleasePrettyName "$__RESH_OS_RELEASE_PRETTY_NAME"
     fi
 }
-
-__resh_set_xdg_home_paths() {
-    if [ -z "${XDG_CACHE_HOME-}" ]; then
-        __RESH_XDG_CACHE_HOME="$HOME/.cache/resh"
-    else
-        __RESH_XDG_CACHE_HOME="$XDG_CACHE_HOME/resh"
-    fi
-    mkdir -p "$__RESH_XDG_CACHE_HOME" >/dev/null 2>/dev/null
-    export __RESH_XDG_CACHE_HOME
-
-
-    if [ -z "${XDG_DATA_HOME-}" ]; then
-        __RESH_XDG_DATA_HOME="$HOME/.local/share/resh"
-    else
-        __RESH_XDG_DATA_HOME="$XDG_DATA_HOME/resh"
-    fi
-    mkdir -p "$__RESH_XDG_DATA_HOME" >/dev/null 2>/dev/null
-    export __RESH_XDG_DATA_HOME
-}