diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..506762c8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +wails3 +build/ +vendor/ diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000..4153cd37 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,287 @@ + EUROPEAN UNION PUBLIC LICENCE v. 1.2 + EUPL © the European Union 2007, 2016 + +This European Union Public Licence (the ‘EUPL’) applies to the Work (as defined +below) which is provided under the terms of this Licence. Any use of the Work, +other than as authorised under this Licence is prohibited (to the extent such +use is covered by a right of the copyright holder of the Work). + +The Work is provided under the terms of this Licence when the Licensor (as +defined below) has placed the following notice immediately following the +copyright notice for the Work: + + Licensed under the EUPL + +or has expressed by any other means his willingness to license under the EUPL. + +1. Definitions + +In this Licence, the following terms have the following meaning: + +- ‘The Licence’: this Licence. + +- ‘The Original Work’: the work or software distributed or communicated by the + Licensor under this Licence, available as Source Code and also as Executable + Code as the case may be. + +- ‘Derivative Works’: the works or software that could be created by the + Licensee, based upon the Original Work or modifications thereof. This Licence + does not define the extent of modification or dependence on the Original Work + required in order to classify a work as a Derivative Work; this extent is + determined by copyright law applicable in the country mentioned in Article 15. + +- ‘The Work’: the Original Work or its Derivative Works. + +- ‘The Source Code’: the human-readable form of the Work which is the most + convenient for people to study and modify. + +- ‘The Executable Code’: any code which has generally been compiled and which is + meant to be interpreted by a computer as a program. + +- ‘The Licensor’: the natural or legal person that distributes or communicates + the Work under the Licence. + +- ‘Contributor(s)’: any natural or legal person who modifies the Work under the + Licence, or otherwise contributes to the creation of a Derivative Work. + +- ‘The Licensee’ or ‘You’: any natural or legal person who makes any usage of + the Work under the terms of the Licence. + +- ‘Distribution’ or ‘Communication’: any act of selling, giving, lending, + renting, distributing, communicating, transmitting, or otherwise making + available, online or offline, copies of the Work or providing access to its + essential functionalities at the disposal of any other natural or legal + person. + +2. Scope of the rights granted by the Licence + +The Licensor hereby grants You a worldwide, royalty-free, non-exclusive, +sublicensable licence to do the following, for the duration of copyright vested +in the Original Work: + +- use the Work in any circumstance and for all usage, +- reproduce the Work, +- modify the Work, and make Derivative Works based upon the Work, +- communicate to the public, including the right to make available or display + the Work or copies thereof to the public and perform publicly, as the case may + be, the Work, +- distribute the Work or copies thereof, +- lend and rent the Work or copies thereof, +- sublicense rights in the Work or copies thereof. + +Those rights can be exercised on any media, supports and formats, whether now +known or later invented, as far as the applicable law permits so. 
+ +In the countries where moral rights apply, the Licensor waives his right to +exercise his moral right to the extent allowed by law in order to make effective +the licence of the economic rights here above listed. + +The Licensor grants to the Licensee royalty-free, non-exclusive usage rights to +any patents held by the Licensor, to the extent necessary to make use of the +rights granted on the Work under this Licence. + +3. Communication of the Source Code + +The Licensor may provide the Work either in its Source Code form, or as +Executable Code. If the Work is provided as Executable Code, the Licensor +provides in addition a machine-readable copy of the Source Code of the Work +along with each copy of the Work that the Licensor distributes or indicates, in +a notice following the copyright notice attached to the Work, a repository where +the Source Code is easily and freely accessible for as long as the Licensor +continues to distribute or communicate the Work. + +4. Limitations on copyright + +Nothing in this Licence is intended to deprive the Licensee of the benefits from +any exception or limitation to the exclusive rights of the rights owners in the +Work, of the exhaustion of those rights or of other applicable limitations +thereto. + +5. Obligations of the Licensee + +The grant of the rights mentioned above is subject to some restrictions and +obligations imposed on the Licensee. Those obligations are the following: + +Attribution right: The Licensee shall keep intact all copyright, patent or +trademarks notices and all notices that refer to the Licence and to the +disclaimer of warranties. The Licensee must include a copy of such notices and a +copy of the Licence with every copy of the Work he/she distributes or +communicates. The Licensee must cause any Derivative Work to carry prominent +notices stating that the Work has been modified and the date of modification. + +Copyleft clause: If the Licensee distributes or communicates copies of the +Original Works or Derivative Works, this Distribution or Communication will be +done under the terms of this Licence or of a later version of this Licence +unless the Original Work is expressly distributed only under this version of the +Licence — for example by communicating ‘EUPL v. 1.2 only’. The Licensee +(becoming Licensor) cannot offer or impose any additional terms or conditions on +the Work or Derivative Work that alter or restrict the terms of the Licence. + +Compatibility clause: If the Licensee Distributes or Communicates Derivative +Works or copies thereof based upon both the Work and another work licensed under +a Compatible Licence, this Distribution or Communication can be done under the +terms of this Compatible Licence. For the sake of this clause, ‘Compatible +Licence’ refers to the licences listed in the appendix attached to this Licence. +Should the Licensee's obligations under the Compatible Licence conflict with +his/her obligations under this Licence, the obligations of the Compatible +Licence shall prevail. + +Provision of Source Code: When distributing or communicating copies of the Work, +the Licensee will provide a machine-readable copy of the Source Code or indicate +a repository where this Source will be easily and freely available for as long +as the Licensee continues to distribute or communicate the Work. 
+ +Legal Protection: This Licence does not grant permission to use the trade names, +trademarks, service marks, or names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the copyright notice. + +6. Chain of Authorship + +The original Licensor warrants that the copyright in the Original Work granted +hereunder is owned by him/her or licensed to him/her and that he/she has the +power and authority to grant the Licence. + +Each Contributor warrants that the copyright in the modifications he/she brings +to the Work are owned by him/her or licensed to him/her and that he/she has the +power and authority to grant the Licence. + +Each time You accept the Licence, the original Licensor and subsequent +Contributors grant You a licence to their contributions to the Work, under the +terms of this Licence. + +7. Disclaimer of Warranty + +The Work is a work in progress, which is continuously improved by numerous +Contributors. It is not a finished work and may therefore contain defects or +‘bugs’ inherent to this type of development. + +For the above reason, the Work is provided under the Licence on an ‘as is’ basis +and without warranties of any kind concerning the Work, including without +limitation merchantability, fitness for a particular purpose, absence of defects +or errors, accuracy, non-infringement of intellectual property rights other than +copyright as stated in Article 6 of this Licence. + +This disclaimer of warranty is an essential part of the Licence and a condition +for the grant of any rights to the Work. + +8. Disclaimer of Liability + +Except in the cases of wilful misconduct or damages directly caused to natural +persons, the Licensor will in no event be liable for any direct or indirect, +material or moral, damages of any kind, arising out of the Licence or of the use +of the Work, including without limitation, damages for loss of goodwill, work +stoppage, computer failure or malfunction, loss of data or any commercial +damage, even if the Licensor has been advised of the possibility of such damage. +However, the Licensor will be liable under statutory product liability laws as +far such laws apply to the Work. + +9. Additional agreements + +While distributing the Work, You may choose to conclude an additional agreement, +defining obligations or services consistent with this Licence. However, if +accepting obligations, You may act only on your own behalf and on your sole +responsibility, not on behalf of the original Licensor or any other Contributor, +and only if You agree to indemnify, defend, and hold each Contributor harmless +for any liability incurred by, or claims asserted against such Contributor by +the fact You have accepted any warranty or additional liability. + +10. Acceptance of the Licence + +The provisions of this Licence can be accepted by clicking on an icon ‘I agree’ +placed under the bottom of a window displaying the text of this Licence or by +affirming consent in any other similar way, in accordance with the rules of +applicable law. Clicking on that icon indicates your clear and irrevocable +acceptance of this Licence and all of its terms and conditions. + +Similarly, you irrevocably accept this Licence and all of its terms and +conditions by exercising any rights granted to You by Article 2 of this Licence, +such as the use of the Work, the creation by You of a Derivative Work or the +Distribution or Communication by You of the Work or copies thereof. + +11. 
Information to the public + +In case of any Distribution or Communication of the Work by means of electronic +communication by You (for example, by offering to download the Work from a +remote location) the distribution channel or media (for example, a website) must +at least provide to the public the information requested by the applicable law +regarding the Licensor, the Licence and the way it may be accessible, concluded, +stored and reproduced by the Licensee. + +12. Termination of the Licence + +The Licence and the rights granted hereunder will terminate automatically upon +any breach by the Licensee of the terms of the Licence. + +Such a termination will not terminate the licences of any person who has +received the Work from the Licensee under the Licence, provided such persons +remain in full compliance with the Licence. + +13. Miscellaneous + +Without prejudice of Article 9 above, the Licence represents the complete +agreement between the Parties as to the Work. + +If any provision of the Licence is invalid or unenforceable under applicable +law, this will not affect the validity or enforceability of the Licence as a +whole. Such provision will be construed or reformed so as necessary to make it +valid and enforceable. + +The European Commission may publish other linguistic versions or new versions of +this Licence or updated versions of the Appendix, so far this is required and +reasonable, without reducing the scope of the rights granted by the Licence. New +versions of the Licence will be published with a unique version number. + +All linguistic versions of this Licence, approved by the European Commission, +have identical value. Parties can take advantage of the linguistic version of +their choice. + +14. Jurisdiction + +Without prejudice to specific agreement between parties, + +- any litigation resulting from the interpretation of this License, arising + between the European Union institutions, bodies, offices or agencies, as a + Licensor, and any Licensee, will be subject to the jurisdiction of the Court + of Justice of the European Union, as laid down in article 272 of the Treaty on + the Functioning of the European Union, + +- any litigation arising between other parties and resulting from the + interpretation of this License, will be subject to the exclusive jurisdiction + of the competent court where the Licensor resides or conducts its primary + business. + +15. Applicable Law + +Without prejudice to specific agreement between parties, + +- this Licence shall be governed by the law of the European Union Member State + where the Licensor has his seat, resides or has his registered office, + +- this licence shall be governed by Belgian law if the Licensor has no seat, + residence or registered office inside a European Union Member State. + +Appendix + +‘Compatible Licences’ according to Article 5 EUPL are: + +- GNU General Public License (GPL) v. 2, v. 3 +- GNU Affero General Public License (AGPL) v. 3 +- Open Software License (OSL) v. 2.1, v. 3.0 +- Eclipse Public License (EPL) v. 1.0 +- CeCILL v. 2.0, v. 2.1 +- Mozilla Public Licence (MPL) v. 2 +- GNU Lesser General Public Licence (LGPL) v. 2.1, v. 3 +- Creative Commons Attribution-ShareAlike v. 3.0 Unported (CC BY-SA 3.0) for + works other than software +- European Union Public Licence (EUPL) v. 1.1, v. 1.2 +- Québec Free and Open-Source Licence — Reciprocity (LiLiQ-R) or Strong + Reciprocity (LiLiQ-R+). 
+
+The European Commission may update this Appendix to later versions of the above
+licences without producing a new version of the EUPL, as long as they provide
+the rights granted in Article 2 of this Licence and protect the covered Source
+Code from exclusive appropriation.
+
+All other changes or additions to this Appendix require the production of a new
+EUPL version.
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..ab6bd3a4
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,2 @@
+all:
+	go build -o build/bin/core cmd/app/main.go
\ No newline at end of file
diff --git a/README.md b/README.md
index e8217fe2..14171f2a 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,42 @@
-# Core
\ No newline at end of file
+# Core
+
+A helper library for Go projects, designed for (but not exclusive to) applications built with Wails.io v3+.
+
+You need a file called `apptray.png` in your assets folder:
+```go
+package main
+
+import (
+	"embed"
+
+	"github.com/Snider/Core"
+	"github.com/Snider/Core/config"
+	"github.com/Snider/Core/display"
+	"github.com/wailsapp/wails/v3/pkg/application"
+)
+
+//go:embed all:public/*
+var assets embed.FS
+
+func main() {
+
+	app := application.New(application.Options{
+		Assets: application.AssetOptions{
+			Handler: application.AssetFileServerFS(assets),
+		},
+	})
+
+	app.RegisterService(application.NewService(core.Service(
+		core.WithWails(app),                // Provides the Wails application instance to core services
+		core.WithAssets(assets),            // Provides the embed.FS to core services
+		core.WithService(display.Register), // Provides the ability to open windows
+		core.WithService(config.Register),  // Provides the ability to persist UI state (windows reopen where they closed)
+		core.WithServiceLock(),             // Locks core so no further services can be registered
+	)))
+
+	err := app.Run()
+	if err != nil {
+		panic(err)
+	}
+}
+```
\ No newline at end of file
diff --git a/Taskfile.yaml b/Taskfile.yaml
new file mode 100644
index 00000000..877af8c3
--- /dev/null
+++ b/Taskfile.yaml
@@ -0,0 +1,6 @@
+version: '3'
+
+tasks:
+  build:
+    cmds:
+      - go build -o build/bin/core cmd/app/main.go
diff --git a/actions.go b/actions.go
new file mode 100644
index 00000000..66e10079
--- /dev/null
+++ b/actions.go
@@ -0,0 +1,3 @@
+package core
+
+type ActionServiceStartup struct{}
diff --git a/cmd/app/frontend/dist/assets/app.js b/cmd/app/frontend/dist/assets/app.js
new file mode 100644
index 00000000..28abaa36
--- /dev/null
+++ b/cmd/app/frontend/dist/assets/app.js
@@ -0,0 +1 @@
+console.log("Hello from app.js!");
diff --git a/cmd/app/frontend/dist/assets/apptray.png b/cmd/app/frontend/dist/assets/apptray.png
new file mode 100644
index 00000000..0778fc61
Binary files /dev/null and b/cmd/app/frontend/dist/assets/apptray.png differ
diff --git a/cmd/app/frontend/dist/index.html b/cmd/app/frontend/dist/index.html
new file mode 100644
index 00000000..916c9c4b
--- /dev/null
+++ b/cmd/app/frontend/dist/index.html
@@ -0,0 +1,10 @@ + + + + Core + + +

Core

+
+
+
diff --git a/cmd/app/main.go b/cmd/app/main.go
new file mode 100644
index 00000000..abfa33da
--- /dev/null
+++ b/cmd/app/main.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"embed"
+
+	"github.com/Snider/Core"
+	"github.com/Snider/Core/config"
+	"github.com/Snider/Core/display"
+	"github.com/wailsapp/wails/v3/pkg/application"
+)
+
+//go:embed all:frontend/dist
+var assets embed.FS
+
+func main() {
+
+	app := application.New(application.Options{
+		Assets: application.AssetOptions{
+			Handler: application.AssetFileServerFS(assets),
+		},
+	})
+
+	app.RegisterService(application.NewService(core.Service(
+		core.WithWails(app),                // Provides the Wails application instance to core services
+		core.WithAssets(assets),            // Provides the embed.FS to core services
+		core.WithService(display.Register), // Provides the ability to open windows
+		core.WithService(config.Register),  // Provides the ability to persist UI state (windows reopen where they closed)
+		core.WithServiceLock(),             // Locks core so no further services can be registered
+	)))
+
+	err := app.Run()
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 00000000..62f39761
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,128 @@
+package config
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	core "github.com/Snider/Core"
+	"github.com/adrg/xdg"
+)
+
+const appName = "lethean"
+const configFileName = "config.json"
+
+// ErrSetupRequired is returned by ServiceStartup if config.json is missing.
+var ErrSetupRequired = errors.New("setup required: config.json not found")
+
+// service is the package-level configuration instance registered with core.
+var service *Config
+
+// Register creates the configuration service with its default paths and
+// registers it with the provided core instance as the "config" module.
+func Register(c *core.Core) error {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		return fmt.Errorf("could not resolve user home directory: %w", err)
+	}
+	userHomeDir := filepath.Join(homeDir, appName)
+	configDir := filepath.Join(userHomeDir, "config")
+	//configPath := filepath.Join(configDir, configFileName)
+
+	service = &Config{
+		core:          c,
+		UserHomeDir:   userHomeDir,
+		ConfigDir:     configDir,
+		DataDir:       filepath.Join(userHomeDir, "data"),
+		WorkspacesDir: filepath.Join(userHomeDir, "workspaces"),
+		DefaultRoute:  "/",
+		Features:      []string{},
+		Language:      "en",
+	}
+
+	return c.RegisterModule("config", service)
+}
+
+// newDefaultConfig creates a default configuration with resolved paths and ensures directories exist.
+func newDefaultConfig() (*Config, error) { + if strings.Contains(appName, "..") || strings.Contains(appName, string(filepath.Separator)) { + return nil, fmt.Errorf("invalid app name '%s': contains path traversal characters", appName) + } + + homeDir, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("could not resolve user home directory: %w", err) + } + userHomeDir := filepath.Join(homeDir, appName) + + rootDir, err := xdg.DataFile(appName) + if err != nil { + return nil, fmt.Errorf("could not resolve data directory: %w", err) + } + + cacheDir, err := xdg.CacheFile(appName) + if err != nil { + return nil, fmt.Errorf("could not resolve cache directory: %w", err) + } + + cfg := &Config{ + UserHomeDir: userHomeDir, + RootDir: rootDir, + CacheDir: cacheDir, + ConfigDir: filepath.Join(userHomeDir, "config"), + DataDir: filepath.Join(userHomeDir, "data"), + WorkspacesDir: filepath.Join(userHomeDir, "workspaces"), + DefaultRoute: "/", + Features: []string{}, + Language: "en", // Hardcoded default, will be overridden if loaded or detected + } + + dirs := []string{cfg.RootDir, cfg.ConfigDir, cfg.DataDir, cfg.CacheDir, cfg.WorkspacesDir, cfg.UserHomeDir} + for _, dir := range dirs { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, fmt.Errorf("could not create directory %s: %w", dir, err) + } + } + + return cfg, nil +} + +// Save writes the current configuration to config.json. +func (c *Config) Save() error { + configPath := filepath.Join(c.ConfigDir, configFileName) + + data, err := json.MarshalIndent(*c, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal config: %w", err) + } + + if err := os.WriteFile(configPath, data, 0644); err != nil { + return fmt.Errorf("failed to write config file: %w", err) + } + return nil +} + +// IsFeatureEnabled checks if a given feature is enabled in the configuration. +func (c *Config) IsFeatureEnabled(feature string) bool { + for _, f := range c.Features { + if f == feature { + return true + } + } + return false +} + +// EnableFeature adds a feature to the list of enabled features and saves the config. +func (c *Config) EnableFeature(feature string) error { + if c.IsFeatureEnabled(feature) { + return nil + } + c.Features = append(c.Features, feature) + if err := c.Save(); err != nil { + return fmt.Errorf("failed to save config after enabling feature %s: %w", feature, err) + } + return nil +} diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000..b6757c4e --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,81 @@ +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/Snider/Core" +) + +// setupTestEnv creates a temporary home directory for testing. +func setupTestEnv(t *testing.T) (string, func()) { + tempHomeDir, err := os.MkdirTemp("", "test_home") + if err != nil { + t.Fatalf("Failed to create temp home directory: %v", err) + } + + oldHome := os.Getenv("HOME") + os.Setenv("HOME", tempHomeDir) + + cleanup := func() { + os.Setenv("HOME", oldHome) + os.RemoveAll(tempHomeDir) + } + + return tempHomeDir, cleanup +} + +// newTestCore creates a new, empty core instance for testing. 
+func newTestCore(t *testing.T) *core.Core { + c := core.Service() + if c == nil { + t.Fatalf("core.Service() returned a nil instance, which is not expected for a test setup") + } + return c +} + +func TestRegister(t *testing.T) { + tempHomeDir, cleanup := setupTestEnv(t) + defer cleanup() + + c := newTestCore(t) + + if err := Register(c); err != nil { + t.Fatalf("Register() failed: %v", err) + } + + mod := c.Mod("config") + if mod == nil { + t.Fatalf("Failed to get config module from core instance") + } + + cfg, ok := mod.(*Config) + if !ok { + t.Fatalf("Module is not of type *Config") + } + + expectedUserHomeDir := filepath.Join(tempHomeDir, appName) + expectedConfigDir := filepath.Join(expectedUserHomeDir, "config") + expectedDataDir := filepath.Join(expectedUserHomeDir, "data") + expectedWorkspacesDir := filepath.Join(expectedUserHomeDir, "workspaces") + + tests := []struct { + name string + actual string + expected string + }{ + {"UserHomeDir", cfg.UserHomeDir, expectedUserHomeDir}, + {"ConfigDir", cfg.ConfigDir, expectedConfigDir}, + {"DataDir", cfg.DataDir, expectedDataDir}, + {"WorkspacesDir", cfg.WorkspacesDir, expectedWorkspacesDir}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.actual != tt.expected { + t.Errorf("Mismatch for %s: got %q, want %q", tt.name, tt.actual, tt.expected) + } + }) + } +} diff --git a/config/header.go b/config/header.go new file mode 100644 index 00000000..ae5da433 --- /dev/null +++ b/config/header.go @@ -0,0 +1,55 @@ +package config + +import ( + "fmt" + "reflect" + "strings" + + core "github.com/Snider/Core" +) + +// Config holds the resolved paths and user-configurable settings for the application. +type Config struct { + // --- Dynamic Paths (not stored in config.json) --- + core *core.Core + DataDir string `json:"-"` + ConfigDir string `json:"-"` + CacheDir string `json:"-"` + WorkspacesDir string `json:"-"` + RootDir string `json:"-"` + UserHomeDir string `json:"-"` + IsNew bool `json:"-"` // Flag indicating if the config was newly created. + + // --- Storable Settings (persisted in config.json) --- + DefaultRoute string `json:"defaultRoute,omitempty"` + Features []string `json:"features,omitempty"` + Language string `json:"language,omitempty"` +} + +// Key retrieves a configuration value by its key. It checks JSON tags and field names (case-insensitive). +func (c *Config) Key(key string) (interface{}, error) { + // Use reflection to inspect the struct fields. + val := reflect.ValueOf(c).Elem() + typ := val.Type() + + for i := 0; i < val.NumField(); i++ { + field := typ.Field(i) + fieldName := field.Name + + // Check the field name first. + if strings.EqualFold(fieldName, key) { + return val.Field(i).Interface(), nil + } + + // Then check the `json` tag. + jsonTag := field.Tag.Get("json") + if jsonTag != "" && jsonTag != "-" { + jsonName := strings.Split(jsonTag, ",")[0] + if strings.EqualFold(jsonName, key) { + return val.Field(i).Interface(), nil + } + } + } + + return nil, fmt.Errorf("key '%s' not found in config", key) +} diff --git a/core.go b/core.go new file mode 100644 index 00000000..eb2a1199 --- /dev/null +++ b/core.go @@ -0,0 +1,139 @@ +package core + +import ( + "context" + "embed" + "errors" + "fmt" + + "github.com/wailsapp/wails/v3/pkg/application" +) + +// Service initialises a Core instance using the provided options and performs the necessary setup. 
+func Service(opts ...Option) *Core {
+	c := &Core{
+		mods: make(map[string]any),
+	}
+	// Apply all options (including WithService calls)
+	for _, o := range opts {
+		if err := o(c); err != nil {
+			return nil
+		}
+	}
+	c.once.Do(func() {
+		// any one-time initialisation you need
+		instance = c
+		c.initErr = nil
+	})
+	if c.initErr != nil {
+		return nil
+	}
+	if c.serviceLock {
+		c.servicesLocked = true
+	}
+	return c
+}
+
+// WithService wraps a function that registers a package or module with the provided Core instance as an Option.
+func WithService(reg func(*Core) error) Option {
+	return func(c *Core) error {
+		return reg(c)
+	}
+}
+
+// WithWails sets the Wails application instance on the Core configuration and returns an Option function.
+func WithWails(app *application.App) Option {
+	return func(c *Core) error {
+		c.App = app
+		return nil
+	}
+}
+
+// WithAssets sets the provided embedded filesystem as the assets for the Core instance.
+func WithAssets(fs embed.FS) Option {
+	return func(c *Core) error {
+		c.assets = fs
+		return nil
+	}
+}
+
+func WithServiceLock() Option {
+	return func(c *Core) error {
+		c.serviceLock = true
+		return nil
+	}
+}
+
+// ServiceStartup initializes the service during application startup by dispatching the ActionServiceStartup message.
+func (c *Core) ServiceStartup(ctx context.Context, options application.ServiceOptions) error {
+	return c.ACTION(ActionServiceStartup{})
+}
+
+// ACTION processes a Message by invoking all registered handlers and returns an aggregated error if any handlers fail.
+func (c *Core) ACTION(msg Message) error {
+	c.ipcMu.RLock()
+	handlers := append([]func(*Core, Message) error(nil), c.ipcHandlers...)
+	c.ipcMu.RUnlock()
+
+	var agg error
+	for _, h := range handlers {
+		if err := h(c, msg); err != nil {
+			agg = errors.Join(agg, err)
+		}
+	}
+	return agg
+}
+
+// RegisterAction adds a single handler function to the list of registered IPC handlers in a thread-safe manner.
+func (c *Core) RegisterAction(handler func(*Core, Message) error) {
+	c.ipcMu.Lock()
+	c.ipcHandlers = append(c.ipcHandlers, handler)
+	c.ipcMu.Unlock()
+}
+
+// RegisterActions registers multiple IPC handler functions to be executed during message processing in a thread-safe manner.
+func (c *Core) RegisterActions(handlers ...func(*Core, Message) error) {
+	c.ipcMu.Lock()
+	c.ipcHandlers = append(c.ipcHandlers, handlers...)
+	c.ipcMu.Unlock()
+}
+
+// RegisterModule inserts an API object under a unique name.
+func (c *Core) RegisterModule(name string, api any) error {
+
+	if c.servicesLocked {
+		return fmt.Errorf("core: module %q is not permitted by the serviceLock setting", name)
+	}
+
+	if name == "" {
+		return errors.New("core: module name cannot be empty")
+	}
+	c.modMu.Lock()
+	defer c.modMu.Unlock()
+	if _, exists := c.mods[name]; exists {
+		return fmt.Errorf("core: module %q already registered", name)
+	}
+	c.mods[name] = api
+	return nil
+}
+
+// Mod returns the module registered under name, or nil if it is absent; the caller must type-assert the result to the concrete API type it expects.
+func (c *Core) Mod(name string) any {
+	c.modMu.RLock()
+	api, ok := c.mods[name]
+	c.modMu.RUnlock()
+	if !ok {
+		return nil
+	}
+	return api
+}
+
+// Mod is a generic helper to get a module of expected type T.
+func Mod[T any](c *Core, name string) *T { + raw := c.Mod(name) + typed, ok := raw.(*T) + if !ok { + return nil + } + return typed +} diff --git a/crypt/crypt.go b/crypt/crypt.go new file mode 100644 index 00000000..49d590e7 --- /dev/null +++ b/crypt/crypt.go @@ -0,0 +1,23 @@ +package crypt + +import ( + "github.com/Snider/Core/config" +) + +// HashType defines the supported hashing algorithms. +type HashType string + +const ( + LTHN HashType = "lthn" + SHA512 HashType = "sha512" + SHA256 HashType = "sha256" + SHA1 HashType = "sha1" + MD5 HashType = "md5" +) + +// Service provides cryptographic functions. +// It is the main entry point for all cryptographic operations +// and is bound to the frontend. +type Service struct { + config *config.Config +} diff --git a/crypt/crypt_test.go b/crypt/crypt_test.go new file mode 100644 index 00000000..2cde5070 --- /dev/null +++ b/crypt/crypt_test.go @@ -0,0 +1,20 @@ +package crypt + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHash(t *testing.T) { + s := &Service{} + payload := "hello" + hash := s.Hash(LTHN, payload) + assert.NotEmpty(t, hash) +} + +func TestLuhn(t *testing.T) { + s := &Service{} + assert.True(t, s.Luhn("79927398713")) + assert.False(t, s.Luhn("79927398714")) +} diff --git a/crypt/hash.go b/crypt/hash.go new file mode 100644 index 00000000..5ca021f0 --- /dev/null +++ b/crypt/hash.go @@ -0,0 +1,33 @@ +package crypt + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + + "github.com/Snider/Core/crypt/lib/lthn" +) + +// Hash computes a hash of the payload using the specified algorithm. +func (s *Service) Hash(lib HashType, payload string) string { + switch lib { + case LTHN: + return lthn.Hash(payload) + case SHA512: + hash := sha512.Sum512([]byte(payload)) + return hex.EncodeToString(hash[:]) + case SHA1: + hash := sha1.Sum([]byte(payload)) + return hex.EncodeToString(hash[:]) + case MD5: + hash := md5.Sum([]byte(payload)) + return hex.EncodeToString(hash[:]) + case SHA256: + fallthrough + default: + hash := sha256.Sum256([]byte(payload)) + return hex.EncodeToString(hash[:]) + } +} diff --git a/crypt/lib/lthn/hash.go b/crypt/lib/lthn/hash.go new file mode 100644 index 00000000..c9f0ac09 --- /dev/null +++ b/crypt/lib/lthn/hash.go @@ -0,0 +1,46 @@ +package lthn + +import ( + "crypto/sha256" + "encoding/hex" +) + +// SetKeyMap sets the key map for the notarisation process. +func SetKeyMap(newKeyMap map[rune]rune) { + keyMap = newKeyMap +} + +// GetKeyMap gets the current key map. +func GetKeyMap() map[rune]rune { + return keyMap +} + +// Hash creates a reproducible hash from a string. +func Hash(input string) string { + salt := createSalt(input) + hash := sha256.Sum256([]byte(input + salt)) + return hex.EncodeToString(hash[:]) +} + +// createSalt creates a quasi-salt from a string by reversing it and swapping characters. +func createSalt(input string) string { + if input == "" { + return "" + } + runes := []rune(input) + salt := make([]rune, len(runes)) + for i := 0; i < len(runes); i++ { + char := runes[len(runes)-1-i] + if replacement, ok := keyMap[char]; ok { + salt[i] = replacement + } else { + salt[i] = char + } + } + return string(salt) +} + +// Verify checks if an input string matches a given hash. 
+func Verifyf(input string, hash string) bool { + return Hash(input) == hash +} diff --git a/crypt/lib/lthn/hash_test.go b/crypt/lib/lthn/hash_test.go new file mode 100644 index 00000000..463ea5d6 --- /dev/null +++ b/crypt/lib/lthn/hash_test.go @@ -0,0 +1,48 @@ +package lthn + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHash(t *testing.T) { + input := "test_string" + expectedHash := "45d4027179b17265c38732fb1e7089a0b1adfe1d3ba4105fce66f7d46ba42f7d" + + hashed := Hash(input) + fmt.Printf("Hash for \"%s\": %s\n", input, hashed) + + assert.Equal(t, expectedHash, hashed, "The hash should match the expected value") +} + +func TestCreateSalt(t *testing.T) { + // Test with default keyMap + SetKeyMap(map[rune]rune{}) + assert.Equal(t, "gnirts_tset", createSalt("test_string")) + assert.Equal(t, "", createSalt("")) + assert.Equal(t, "A", createSalt("A")) + + // Test with a custom keyMap + customKeyMap := map[rune]rune{ + 'a': 'x', + 'b': 'y', + 'c': 'z', + } + SetKeyMap(customKeyMap) + assert.Equal(t, "zyx", createSalt("abc")) + assert.Equal(t, "gnirts_tset", createSalt("test_string")) // 'test_string' doesn't have 'a', 'b', 'c' + + // Reset keyMap to default for other tests + SetKeyMap(map[rune]rune{}) +} + +func TestVerify(t *testing.T) { + input := "another_test_string" + hashed := Hash(input) + + assert.True(t, Verifyf(input, hashed), "Verifyf should return true for a matching hash") + assert.False(t, Verifyf(input, "wrong_hash"), "Verifyf should return false for a non-matching hash") + assert.False(t, Verifyf("different_input", hashed), "Verifyf should return false for different input") +} diff --git a/crypt/lib/lthn/lthn.go b/crypt/lib/lthn/lthn.go new file mode 100644 index 00000000..5a1f6e1d --- /dev/null +++ b/crypt/lib/lthn/lthn.go @@ -0,0 +1,16 @@ +package lthn + +// keyMap is the default character-swapping map used for the quasi-salting process. +var keyMap = map[rune]rune{ + 'o': '0', + 'l': '1', + 'e': '3', + 'a': '4', + 's': 'z', + 't': '7', + '0': 'o', + '1': 'l', + '3': 'e', + '4': 'a', + '7': 't', +} diff --git a/crypt/lib/openpgp/encrypt.go b/crypt/lib/openpgp/encrypt.go new file mode 100644 index 00000000..4ed1f771 --- /dev/null +++ b/crypt/lib/openpgp/encrypt.go @@ -0,0 +1,106 @@ +package openpgp + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/Snider/Core/filesystem" +) + +// EncryptPGP encrypts data for a recipient, optionally signing it. 
+func EncryptPGP(medium filesystem.Medium, recipientPath, data string, signerPath, signerPassphrase *string) (string, error) { + recipient, err := GetPublicKey(medium, recipientPath) + if err != nil { + return "", fmt.Errorf("failed to get recipient public key: %w", err) + } + + var signer *openpgp.Entity + if signerPath != nil && signerPassphrase != nil { + signer, err = GetPrivateKey(medium, *signerPath, *signerPassphrase) + if err != nil { + return "", fmt.Errorf("could not get private key for signing: %w", err) + } + } + + buf := new(bytes.Buffer) + armoredWriter, err := armor.Encode(buf, pgpMessageHeader, nil) + if err != nil { + return "", fmt.Errorf("failed to create armored writer: %w", err) + } + + plaintextWriter, err := openpgp.Encrypt(armoredWriter, []*openpgp.Entity{recipient}, signer, nil, nil) + if err != nil { + return "", fmt.Errorf("failed to encrypt: %w", err) + } + + if _, err := plaintextWriter.Write([]byte(data)); err != nil { + return "", fmt.Errorf("failed to write plaintext data: %w", err) + } + + if err := plaintextWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close plaintext writer: %w", err) + } + if err := armoredWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close armored writer: %w", err) + } + + // Debug print the encrypted message + fmt.Printf("Encrypted Message:\n%s\n", buf.String()) + + return buf.String(), nil +} + +// DecryptPGP decrypts a PGP message, optionally verifying the signature. +func DecryptPGP(medium filesystem.Medium, recipientPath, message, passphrase string, signerPath *string) (string, error) { + privateKeyEntity, err := GetPrivateKey(medium, recipientPath, passphrase) + if err != nil { + return "", fmt.Errorf("failed to get private key: %w", err) + } + + // For this API version, the keyring must contain all keys for decryption and verification. + keyring := openpgp.EntityList{privateKeyEntity} + var expectedSigner *openpgp.Entity + + if signerPath != nil { + publicKeyEntity, err := GetPublicKey(medium, *signerPath) + if err != nil { + return "", fmt.Errorf("could not get public key for verification: %w", err) + } + keyring = append(keyring, publicKeyEntity) + expectedSigner = publicKeyEntity + } + + // Debug print the message before decryption + fmt.Printf("Message to Decrypt:\n%s\n", message) + + // We pass the combined keyring, and nil for the prompt function because the private key is already decrypted. + md, err := openpgp.ReadMessage(strings.NewReader(message), keyring, nil, nil) + if err != nil { + return "", fmt.Errorf("failed to read PGP message: %w", err) + } + + decrypted, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + return "", fmt.Errorf("failed to read decrypted body: %w", err) + } + + // The signature is checked automatically if the public key is in the keyring. + // We still need to check for errors and that the signer was who we expected. 
+ if signerPath != nil { + if md.SignatureError != nil { + return "", fmt.Errorf("signature verification failed: %w", md.SignatureError) + } + if md.SignedBy == nil { + return "", fmt.Errorf("message is not signed, but signature verification was requested") + } + if expectedSigner.PrimaryKey.KeyId != md.SignedBy.PublicKey.KeyId { + return "", fmt.Errorf("signature from unexpected key id: got %X, want %X", md.SignedBy.PublicKey.KeyId, expectedSigner.PrimaryKey.KeyId) + } + } + + return string(decrypted), nil +} diff --git a/crypt/lib/openpgp/key.go b/crypt/lib/openpgp/key.go new file mode 100644 index 00000000..8ee57eda --- /dev/null +++ b/crypt/lib/openpgp/key.go @@ -0,0 +1,226 @@ +package openpgp + +import ( + "bytes" + "crypto" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/Snider/Core/crypt/lib/lthn" + "github.com/Snider/Core/filesystem" +) + +// CreateKeyPair generates a new OpenPGP key pair. +// The password parameter is optional. If not provided, the private key will not be encrypted. +func CreateKeyPair(username string, passwords ...string) (*KeyPair, error) { + var password string + if len(passwords) > 0 { + password = passwords[0] + } + + entity, err := openpgp.NewEntity(username, "Lethean Desktop", "", &packet.Config{ + RSABits: 4096, + DefaultHash: crypto.SHA256, + }) + if err != nil { + return nil, fmt.Errorf("failed to create new entity: %w", err) + } + + // The private key is initially unencrypted after NewEntity. + // Generate revocation certificate while the private key is unencrypted. + revocationCert, err := createRevocationCertificate(entity) + if err != nil { + revocationCert = "" // Non-critical, proceed without it if it fails + } + + // Encrypt the private key only if a password is provided, after revocation cert generation. + if password != "" { + if err := entity.PrivateKey.Encrypt([]byte(password)); err != nil { + return nil, fmt.Errorf("failed to encrypt private key: %w", err) + } + } + + publicKey, err := serializeEntity(entity, openpgp.PublicKeyType, "") // Public key doesn't need password + if err != nil { + return nil, err + } + + // Private key serialization. The key is already in its final encrypted/unencrypted state. + privateKey, err := serializeEntity(entity, openpgp.PrivateKeyType, "") // No password needed here for serialization + if err != nil { + return nil, err + } + + return &KeyPair{ + PublicKey: publicKey, + PrivateKey: privateKey, + RevocationCertificate: revocationCert, + }, nil +} + +// CreateServerKeyPair creates and stores a key pair for the server in a specific directory. +func CreateServerKeyPair(keysDir string) error { + serverKeyPath := filepath.Join(keysDir, "server.lthn.pub") + // Passphrase is derived from the path itself, consistent with original logic. + passphrase := lthn.Hash(serverKeyPath) + return createAndStoreKeyPair("server", passphrase, keysDir) +} + +// GetPublicKey retrieves an armored public key for a given ID. +func GetPublicKey(medium filesystem.Medium, path string) (*openpgp.Entity, error) { + return readEntity(medium, path) +} + +// GetPrivateKey retrieves and decrypts an armored private key. 
+func GetPrivateKey(medium filesystem.Medium, path, passphrase string) (*openpgp.Entity, error) { + entity, err := readEntity(medium, path) + if err != nil { + return nil, err + } + + if entity.PrivateKey == nil { + return nil, fmt.Errorf("no private key found for path %s", path) + } + + if entity.PrivateKey.Encrypted { + if err := entity.PrivateKey.Decrypt([]byte(passphrase)); err != nil { + return nil, fmt.Errorf("failed to decrypt private key for path %s: %w", path, err) + } + } + + var primaryIdentity *openpgp.Identity + for _, identity := range entity.Identities { + if identity.SelfSignature.IsPrimaryId != nil && *identity.SelfSignature.IsPrimaryId { + primaryIdentity = identity + break + } + } + if primaryIdentity == nil { + for _, identity := range entity.Identities { + primaryIdentity = identity + break + } + } + + if primaryIdentity == nil { + return nil, fmt.Errorf("key for %s has no identity", path) + } + + if primaryIdentity.SelfSignature.KeyLifetimeSecs != nil { + if primaryIdentity.SelfSignature.CreationTime.Add(time.Duration(*primaryIdentity.SelfSignature.KeyLifetimeSecs) * time.Second).Before(time.Now()) { + return nil, fmt.Errorf("key for %s has expired", path) + } + } + + return entity, nil +} + +// --- Helper Functions --- + +func createAndStoreKeyPair(id, password, dir string) error { + var keyPair *KeyPair + var err error + + if password != "" { + keyPair, err = CreateKeyPair(id, password) + } else { + keyPair, err = CreateKeyPair(id) + } + + if err != nil { + return fmt.Errorf("failed to create key pair for id %s: %w", id, err) + } + + if err := filesystem.Local.EnsureDir(dir); err != nil { + return fmt.Errorf("failed to ensure key directory exists: %w", err) + } + + files := map[string]string{ + filepath.Join(dir, fmt.Sprintf("%s.lthn.pub", id)): keyPair.PublicKey, + filepath.Join(dir, fmt.Sprintf("%s.lthn.key", id)): keyPair.PrivateKey, + filepath.Join(dir, fmt.Sprintf("%s.lthn.rev", id)): keyPair.RevocationCertificate, // Re-enabled + } + + for path, content := range files { + if content == "" { + continue + } + if err := filesystem.Local.Write(path, content); err != nil { + return fmt.Errorf("failed to write key file %s: %w", path, err) + } + } + return nil +} + +func readEntity(m filesystem.Medium, path string) (*openpgp.Entity, error) { + keyArmored, err := m.Read(path) + if err != nil { + return nil, fmt.Errorf("failed to read key file %s: %w", path, err) + } + + entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(keyArmored)) + if err != nil { + return nil, fmt.Errorf("failed to parse key file %s: %w", path, err) + } + if len(entityList) == 0 { + return nil, fmt.Errorf("no entity found in key file %s", path) + } + return entityList[0], nil +} + +func serializeEntity(entity *openpgp.Entity, keyType string, password string) (string, error) { + buf := new(bytes.Buffer) + writer, err := armor.Encode(buf, keyType, nil) + if err != nil { + return "", fmt.Errorf("failed to create armor encoder: %w", err) + } + + if keyType == openpgp.PrivateKeyType { + // Serialize the private key in its current in-memory state. + // Encryption is handled by CreateKeyPair before this function is called. 
+ err = entity.SerializePrivateWithoutSigning(writer, nil) + } else { + err = entity.Serialize(writer) + } + + if err != nil { + return "", fmt.Errorf("failed to serialize entity: %w", err) + } + if err := writer.Close(); err != nil { + return "", fmt.Errorf("failed to close armor writer: %w", err) + } + return buf.String(), nil +} + +func createRevocationCertificate(entity *openpgp.Entity) (string, error) { + buf := new(bytes.Buffer) + writer, err := armor.Encode(buf, openpgp.SignatureType, nil) + if err != nil { + return "", fmt.Errorf("failed to create armor encoder for revocation: %w", err) + } + + sig := &packet.Signature{ + SigType: packet.SigTypeKeyRevocation, + PubKeyAlgo: entity.PrimaryKey.PubKeyAlgo, + Hash: crypto.SHA256, + CreationTime: time.Now(), + IssuerKeyId: &entity.PrimaryKey.KeyId, + } + + // SignKey requires an unencrypted private key. + if err := sig.SignKey(entity.PrimaryKey, entity.PrivateKey, nil); err != nil { + return "", fmt.Errorf("failed to sign revocation: %w", err) + } + if err := sig.Serialize(writer); err != nil { + return "", fmt.Errorf("failed to serialize revocation signature: %w", err) + } + if err := writer.Close(); err != nil { + return "", fmt.Errorf("failed to close revocation writer: %w", err) + } + return buf.String(), nil +} diff --git a/crypt/lib/openpgp/openpgp.go b/crypt/lib/openpgp/openpgp.go new file mode 100644 index 00000000..1e604a55 --- /dev/null +++ b/crypt/lib/openpgp/openpgp.go @@ -0,0 +1,12 @@ +package openpgp + +// pgpMessageHeader is the standard armor header for PGP messages. +const pgpMessageHeader = "PGP MESSAGE" + +// KeyPair holds the generated armored keys and revocation certificate. +// This is the primary data structure representing a user's PGP identity within the system. +type KeyPair struct { + PublicKey string + PrivateKey string + RevocationCertificate string +} diff --git a/crypt/lib/openpgp/sign.go b/crypt/lib/openpgp/sign.go new file mode 100644 index 00000000..4d999976 --- /dev/null +++ b/crypt/lib/openpgp/sign.go @@ -0,0 +1,39 @@ +package openpgp + +import ( + "bytes" + "fmt" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/Snider/Core/filesystem" +) + +// Sign creates a detached signature for the data. +func Sign(medium filesystem.Medium, data, privateKeyPath, passphrase string) (string, error) { + signer, err := GetPrivateKey(medium, privateKeyPath, passphrase) + if err != nil { + return "", fmt.Errorf("failed to get private key for signing: %w", err) + } + + buf := new(bytes.Buffer) + if err := openpgp.ArmoredDetachSign(buf, signer, strings.NewReader(data), nil); err != nil { + return "", fmt.Errorf("failed to create detached signature: %w", err) + } + + return buf.String(), nil +} + +// Verify checks a detached signature. 
+func Verify(medium filesystem.Medium, data, signature, publicKeyPath string) (bool, error) { + keyring, err := GetPublicKey(medium, publicKeyPath) + if err != nil { + return false, fmt.Errorf("failed to get public key for verification: %w", err) + } + + _, err = openpgp.CheckArmoredDetachedSignature(openpgp.EntityList{keyring}, strings.NewReader(data), strings.NewReader(signature), nil) + if err != nil { + return false, fmt.Errorf("signature verification failed: %w", err) + } + return true, nil +} diff --git a/crypt/service.go b/crypt/service.go new file mode 100644 index 00000000..8690ecf5 --- /dev/null +++ b/crypt/service.go @@ -0,0 +1,43 @@ +package crypt + +import ( + "context" + "fmt" + "log" + "path/filepath" + + "github.com/Snider/Core/config" + "github.com/Snider/Core/crypt/lib/openpgp" + "github.com/Snider/Core/filesystem" + "github.com/wailsapp/wails/v3/pkg/application" +) + +// createServerKeyPair is a package-level variable that can be swapped for testing. +var createServerKeyPair = openpgp.CreateServerKeyPair + +// NewService creates a new crypt.Service, accepting a config service instance. +func NewService(cfg *config.Config) *Service { + return &Service{ + config: cfg, + } +} + +// ServiceStartup Startup is called when the app starts. It handles one-time cryptographic setup. +func (s *Service) ServiceStartup(ctx context.Context, options application.ServiceOptions) error { + // Define the directory for server keys based on the central config. + serverKeysDir := filepath.Join(s.config.DataDir, "server_keys") + if err := filesystem.EnsureDir(filesystem.Local, serverKeysDir); err != nil { + return fmt.Errorf("failed to create server keys directory: %w", err) + } + + // Check for server key pair using the configured path. + serverKeyPath := filepath.Join(serverKeysDir, "server.lthn.pub") + if !filesystem.IsFile(filesystem.Local, serverKeyPath) { + log.Println("Creating server key pair...") + if err := createServerKeyPair(serverKeysDir); err != nil { + return fmt.Errorf("failed to create server key pair: %w", err) + } + log.Println("Server key pair created.") + } + return nil +} diff --git a/crypt/sum.go b/crypt/sum.go new file mode 100644 index 00000000..74530374 --- /dev/null +++ b/crypt/sum.go @@ -0,0 +1,77 @@ +package crypt + +import ( + "encoding/binary" + "strconv" + "strings" +) + +// Luhn validates a number using the Luhn algorithm. +func (s *Service) Luhn(payload string) bool { + payload = strings.ReplaceAll(payload, " ", "") + sum := 0 + isSecond := false + for i := len(payload) - 1; i >= 0; i-- { + digit, err := strconv.Atoi(string(payload[i])) + if err != nil { + return false // Contains non-digit + } + + if isSecond { + digit = digit * 2 + if digit > 9 { + digit = digit - 9 + } + } + + sum += digit + isSecond = !isSecond + } + return sum%10 == 0 +} + +// Fletcher16 computes the Fletcher-16 checksum. +func (s *Service) Fletcher16(payload string) uint16 { + data := []byte(payload) + var sum1, sum2 uint16 + for _, b := range data { + sum1 = (sum1 + uint16(b)) % 255 + sum2 = (sum2 + sum1) % 255 + } + return (sum2 << 8) | sum1 +} + +// Fletcher32 computes the Fletcher-32 checksum. 
+func (s *Service) Fletcher32(payload string) uint32 { + data := []byte(payload) + // Pad with 0 to make it even length for uint16 conversion + if len(data)%2 != 0 { + data = append(data, 0) + } + + var sum1, sum2 uint32 + for i := 0; i < len(data); i += 2 { + val := binary.LittleEndian.Uint16(data[i : i+2]) + sum1 = (sum1 + uint32(val)) % 65535 + sum2 = (sum2 + sum1) % 65535 + } + return (sum2 << 16) | sum1 +} + +// Fletcher64 computes the Fletcher-64 checksum. +func (s *Service) Fletcher64(payload string) uint64 { + data := []byte(payload) + // Pad to multiple of 4 + if len(data)%4 != 0 { + padding := 4 - (len(data) % 4) + data = append(data, make([]byte, padding)...) + } + + var sum1, sum2 uint64 + for i := 0; i < len(data); i += 4 { + val := binary.LittleEndian.Uint32(data[i : i+4]) + sum1 = (sum1 + uint64(val)) % 4294967295 + sum2 = (sum2 + sum1) % 4294967295 + } + return (sum2 << 32) | sum1 +} diff --git a/display/display.go b/display/display.go new file mode 100644 index 00000000..40907bd1 --- /dev/null +++ b/display/display.go @@ -0,0 +1,154 @@ +package display + +import ( + "context" + "fmt" + + "github.com/Snider/Core" + "github.com/wailsapp/wails/v3/pkg/application" + "github.com/wailsapp/wails/v3/pkg/events" +) + +type ActionOpenWindow struct { + Target string +} + +var instance *API + +func Register(c *core.Core) error { + instance = &API{ + core: c, + windowHandles: make(map[string]*application.WebviewWindow), + } + if err := c.RegisterModule("display", instance); err != nil { + return err + } + c.RegisterAction(handleActionCall) + return nil +} + +func handleActionCall(c *core.Core, msg core.Message) error { + switch m := msg.(type) { + case *ActionOpenWindow: + instance.OpenWindow(m.Target, application.WebviewWindowOptions{ + Title: "Core", + Height: 900, + Width: 1280, + URL: m.Target, + }) + return nil + case core.ActionServiceStartup: + err := instance.ServiceStartup(context.Background(), application.ServiceOptions{}) + if err != nil { + return err + } + return nil + default: + c.App.Logger.Error("Unknown message type", "type", fmt.Sprintf("%T", m)) + return nil + } +} + +func (d *API) analyzeScreens() { + d.core.App.Logger.Info("Screen analysis", "count", len(d.core.App.Screen.GetAll())) + + primary := d.core.App.Screen.GetPrimary() + if primary != nil { + d.core.App.Logger.Info("Primary screen", + "name", primary.Name, + "size", fmt.Sprintf("%dx%d", primary.Size.Width, primary.Size.Height), + "scaleFactor", primary.ScaleFactor, + "workArea", primary.WorkArea, + ) + scaleFactor := primary.ScaleFactor + + switch { + case scaleFactor == 1.0: + d.core.App.Logger.Info("Standard DPI display", "screen", primary.Name) + case scaleFactor == 1.25: + d.core.App.Logger.Info("125% scaled display", "screen", primary.Name) + case scaleFactor == 1.5: + d.core.App.Logger.Info("150% scaled display", "screen", primary.Name) + case scaleFactor == 2.0: + d.core.App.Logger.Info("High DPI display (200%)", "screen", primary.Name) + default: + d.core.App.Logger.Info("Custom scale display", + "screen", primary.Name, + "scale", scaleFactor, + ) + } + } else { + d.core.App.Logger.Info("No primary screen found") + } + + for i, screen := range d.core.App.Screen.GetAll() { + d.core.App.Logger.Info("Screen details", + "index", i, + "name", screen.Name, + "primary", screen.IsPrimary, + "bounds", screen.Bounds, + "scaleFactor", screen.ScaleFactor, + ) + } +} + +func (d *API) monitorScreenChanges() { + // Monitor for screen configuration changes + 
d.core.App.Event.OnApplicationEvent(events.Common.ThemeChanged, func(event *application.ApplicationEvent) { + d.core.App.Logger.Info("Screen configuration changed") + + // Re-analyze screens + d.core.App.Logger.Info("Updated screen count", "count", len(d.core.App.Screen.GetAll())) + + // Could reposition windows here if needed + }) +} + +func (d *API) ShowEnvironmentDialog() { + envInfo := d.core.App.Env.Info() + + details := fmt.Sprintf(`Environment Information: + +Operating System: %s +Architecture: %s +Debug Mode: %t + +Dark Mode: %t + +Platform Information:`, + envInfo.OS, + envInfo.Arch, + envInfo.Debug, + d.core.App.Env.IsDarkMode()) // Use d.core.App + + // Add platform-specific details + for key, value := range envInfo.PlatformInfo { + details += fmt.Sprintf("\n%s: %v", key, value) + } + + if envInfo.OSInfo != nil { + details += fmt.Sprintf("\n\nOS Details:\nName: %s\nVersion: %s", + envInfo.OSInfo.Name, + envInfo.OSInfo.Version) + } + + dialog := d.core.App.Dialog.Info() + dialog.SetTitle("Environment Information") + dialog.SetMessage(details) + dialog.Show() +} + +func (d *API) ServiceStartup(ctx context.Context, options application.ServiceOptions) error { + d.core.App.Logger.Info("Display service starting up") + d.analyzeScreens() + d.monitorScreenChanges() + d.buildMenu() + d.systemTray() + d.core.App.Window.NewWithOptions(application.WebviewWindowOptions{ + Title: "Core", + Height: 900, + Width: 1280, + URL: "/", + }) + return nil +} diff --git a/display/header.go b/display/header.go new file mode 100644 index 00000000..c17adf7f --- /dev/null +++ b/display/header.go @@ -0,0 +1,26 @@ +package display + +import ( + "github.com/Snider/Core" + "github.com/wailsapp/wails/v3/pkg/application" +) + +// Brand defines the type for different application brands. +type Brand string + +const ( + AdminHub Brand = "admin-hub" + ServerHub Brand = "server-hub" + GatewayHub Brand = "gateway-hub" + DeveloperHub Brand = "developer-hub" + ClientHub Brand = "client-hub" +) + +// Service manages all OS-level UI interactions (menus, windows, tray). +// It is the main entry point for all display-related operations. +type API struct { + // --- Injected Dependencies --- + core *core.Core + + windowHandles map[string]*application.WebviewWindow +} diff --git a/display/menu.go b/display/menu.go new file mode 100644 index 00000000..b01cd2be --- /dev/null +++ b/display/menu.go @@ -0,0 +1,32 @@ +package display + +import ( + "runtime" + + "github.com/wailsapp/wails/v3/pkg/application" +) + +// buildMenu creates and sets the main application menu. 
+func (d *API) buildMenu() { + appMenu := d.core.App.Menu.New() + if runtime.GOOS == "darwin" { + appMenu.AddRole(application.AppMenu) + } + appMenu.AddRole(application.FileMenu) + appMenu.AddRole(application.ViewMenu) + appMenu.AddRole(application.EditMenu) + + workspace := appMenu.AddSubmenu("Workspace") + workspace.Add("New").OnClick(func(ctx *application.Context) { /* TODO */ }) + workspace.Add("List").OnClick(func(ctx *application.Context) { /* TODO */ }) + + // Add brand-specific menu items + //if s.brand == DeveloperHub { + // appMenu.AddSubmenu("Developer") + //} + + appMenu.AddRole(application.WindowMenu) + appMenu.AddRole(application.HelpMenu) + + d.core.App.Menu.Set(appMenu) +} diff --git a/display/tray.go b/display/tray.go new file mode 100644 index 00000000..7f5156c5 --- /dev/null +++ b/display/tray.go @@ -0,0 +1,71 @@ +package display + +import ( + _ "embed" + + "github.com/wailsapp/wails/v3/pkg/application" +) + +// setupTray configures and creates the system tray icon and menu. +func (d *API) systemTray() { + + systray := d.core.App.SystemTray.New() + systray.SetTooltip("Lethean Desktop") + systray.SetLabel("hey") + //appTrayIcon, _ := d.assets.ReadFile("assets/apptray.png") + // + //if runtime.GOOS == "darwin" { + // systray.SetTemplateIcon(appTrayIcon) + //} else { + // // Support for light/dark mode icons + // systray.SetDarkModeIcon(appTrayIcon) + // systray.SetIcon(appTrayIcon) + //} + // Create a hidden window for the system tray menu to interact with + trayWindow := d.core.App.Window.NewWithOptions(application.WebviewWindowOptions{ + Title: "System Tray Status", + URL: "/#/system-tray", + Width: 400, + Frameless: true, + Hidden: true, + }) + systray.AttachWindow(trayWindow).WindowOffset(5) + + // --- Build Tray Menu --- + trayMenu := d.core.App.Menu.New() + trayMenu.Add("Open Desktop").OnClick(func(ctx *application.Context) { + for _, window := range d.core.App.Window.GetAll() { + window.Show() + } + }) + trayMenu.Add("Close Desktop").OnClick(func(ctx *application.Context) { + for _, window := range d.core.App.Window.GetAll() { + window.Hide() + } + }) + + trayMenu.Add("Environment Info").OnClick(func(ctx *application.Context) { + d.ShowEnvironmentDialog() + }) + // Add brand-specific menu items + //switch d.brand { + //case AdminHub: + // trayMenu.Add("Manage Workspace").OnClick(func(ctx *application.Context) { /* TODO */ }) + //case ServerHub: + // trayMenu.Add("Server Control").OnClick(func(ctx *application.Context) { /* TODO */ }) + //case GatewayHub: + // trayMenu.Add("Routing Table").OnClick(func(ctx *application.Context) { /* TODO */ }) + //case DeveloperHub: + // trayMenu.Add("Debug Console").OnClick(func(ctx *application.Context) { /* TODO */ }) + //case ClientHub: + // trayMenu.Add("Connect").OnClick(func(ctx *application.Context) { /* TODO */ }) + // trayMenu.Add("Disconnect").OnClick(func(ctx *application.Context) { /* TODO */ }) + //} + + trayMenu.AddSeparator() + trayMenu.Add("Quit").OnClick(func(ctx *application.Context) { + d.core.App.Quit() + }) + + systray.SetMenu(trayMenu) +} diff --git a/display/window.go b/display/window.go new file mode 100644 index 00000000..15d2c0e0 --- /dev/null +++ b/display/window.go @@ -0,0 +1,28 @@ +package display + +import "github.com/wailsapp/wails/v3/pkg/application" + +// OpenWindow creates and shows a new webview window. +// This function is callable from the frontend. 
+func (d *API) OpenWindow(name string, options application.WebviewWindowOptions) {
+	// Check if a window with that name already exists
+	if window, exists := d.core.App.Window.GetByName(name); exists {
+		window.Focus()
+		return
+	}
+
+	window := d.core.App.Window.NewWithOptions(options)
+	d.windowHandles[name] = window
+	window.Show()
+}
+
+// SelectDirectory opens a directory selection dialog and returns the selected path.
+func (d *API) SelectDirectory() (string, error) {
+	dialog := application.OpenFileDialog()
+	dialog.SetTitle("Select Project Directory")
+	path, err := dialog.PromptForSingleSelection()
+	if err != nil {
+		return "", err
+	}
+	return path, nil
+}
diff --git a/docs/docs.go b/docs/docs.go
new file mode 100644
index 00000000..0ac0a43b
--- /dev/null
+++ b/docs/docs.go
@@ -0,0 +1,27 @@
+package docs
+
+import (
+	"embed"
+
+	"github.com/wailsapp/wails/v3/pkg/application"
+)
+
+// displayer is an interface that defines the functionality docs needs from a display service.
+// This avoids a direct dependency on the display package or the core package.
+type displayer interface {
+	OpenWindow(name string, options application.WebviewWindowOptions)
+}
+
+// Service manages the documentation display and serving of assets.
+type Service struct {
+	// --- Injected Dependencies ---
+	app            *application.App
+	displayService displayer // Depends on the local interface, not a concrete type from another package.
+
+	// --- Internal State ---
+	assets embed.FS
+}
+
+//go:embed all:static
+var docsStatic embed.FS
diff --git a/docs/service.go b/docs/service.go
new file mode 100644
index 00000000..16b896ba
--- /dev/null
+++ b/docs/service.go
@@ -0,0 +1,54 @@
+package docs
+
+import (
+	"embed"
+	"net/http"
+	"strings"
+
+	"github.com/wailsapp/wails/v3/pkg/application"
+)
+
+// NewService creates a new, un-wired documentation service.
+func NewService(assets embed.FS) *Service {
+	return &Service{
+		assets: assets,
+	}
+}
+
+// Setup injects the required dependencies into the service.
+func (s *Service) Setup(app *application.App, displayService displayer) {
+	s.app = app
+	s.displayService = displayService
+}
+
+// OpenDocsWindow opens a new window with the documentation.
+func (s *Service) OpenDocsWindow(path ...string) {
+	url := "/docs/"
+	if len(path) > 0 {
+		fullPath := path[0]
+		if strings.Contains(fullPath, "#") {
+			parts := strings.SplitN(fullPath, "#", 2)
+			pagePath := parts[0]
+			fragment := parts[1]
+			url += pagePath + "/#" + fragment
+		} else {
+			url += fullPath
+		}
+	}
+
+	// Use the injected displayService, which satisfies the local displayer interface.
+	s.displayService.OpenWindow("docs", application.WebviewWindowOptions{
+		Title:       "Lethean Documentation",
+		Height:      600,
+		Width:       1000,
+		URL:         url,
+		AlwaysOnTop: true,
+		Frameless:   false,
+	})
+}
+
+// ServeHTTP serves the embedded documentation assets.
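+// How the handler is mounted is outside this change; a minimal sketch with
+// net/http, assuming the docs service is exposed under /docs/ (the mux and
+// prefix are illustrative):
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/docs/", http.StripPrefix("/docs/", docsService))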
+func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) { + http.FileServerFS(docsStatic).ServeHTTP(w, r) +} diff --git a/docs/static/assets/style.css b/docs/static/assets/style.css new file mode 100644 index 00000000..e69de29b diff --git a/docs/static/index.html b/docs/static/index.html new file mode 100644 index 00000000..e69de29b diff --git a/filesystem/client.go b/filesystem/client.go new file mode 100644 index 00000000..52adc53e --- /dev/null +++ b/filesystem/client.go @@ -0,0 +1,45 @@ +package filesystem + +import ( + "github.com/Snider/Core/filesystem/sftp" + "github.com/Snider/Core/filesystem/webdav" +) + +// NewSFTPMedium creates and returns a new SFTP medium. +func NewSFTPMedium(cfg sftp.ConnectionConfig) (Medium, error) { + return sftp.New(cfg) +} + +// NewWebDAVMedium creates and returns a new WebDAV medium. +func NewWebDAVMedium(cfg webdav.ConnectionConfig) (Medium, error) { + return webdav.New(cfg) +} + +// Read retrieves the content of a file from the given medium. +func Read(m Medium, path string) (string, error) { + return m.Read(path) +} + +// Write saves content to a file on the given medium. +func Write(m Medium, path, content string) error { + return m.Write(path, content) +} + +// EnsureDir ensures a directory exists on the given medium. +func EnsureDir(m Medium, path string) error { + return m.EnsureDir(path) +} + +// IsFile checks if a path is a file on the given medium. +func IsFile(m Medium, path string) bool { + return m.IsFile(path) +} + +// Copy copies a file from a source medium to a destination medium. +func Copy(sourceMedium Medium, sourcePath string, destMedium Medium, destPath string) error { + content, err := sourceMedium.Read(sourcePath) + if err != nil { + return err + } + return destMedium.Write(destPath, content) +} diff --git a/filesystem/client_test.go b/filesystem/client_test.go new file mode 100644 index 00000000..5bf1a5cc --- /dev/null +++ b/filesystem/client_test.go @@ -0,0 +1,31 @@ +package filesystem + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRead(t *testing.T) { + m := NewMockMedium() + m.Files["test.txt"] = "hello" + content, err := Read(m, "test.txt") + assert.NoError(t, err) + assert.Equal(t, "hello", content) +} + +func TestWrite(t *testing.T) { + m := NewMockMedium() + err := Write(m, "test.txt", "hello") + assert.NoError(t, err) + assert.Equal(t, "hello", m.Files["test.txt"]) +} + +func TestCopy(t *testing.T) { + source := NewMockMedium() + dest := NewMockMedium() + source.Files["test.txt"] = "hello" + err := Copy(source, "test.txt", dest, "test.txt") + assert.NoError(t, err) + assert.Equal(t, "hello", dest.Files["test.txt"]) +} diff --git a/filesystem/filesystem.go b/filesystem/filesystem.go new file mode 100644 index 00000000..56fde025 --- /dev/null +++ b/filesystem/filesystem.go @@ -0,0 +1,27 @@ +package filesystem + +// Medium defines the standard interface for a storage backend. +// This allows for different implementations (e.g., local disk, S3, SFTP) +// to be used interchangeably. +type Medium interface { + // Read retrieves the content of a file as a string. + Read(path string) (string, error) + + // Write saves the given content to a file, overwriting it if it exists. + Write(path, content string) error + + // EnsureDir makes sure a directory exists, creating it if necessary. + EnsureDir(path string) error + + // IsFile checks if a path exists and is a regular file. + IsFile(path string) bool + + // FileGet is a convenience function that reads a file from the medium. 
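+	// (It mirrors Read, and FileSet below mirrors Write; the pair exists so a
+	// Medium can also back callers that expect a file-style get/set API.)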
+ FileGet(path string) (string, error) + + // FileSet is a convenience function that writes a file to the medium. + FileSet(path, content string) error +} + +// Pre-initialized, sandboxed medium for the local filesystem. +var Local Medium diff --git a/filesystem/filesystem_test.go b/filesystem/filesystem_test.go new file mode 100644 index 00000000..e4b5af92 --- /dev/null +++ b/filesystem/filesystem_test.go @@ -0,0 +1 @@ +package filesystem diff --git a/filesystem/local/client.go b/filesystem/local/client.go new file mode 100644 index 00000000..0efe1719 --- /dev/null +++ b/filesystem/local/client.go @@ -0,0 +1,83 @@ +package local + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// New creates a new instance of the local storage medium. +// It requires a root path to sandbox all file operations. +func New(rootPath string) (*Medium, error) { + if err := os.MkdirAll(rootPath, os.ModePerm); err != nil { + return nil, fmt.Errorf("could not create root directory at %s: %w", rootPath, err) + } + return &Medium{root: rootPath}, nil +} + +// path returns a full, safe path within the medium's root. +func (m *Medium) path(subpath string) (string, error) { + if strings.Contains(subpath, "..") { + return "", fmt.Errorf("path traversal attempt detected") + } + return filepath.Join(m.root, subpath), nil +} + +// Read retrieves the content of a file from the local disk. +func (m *Medium) Read(path string) (string, error) { + safePath, err := m.path(path) + if err != nil { + return "", err + } + data, err := os.ReadFile(safePath) + if err != nil { + return "", err + } + return string(data), nil +} + +// Write saves the given content to a file on the local disk. +func (m *Medium) Write(path, content string) error { + safePath, err := m.path(path) + if err != nil { + return err + } + dir := filepath.Dir(safePath) + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return err + } + return os.WriteFile(safePath, []byte(content), 0644) +} + +// EnsureDir makes sure a directory exists on the local disk. +func (m *Medium) EnsureDir(path string) error { + safePath, err := m.path(path) + if err != nil { + return err + } + return os.MkdirAll(safePath, os.ModePerm) +} + +// IsFile checks if a path exists and is a regular file on the local disk. +func (m *Medium) IsFile(path string) bool { + safePath, err := m.path(path) + if err != nil { + return false + } + info, err := os.Stat(safePath) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +// FileGet is a convenience function that reads a file from the medium. +func (m *Medium) FileGet(path string) (string, error) { + return m.Read(path) +} + +// FileSet is a convenience function that writes a file to the medium. 
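+// A usage sketch for this sandboxed medium (the root path and file names are
+// illustrative):
+//
+//	m, err := local.New("/var/lib/app-data")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = m.Write("notes/today.txt", "hello") // stored under /var/lib/app-data/notes/
+//	_, err = m.Read("../etc/passwd")        // rejected: path traversal detected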
+func (m *Medium) FileSet(path, content string) error { + return m.Write(path, content) +} diff --git a/filesystem/local/client_test.go b/filesystem/local/client_test.go new file mode 100644 index 00000000..ff3dce72 --- /dev/null +++ b/filesystem/local/client_test.go @@ -0,0 +1,154 @@ +package local + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + // Create a temporary directory for testing + testRoot, err := os.MkdirTemp("", "local_test_root") + assert.NoError(t, err) + defer os.RemoveAll(testRoot) // Clean up after the test + + // Test successful creation + medium, err := New(testRoot) + assert.NoError(t, err) + assert.NotNil(t, medium) + assert.Equal(t, testRoot, medium.root) + + // Verify the root directory exists + info, err := os.Stat(testRoot) + assert.NoError(t, err) + assert.True(t, info.IsDir()) + + // Test creating a new instance with an existing directory (should not error) + medium2, err := New(testRoot) + assert.NoError(t, err) + assert.NotNil(t, medium2) +} + +func TestPath(t *testing.T) { + testRoot := "/tmp/test_root" + medium := &Medium{root: testRoot} + + // Valid path + validPath, err := medium.path("file.txt") + assert.NoError(t, err) + assert.Equal(t, filepath.Join(testRoot, "file.txt"), validPath) + + // Subdirectory path + subDirPath, err := medium.path("dir/sub/file.txt") + assert.NoError(t, err) + assert.Equal(t, filepath.Join(testRoot, "dir", "sub", "file.txt"), subDirPath) + + // Path traversal attempt + _, err = medium.path("../secret.txt") + assert.Error(t, err) + assert.Contains(t, err.Error(), "path traversal attempt detected") + + _, err = medium.path("dir/../../secret.txt") + assert.Error(t, err) + assert.Contains(t, err.Error(), "path traversal attempt detected") +} + +func TestReadWrite(t *testing.T) { + testRoot, err := os.MkdirTemp("", "local_read_write_test") + assert.NoError(t, err) + defer os.RemoveAll(testRoot) + + medium, err := New(testRoot) + assert.NoError(t, err) + + fileName := "testfile.txt" + filePath := filepath.Join("subdir", fileName) + content := "Hello, Gopher!\nThis is a test file." 
+ + // Test Write + err = medium.Write(filePath, content) + assert.NoError(t, err) + + // Verify file content by reading directly from OS + readContent, err := os.ReadFile(filepath.Join(testRoot, filePath)) + assert.NoError(t, err) + assert.Equal(t, content, string(readContent)) + + // Test Read + readByMedium, err := medium.Read(filePath) + assert.NoError(t, err) + assert.Equal(t, content, readByMedium) + + // Test Read non-existent file + _, err = medium.Read("nonexistent.txt") + assert.Error(t, err) + assert.True(t, os.IsNotExist(err)) + + // Test Write to a path with traversal attempt + writeErr := medium.Write("../badfile.txt", "malicious content") + assert.Error(t, writeErr) + assert.Contains(t, writeErr.Error(), "path traversal attempt detected") +} + +func TestEnsureDir(t *testing.T) { + testRoot, err := os.MkdirTemp("", "local_ensure_dir_test") + assert.NoError(t, err) + defer os.RemoveAll(testRoot) + + medium, err := New(testRoot) + assert.NoError(t, err) + + dirName := "newdir/subdir" + dirPath := filepath.Join(testRoot, dirName) + + // Test creating a new directory + err = medium.EnsureDir(dirName) + assert.NoError(t, err) + info, err := os.Stat(dirPath) + assert.NoError(t, err) + assert.True(t, info.IsDir()) + + // Test ensuring an existing directory (should not error) + err = medium.EnsureDir(dirName) + assert.NoError(t, err) + + // Test ensuring a directory with path traversal attempt + err = medium.EnsureDir("../bad_dir") + assert.Error(t, err) + assert.Contains(t, err.Error(), "path traversal attempt detected") +} + +func TestIsFile(t *testing.T) { + testRoot, err := os.MkdirTemp("", "local_is_file_test") + assert.NoError(t, err) + defer os.RemoveAll(testRoot) + + medium, err := New(testRoot) + assert.NoError(t, err) + + // Create a test file + fileName := "existing_file.txt" + filePath := filepath.Join(testRoot, fileName) + err = os.WriteFile(filePath, []byte("content"), 0644) + assert.NoError(t, err) + + // Create a test directory + dirName := "existing_dir" + dirPath := filepath.Join(testRoot, dirName) + err = os.Mkdir(dirPath, 0755) + assert.NoError(t, err) + + // Test with an existing file + assert.True(t, medium.IsFile(fileName)) + + // Test with a non-existent file + assert.False(t, medium.IsFile("nonexistent_file.txt")) + + // Test with a directory + assert.False(t, medium.IsFile(dirName)) + + // Test with path traversal attempt + assert.False(t, medium.IsFile("../bad_file.txt")) +} diff --git a/filesystem/local/local.go b/filesystem/local/local.go new file mode 100644 index 00000000..61f2447b --- /dev/null +++ b/filesystem/local/local.go @@ -0,0 +1,6 @@ +package local + +// Medium implements the filesystem.Medium interface for the local disk. +type Medium struct { + root string +} diff --git a/filesystem/mock.go b/filesystem/mock.go new file mode 100644 index 00000000..e97327b4 --- /dev/null +++ b/filesystem/mock.go @@ -0,0 +1,47 @@ +package filesystem + +import "github.com/stretchr/testify/assert" + +// MockMedium implements the Medium interface for testing purposes. 
+type MockMedium struct { + Files map[string]string + Dirs map[string]bool +} + +func NewMockMedium() *MockMedium { + return &MockMedium{ + Files: make(map[string]string), + Dirs: make(map[string]bool), + } +} + +func (m *MockMedium) Read(path string) (string, error) { + content, ok := m.Files[path] + if !ok { + return "", assert.AnError // Simulate file not found error + } + return content, nil +} + +func (m *MockMedium) Write(path, content string) error { + m.Files[path] = content + return nil +} + +func (m *MockMedium) EnsureDir(path string) error { + m.Dirs[path] = true + return nil +} + +func (m *MockMedium) IsFile(path string) bool { + _, ok := m.Files[path] + return ok +} + +func (m *MockMedium) FileGet(path string) (string, error) { + return m.Read(path) +} + +func (m *MockMedium) FileSet(path, content string) error { + return m.Write(path, content) +} diff --git a/filesystem/sftp/client.go b/filesystem/sftp/client.go new file mode 100644 index 00000000..a745a90c --- /dev/null +++ b/filesystem/sftp/client.go @@ -0,0 +1,125 @@ +package sftp + +import ( + "fmt" + "io" + "net" + "os" + "path/filepath" + + "github.com/pkg/sftp" + "github.com/skeema/knownhosts" + "golang.org/x/crypto/ssh" +) + +// New creates a new, connected instance of the SFTP storage medium. +func New(cfg ConnectionConfig) (*Medium, error) { + var authMethods []ssh.AuthMethod + + if cfg.KeyFile != "" { + key, err := os.ReadFile(cfg.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read private key: %w", err) + } + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + return nil, fmt.Errorf("unable to parse private key: %w", err) + } + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } else if cfg.Password != "" { + authMethods = append(authMethods, ssh.Password(cfg.Password)) + } else { + return nil, fmt.Errorf("no authentication method provided (password or keyfile)") + } + + kh, err := knownhosts.New(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts")) + if err != nil { + return nil, fmt.Errorf("failed to read known_hosts: %w", err) + } + + sshConfig := &ssh.ClientConfig{ + User: cfg.User, + Auth: authMethods, + HostKeyCallback: kh.HostKeyCallback(), + } + + addr := net.JoinHostPort(cfg.Host, cfg.Port) + conn, err := ssh.Dial("tcp", addr, sshConfig) + if err != nil { + return nil, fmt.Errorf("failed to dial ssh: %w", err) + } + + sftpClient, err := sftp.NewClient(conn) + if err != nil { + // Ensure the underlying ssh connection is closed on failure + conn.Close() + return nil, fmt.Errorf("failed to create sftp client: %w", err) + } + + return &Medium{client: sftpClient}, nil +} + +// Read retrieves the content of a file from the SFTP server. +func (m *Medium) Read(path string) (string, error) { + file, err := m.client.Open(path) + if err != nil { + return "", fmt.Errorf("sftp: failed to open file %s: %w", path, err) + } + defer file.Close() + + data, err := io.ReadAll(file) + if err != nil { + return "", fmt.Errorf("sftp: failed to read file %s: %w", path, err) + } + + return string(data), nil +} + +// Write saves the given content to a file on the SFTP server. +func (m *Medium) Write(path, content string) error { + // Ensure the remote directory exists first. 
+ dir := filepath.Dir(path) + if err := m.EnsureDir(dir); err != nil { + return err + } + + file, err := m.client.Create(path) + if err != nil { + return fmt.Errorf("sftp: failed to create file %s: %w", path, err) + } + defer file.Close() + + if _, err := file.Write([]byte(content)); err != nil { + return fmt.Errorf("sftp: failed to write to file %s: %w", path, err) + } + + return nil +} + +// EnsureDir makes sure a directory exists on the SFTP server. +func (m *Medium) EnsureDir(path string) error { + // MkdirAll is idempotent, so it won't error if the path already exists. + return m.client.MkdirAll(path) +} + +// IsFile checks if a path exists and is a regular file on the SFTP server. +func (m *Medium) IsFile(path string) bool { + info, err := m.client.Stat(path) + if err != nil { + // If the error is "not found", it's definitely not a file. + // For any other error, we also conservatively say it's not a file. + return false + } + // Return true only if it's not a directory. + return !info.IsDir() +} + +// FileGet is a convenience function that reads a file from the medium. +func (m *Medium) FileGet(path string) (string, error) { + return m.Read(path) +} + +// FileSet is a convenience function that writes a file to the medium. +func (m *Medium) FileSet(path, content string) error { + return m.Write(path, content) +} diff --git a/filesystem/sftp/sftp.go b/filesystem/sftp/sftp.go new file mode 100644 index 00000000..cf9e2e11 --- /dev/null +++ b/filesystem/sftp/sftp.go @@ -0,0 +1,19 @@ +package sftp + +import ( + "github.com/pkg/sftp" +) + +// Medium implements the filesystem.Medium interface for the SFTP protocol. +type Medium struct { + client *sftp.Client +} + +// ConnectionConfig holds the necessary details to connect to an SFTP server. +type ConnectionConfig struct { + Host string + Port string + User string + Password string // For password-based auth + KeyFile string // Path to a private key for key-based auth +} diff --git a/filesystem/webdav/client.go b/filesystem/webdav/client.go new file mode 100644 index 00000000..7ed4f741 --- /dev/null +++ b/filesystem/webdav/client.go @@ -0,0 +1,16 @@ +package webdav + +import "net/http" + +// Medium implements the filesystem.Medium interface for the WebDAV protocol. +type Medium struct { + client *http.Client + baseURL string // e.g., https://dav.example.com/remote.php/dav/files/username/ +} + +// ConnectionConfig holds the necessary details to connect to a WebDAV server. +type ConnectionConfig struct { + URL string // The full base URL of the WebDAV share. + User string + Password string +} diff --git a/filesystem/webdav/webdav.go b/filesystem/webdav/webdav.go new file mode 100644 index 00000000..db0ac663 --- /dev/null +++ b/filesystem/webdav/webdav.go @@ -0,0 +1,183 @@ +package webdav + +import ( + "bytes" + _ "context" + "fmt" + "io" + "net/http" + "path" + "strings" +) + +// New creates a new, connected instance of the WebDAV storage medium. +func New(cfg ConnectionConfig) (*Medium, error) { + transport := &authTransport{ + Username: cfg.User, + Password: cfg.Password, + Wrapped: http.DefaultTransport, + } + + httpClient := &http.Client{Transport: transport} + + // Ping the server to ensure the connection and credentials are valid. + // We do a PROPFIND on the root, which is a standard WebDAV operation. 
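+	// (With "Depth: 0" the PROPFIND asks about the base collection only; a
+	// conforming server replies 207 Multi-Status with an XML body describing
+	// its properties, which is why 207 and 200 are both accepted below.)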
+ req, err := http.NewRequest("PROPFIND", cfg.URL, nil) + if err != nil { + return nil, fmt.Errorf("webdav: failed to create ping request: %w", err) + } + req.Header.Set("Depth", "0") + resp, err := httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("webdav: connection test failed: %w", err) + } + resp.Body.Close() + if resp.StatusCode != http.StatusMultiStatus && resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("webdav: connection test failed with status %s", resp.Status) + } + + return &Medium{ + client: httpClient, + baseURL: cfg.URL, + }, nil +} + +// Read retrieves the content of a file from the WebDAV server. +func (m *Medium) Read(p string) (string, error) { + url := m.resolveURL(p) + resp, err := m.client.Get(url) + if err != nil { + return "", fmt.Errorf("webdav: GET request for %s failed: %w", p, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("webdav: failed to read %s, status: %s", p, resp.Status) + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("webdav: failed to read response body for %s: %w", p, err) + } + + return string(data), nil +} + +// Write saves the given content to a file on the WebDAV server. +func (m *Medium) Write(p, content string) error { + // Ensure the parent directory exists first. + dir := path.Dir(p) + if dir != "." && dir != "/" { + if err := m.EnsureDir(dir); err != nil { + return err // This will be a detailed error from EnsureDir + } + } + + url := m.resolveURL(p) + req, err := http.NewRequest("PUT", url, bytes.NewReader([]byte(content))) + if err != nil { + return fmt.Errorf("webdav: failed to create PUT request: %w", err) + } + + resp, err := m.client.Do(req) + if err != nil { + return fmt.Errorf("webdav: PUT request for %s failed: %w", p, err) + } + defer resp.Body.Close() + + // StatusCreated (201) or StatusNoContent (204) are success codes for PUT. + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("webdav: failed to write %s, status: %s", p, resp.Status) + } + + return nil +} + +// EnsureDir makes sure a directory exists on the WebDAV server, creating parent dirs as needed. +func (m *Medium) EnsureDir(p string) error { + // To mimic MkdirAll, we create each part of the path sequentially. + parts := strings.Split(p, "/") + currentPath := "" + for _, part := range parts { + if part == "" { + continue + } + currentPath = path.Join(currentPath, part) + url := m.resolveURL(currentPath) + "/" // MKCOL needs a trailing slash + + req, err := http.NewRequest("MKCOL", url, nil) + if err != nil { + return fmt.Errorf("webdav: failed to create MKCOL request for %s: %w", currentPath, err) + } + + resp, err := m.client.Do(req) + if err != nil { + return fmt.Errorf("webdav: MKCOL request for %s failed: %w", currentPath, err) + } + resp.Body.Close() + + // 405 Method Not Allowed means it already exists, which is fine for us. + // 201 Created is a success. + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusMethodNotAllowed { + return fmt.Errorf("webdav: failed to create directory %s, status: %s", currentPath, resp.Status) + } + } + return nil +} + +// IsFile checks if a path exists and is a regular file on the WebDAV server. 
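+// In a 207 Multi-Status response a directory's resourcetype contains a
+// collection element (commonly written <D:collection/>), while a plain file's
+// resourcetype is empty; the simple substring check below relies on that.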
+func (m *Medium) IsFile(p string) bool {
+	url := m.resolveURL(p)
+	req, err := http.NewRequest("PROPFIND", url, nil)
+	if err != nil {
+		return false
+	}
+	req.Header.Set("Depth", "0")
+
+	resp, err := m.client.Do(req)
+	if err != nil {
+		return false
+	}
+	defer resp.Body.Close()
+
+	// If we get anything other than a Multi-Status, it's probably not a file.
+	if resp.StatusCode != http.StatusMultiStatus {
+		return false
+	}
+
+	// A simple check: if the response body's resourcetype names a collection,
+	// the path is a directory rather than a file. A more robust implementation
+	// would parse the XML response.
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return false
+	}
+
+	lower := strings.ToLower(string(body))
+	return !strings.Contains(lower, ":collection") && !strings.Contains(lower, "<collection")
+}
+
+// resolveURL joins the base URL with a path segment, ensuring correct slashes.
+func (m *Medium) resolveURL(p string) string {
+	return strings.TrimSuffix(m.baseURL, "/") + "/" + strings.TrimPrefix(p, "/")
+}
+
+// authTransport is a custom http.RoundTripper to inject Basic Auth.
+type authTransport struct {
+	Username string
+	Password string
+	Wrapped  http.RoundTripper
+}
+
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// Per the http.RoundTripper contract the request must not be modified,
+	// so the credentials are added to a clone.
+	req = req.Clone(req.Context())
+	req.SetBasicAuth(t.Username, t.Password)
+	return t.Wrapped.RoundTrip(req)
+}
+
+// FileGet is a convenience function that reads a file from the medium.
+func (m *Medium) FileGet(path string) (string, error) {
+	return m.Read(path)
+}
+
+// FileSet is a convenience function that writes a file to the medium.
+func (m *Medium) FileSet(path, content string) error {
+	return m.Write(path, content)
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..aaf978df
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,56 @@
+module github.com/Snider/Core
+
+go 1.25
+
+require (
+	github.com/ProtonMail/go-crypto v1.3.0
+	github.com/adrg/xdg v0.5.3
+	github.com/pkg/sftp v1.13.10
+	github.com/skeema/knownhosts v1.3.2
+	github.com/stretchr/testify v1.11.1
+	github.com/wailsapp/wails/v3 v3.0.0-alpha.36
+	golang.org/x/crypto v0.43.0
+)
+
+require (
+	dario.cat/mergo v1.0.1 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/bep/debounce v1.2.1 // indirect
+	github.com/cloudflare/circl v1.6.0 // indirect
+	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/ebitengine/purego v0.8.2 // indirect
+	github.com/emirpasic/gods v1.18.1 // indirect
+	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+	github.com/go-git/go-billy/v5 v5.6.2 // indirect
+	github.com/go-git/go-git/v5 v5.13.2 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/godbus/dbus/v5 v5.1.0 // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+	github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e // indirect
+	github.com/kevinburke/ssh_config v1.2.0 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/leaanthony/go-ansi-parser v1.6.1 // indirect
+	github.com/leaanthony/u v1.1.1 // indirect
+	github.com/lmittmann/tint v1.0.7 // indirect
+	github.com/mattn/go-colorable v0.1.14 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/pjbgf/sha1cd v0.3.2 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/rivo/uniseg v0.4.7 //
indirect + github.com/samber/lo v1.49.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/wailsapp/go-webview2 v1.0.22 // indirect + github.com/wailsapp/mimetype v1.4.1 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + golang.org/x/net v0.45.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/header.go b/header.go new file mode 100644 index 00000000..96970bb7 --- /dev/null +++ b/header.go @@ -0,0 +1,45 @@ +package core + +import ( + "embed" + "sync" + + "github.com/wailsapp/wails/v3/pkg/application" +) + +type Contract struct { + DontPanic bool + DisableLogging bool +} +type Ipc struct { + Target string +} + +var allowedModules = map[string]bool{ + "docs": true, + "display": true, + // add more names here if you want to restrict what can be loaded +} + +type Message interface{} +type Core struct { + once sync.Once + initErr error + App *application.App + assets embed.FS + + modMu sync.RWMutex + mods map[string]any + ipcMu sync.RWMutex + ipcHandlers []func(*Core, Message) error + serviceLock bool + servicesLocked bool +} + +type Option func(*Core) error + +var ( + instance *Core + once sync.Once + initErr error +) diff --git a/workspace/local.go b/workspace/local.go new file mode 100644 index 00000000..6f750cdc --- /dev/null +++ b/workspace/local.go @@ -0,0 +1,41 @@ +package workspace + +import "github.com/Snider/Core/filesystem" + +// localMedium implements the Medium interface for the local disk. +type localMedium struct{} + +// NewLocalMedium creates a new instance of the local storage medium. +func NewLocalMedium() filesystem.Medium { + return &localMedium{} +} + +// FileGet reads a file from the local disk. +func (m *localMedium) FileGet(path string) (string, error) { + return filesystem.Read(filesystem.Local, path) +} + +// FileSet writes a file to the local disk. +func (m *localMedium) FileSet(path, content string) error { + return filesystem.Write(filesystem.Local, path, content) +} + +// Read reads a file from the local disk. +func (m *localMedium) Read(path string) (string, error) { + return filesystem.Read(filesystem.Local, path) +} + +// Write writes a file to the local disk. +func (m *localMedium) Write(path, content string) error { + return filesystem.Write(filesystem.Local, path, content) +} + +// EnsureDir creates a directory on the local disk. +func (m *localMedium) EnsureDir(path string) error { + return filesystem.EnsureDir(filesystem.Local, path) +} + +// IsFile checks if a path exists and is a file on the local disk. +func (m *localMedium) IsFile(path string) bool { + return filesystem.IsFile(filesystem.Local, path) +} diff --git a/workspace/service.go b/workspace/service.go new file mode 100644 index 00000000..b529122b --- /dev/null +++ b/workspace/service.go @@ -0,0 +1,124 @@ +package workspace + +import ( + "encoding/json" + "fmt" + "path/filepath" + + "github.com/Snider/Core/config" + "github.com/Snider/Core/crypt/lib/lthn" + "github.com/Snider/Core/crypt/lib/openpgp" + "github.com/Snider/Core/filesystem" +) + +// NewService creates a new WorkspaceService. +func NewService(cfg *config.Config, medium filesystem.Medium) *Service { + return &Service{ + config: cfg, + workspaceList: make(map[string]string), + medium: medium, + } +} + +// ServiceStartup Startup initializes the service, loading the workspace list. 
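+// A wiring sketch (the config values and medium are illustrative; any
+// filesystem.Medium works, e.g. the mock used by the tests):
+//
+//	svc := workspace.NewService(cfg, medium)
+//	if err := svc.ServiceStartup(); err != nil {
+//		// handle error
+//	}
+//	id, _ := svc.CreateWorkspace("alice@example.org", "passphrase")
+//	_ = svc.SwitchWorkspace(id)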
+func (s *Service) ServiceStartup() error { + listPath := filepath.Join(s.config.WorkspacesDir, listFile) + + if s.medium.IsFile(listPath) { + content, err := s.medium.FileGet(listPath) + if err != nil { + return fmt.Errorf("failed to read workspace list: %w", err) + } + if err := json.Unmarshal([]byte(content), &s.workspaceList); err != nil { + fmt.Printf("Warning: could not parse workspace list: %v\n", err) + s.workspaceList = make(map[string]string) + } + } + + return s.SwitchWorkspace(defaultWorkspace) +} + +// CreateWorkspace creates a new, obfuscated workspace on the local medium. +func (s *Service) CreateWorkspace(identifier, password string) (string, error) { + realName := lthn.Hash(identifier) + workspaceID := lthn.Hash(fmt.Sprintf("workspace/%s", realName)) + workspacePath := filepath.Join(s.config.WorkspacesDir, workspaceID) + + if _, exists := s.workspaceList[workspaceID]; exists { + return "", fmt.Errorf("workspace for this identifier already exists") + } + + dirsToCreate := []string{"config", "log", "data", "files", "keys"} + for _, dir := range dirsToCreate { + if err := s.medium.EnsureDir(filepath.Join(workspacePath, dir)); err != nil { + return "", fmt.Errorf("failed to create workspace directory '%s': %w", dir, err) + } + } + + keyPair, err := openpgp.CreateKeyPair(workspaceID, password) + if err != nil { + return "", fmt.Errorf("failed to create workspace key pair: %w", err) + } + + keyFiles := map[string]string{ + filepath.Join(workspacePath, "keys", "key.pub"): keyPair.PublicKey, + filepath.Join(workspacePath, "keys", "key.priv"): keyPair.PrivateKey, + } + for path, content := range keyFiles { + if err := s.medium.FileSet(path, content); err != nil { + return "", fmt.Errorf("failed to write key file %s: %w", path, err) + } + } + + s.workspaceList[workspaceID] = keyPair.PublicKey + listData, err := json.MarshalIndent(s.workspaceList, "", " ") + if err != nil { + return "", fmt.Errorf("failed to marshal workspace list: %w", err) + } + + listPath := filepath.Join(s.config.WorkspacesDir, listFile) + if err := s.medium.FileSet(listPath, string(listData)); err != nil { + return "", fmt.Errorf("failed to write workspace list file: %w", err) + } + + return workspaceID, nil +} + +// SwitchWorkspace changes the active workspace. +func (s *Service) SwitchWorkspace(name string) error { + if name != defaultWorkspace { + if _, exists := s.workspaceList[name]; !exists { + return fmt.Errorf("workspace '%s' does not exist", name) + } + } + + path := filepath.Join(s.config.WorkspacesDir, name) + if err := s.medium.EnsureDir(path); err != nil { + return fmt.Errorf("failed to ensure workspace directory exists: %w", err) + } + + s.activeWorkspace = &Workspace{ + Name: name, + Path: path, + } + + return nil +} + +// WorkspaceFileGet retrieves a file from the active workspace. +func (s *Service) WorkspaceFileGet(filename string) (string, error) { + if s.activeWorkspace == nil { + return "", fmt.Errorf("no active workspace") + } + path := filepath.Join(s.activeWorkspace.Path, filename) + return s.medium.FileGet(path) +} + +// WorkspaceFileSet writes a file to the active workspace. 
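+// Paths are resolved relative to the active workspace's directory. A small
+// round-trip sketch, given a started *Service svc (file name and content are
+// illustrative):
+//
+//	_ = svc.WorkspaceFileSet("config/app.json", `{"theme":"dark"}`)
+//	raw, _ := svc.WorkspaceFileGet("config/app.json")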
+func (s *Service) WorkspaceFileSet(filename, content string) error {
+	if s.activeWorkspace == nil {
+		return fmt.Errorf("no active workspace")
+	}
+	path := filepath.Join(s.activeWorkspace.Path, filename)
+	return s.medium.FileSet(path, content)
+}
diff --git a/workspace/workspace.go b/workspace/workspace.go
new file mode 100644
index 00000000..503d1139
--- /dev/null
+++ b/workspace/workspace.go
@@ -0,0 +1,25 @@
+package workspace
+
+import (
+	"github.com/Snider/Core/config"
+	"github.com/Snider/Core/filesystem"
+)
+
+const (
+	defaultWorkspace = "default"
+	listFile         = "list.json"
+)
+
+// Workspace represents a user's workspace.
+type Workspace struct {
+	Name string
+	Path string
+}
+
+// Service manages user workspaces.
+type Service struct {
+	config          *config.Config
+	activeWorkspace *Workspace
+	workspaceList   map[string]string // Maps Workspace ID to Public Key
+	medium          filesystem.Medium
+}
diff --git a/workspace/workspace_test.go b/workspace/workspace_test.go
new file mode 100644
index 00000000..adacca80
--- /dev/null
+++ b/workspace/workspace_test.go
@@ -0,0 +1,157 @@
+package workspace
+
+import (
+	"encoding/json"
+	"path/filepath"
+	"testing"
+
+	"github.com/Snider/Core/config"
+	"github.com/stretchr/testify/assert"
+)
+
+// MockMedium implements the Medium interface for testing purposes.
+type MockMedium struct {
+	Files map[string]string
+	Dirs  map[string]bool
+}
+
+func NewMockMedium() *MockMedium {
+	return &MockMedium{
+		Files: make(map[string]string),
+		Dirs:  make(map[string]bool),
+	}
+}
+
+func (m *MockMedium) FileGet(path string) (string, error) {
+	content, ok := m.Files[path]
+	if !ok {
+		return "", assert.AnError // Simulate file not found error
+	}
+	return content, nil
+}
+
+func (m *MockMedium) FileSet(path, content string) error {
+	m.Files[path] = content
+	return nil
+}
+
+func (m *MockMedium) EnsureDir(path string) error {
+	m.Dirs[path] = true
+	return nil
+}
+
+func (m *MockMedium) IsFile(path string) bool {
+	_, ok := m.Files[path]
+	return ok
+}
+
+func (m *MockMedium) Read(path string) (string, error) {
+	return m.FileGet(path)
+}
+
+func (m *MockMedium) Write(path, content string) error {
+	return m.FileSet(path, content)
+}
+
+func TestNewService(t *testing.T) {
+	mockConfig := &config.Config{} // You might want to mock this further if its behavior is critical
+	mockMedium := NewMockMedium()
+
+	service := NewService(mockConfig, mockMedium)
+
+	assert.NotNil(t, service)
+	assert.Equal(t, mockConfig, service.config)
+	assert.Equal(t, mockMedium, service.medium)
+	assert.NotNil(t, service.workspaceList)
+	assert.Nil(t, service.activeWorkspace) // Initially no active workspace
+}
+
+func TestServiceStartup(t *testing.T) {
+	mockConfig := &config.Config{
+		WorkspacesDir: "/tmp/workspaces",
+	}
+
+	// Test case 1: list.json exists and is valid
+	t.Run("existing valid list.json", func(t *testing.T) {
+		mockMedium := NewMockMedium()
+
+		// Prepare a mock workspace list
+		expectedWorkspaceList := map[string]string{
+			"workspace1": "pubkey1",
+			"workspace2": "pubkey2",
+		}
+		listContent, _ := json.MarshalIndent(expectedWorkspaceList, "", " ")
+
+		listPath := filepath.Join(mockConfig.WorkspacesDir, listFile)
+		mockMedium.FileSet(listPath, string(listContent))
+
+		service := NewService(mockConfig, mockMedium)
+		err := service.ServiceStartup()
+
+		assert.NoError(t, err)
+		assert.Equal(t, expectedWorkspaceList, service.workspaceList)
+		assert.NotNil(t, service.activeWorkspace)
+		assert.Equal(t, defaultWorkspace, service.activeWorkspace.Name)
+		assert.Equal(t,
filepath.Join(mockConfig.WorkspacesDir, defaultWorkspace), service.activeWorkspace.Path) + }) + + // Test case 2: list.json does not exist + t.Run("no list.json", func(t *testing.T) { + mockMedium := NewMockMedium() // Fresh medium with no files + + service := NewService(mockConfig, mockMedium) + err := service.ServiceStartup() + + assert.NoError(t, err) + assert.NotNil(t, service.workspaceList) + assert.Empty(t, service.workspaceList) // Should be empty if no list.json + assert.NotNil(t, service.activeWorkspace) + assert.Equal(t, defaultWorkspace, service.activeWorkspace.Name) + assert.Equal(t, filepath.Join(mockConfig.WorkspacesDir, defaultWorkspace), service.activeWorkspace.Path) + }) + + // Test case 3: list.json exists but is invalid + t.Run("invalid list.json", func(t *testing.T) { + mockMedium := NewMockMedium() + + listPath := filepath.Join(mockConfig.WorkspacesDir, listFile) + mockMedium.FileSet(listPath, "{invalid json") // Invalid JSON + + service := NewService(mockConfig, mockMedium) + err := service.ServiceStartup() + + assert.NoError(t, err) // Error is logged, but startup continues + assert.NotNil(t, service.workspaceList) + assert.Empty(t, service.workspaceList) // Should be empty if invalid list.json + assert.NotNil(t, service.activeWorkspace) + assert.Equal(t, defaultWorkspace, service.activeWorkspace.Name) + assert.Equal(t, filepath.Join(mockConfig.WorkspacesDir, defaultWorkspace), service.activeWorkspace.Path) + }) +} + +func TestCreateWorkspace(t *testing.T) { + mockConfig := &config.Config{ + WorkspacesDir: "/tmp/workspaces", + } + mockMedium := NewMockMedium() + service := NewService(mockConfig, mockMedium) + + workspaceID, err := service.CreateWorkspace("test", "password") + assert.NoError(t, err) + assert.NotEmpty(t, workspaceID) +} + +func TestSwitchWorkspace(t *testing.T) { + mockConfig := &config.Config{ + WorkspacesDir: "/tmp/workspaces", + } + mockMedium := NewMockMedium() + service := NewService(mockConfig, mockMedium) + + workspaceID, err := service.CreateWorkspace("test", "password") + assert.NoError(t, err) + + err = service.SwitchWorkspace(workspaceID) + assert.NoError(t, err) + assert.Equal(t, workspaceID, service.activeWorkspace.Name) +}