diff --git a/README.md b/README.md new file mode 100644 index 0000000..c269d79 --- /dev/null +++ b/README.md @@ -0,0 +1,198 @@ +# SysReplicate (DistroHopper) - Linux System Backup and Restore Tool + +## Features + +### Core Functionality +- **Unified System Backup**: Complete system backup including keys, dotfiles, packages, and automation files +- **System Restore**: Full system restoration from backup tarballs +- **Individual Component Backup**: Separate backup options for keys, dotfiles, or packages only +- **Package Replication**: Generate installation scripts for package managers +- **Encryption**: AES-256-GCM encryption for sensitive data (SSH/GPG keys) + +### Supported Components +- **SSH Keys**: Automatic detection and encryption of SSH keys from standard locations +- **GPG Keys**: Backup and encryption of GPG keyrings +- **Dotfiles**: Configuration files (.bashrc, .vimrc, .gitconfig, etc.) +- **Package Lists**: Support for multiple package managers (apt, pacman, dnf, xbps) +- **Automation Files**: SystemD services, timers, and cronjobs +- **Custom Key Locations**: User-defined paths for additional key storage + +### Package Manager Support +- **Debian/Ubuntu**: apt-get package manager +- **Arch Linux**: pacman and AUR packages (via yay) +- **Fedora/RHEL**: dnf package manager +- **Void Linux**: xbps package manager +- **Flatpak**: Universal package format +- **Snap**: Canonical's package format + +## Installation + +### Prerequisites +- Go 1.24.3 or later +- Linux operating system (Windows and macOS are not supported) + +### Build Instructions +```bash +git clone <repository-url> +cd sysreplicate +go run main.go +go build -o sysreplicate main.go +``` + +## Usage + +### Running the Application +```bash +./sysreplicate +``` + +The application will display a menu with the following options: + +1. **Create Complete System Backup (Recommended)** +2. **Restore System from Backup** +3. **Generate package replication files only** +4. **Backup SSH/GPG keys only** +5. 
**Backup dotfiles only** +6. **Exit** + +### Menu Options Explained + +#### 1. Create Complete System Backup +Creates a unified backup containing: +- SSH/GPG keys (encrypted with AES-256-GCM) +- Dotfiles from home directory +- Package lists for all supported package managers +- SystemD services, timers, and cronjobs +- System metadata (hostname, username, distribution) + +The backup is stored as a compressed tarball in the `dist/` directory with timestamp naming. + +#### 2. Restore System from Backup +Restores a complete system from a previously created backup: +- Prompts for backup tarball path +- Extracts and decrypts SSH/GPG keys to original locations +- Restores dotfiles to home directory +- Generates package installation script +- Provides restoration summary and next steps + +#### 3. Generate Package Replication Files +Creates package lists and installation scripts without backing up keys or dotfiles: +- Detects Linux distribution and base distribution +- Fetches installed packages by category +- Generates JSON metadata file +- Creates installation script for package restoration + +#### 4. Backup SSH/GPG Keys Only +Creates encrypted backup of SSH and GPG keys: +- Searches standard locations (~/.ssh/, ~/.gnupg/) +- Accepts custom key locations from user input +- Encrypts keys with AES-256-GCM +- Stores backup as compressed tarball + +#### 5. 
Backup Dotfiles Only +Creates backup of configuration files: +- Scans home directory for dotfiles +- Excludes binary files and directories +- Creates metadata with file information +- Generates compressed tarball + +## Project Structure + +``` +sysreplicate/ +├── main.go # Application entry point +├── go.mod # Go module definition +├── system/ # Core system package +│ ├── run.go # Main menu and orchestration +│ ├── settings.go # Configuration constants +│ ├── backup_integration.go # Backup/restore integration +│ ├── backup/ # Backup functionality +│ │ ├── unified_backup.go # Complete system backup +│ │ ├── restore.go # System restoration +│ │ ├── key.go # Key backup management +│ │ ├── dotfiles_backup.go # Dotfile backup +│ │ ├── encrypt.go # Encryption utilities +│ │ ├── search.go # Key location discovery +│ │ └── dotfile_scanner.go # Dotfile scanning +│ ├── automation/ # Automation file handling +│ │ ├── automation.go # SystemD and cron detection +│ │ ├── backup.go # Automation backup +│ │ └── detect.go # Automation detection +│ ├── output/ # Output generation +│ │ ├── json.go # JSON metadata generation +│ │ ├── script.go # Installation script generation +│ │ └── tarball.go # Tarball creation +│ └── utils/ # Utility functions +│ ├── detect_distro.go # Distribution detection +│ ├── fetch_packages.go # Package list fetching +│ └── verify_path.go # Path validation +└── dist/ # Output directory + ├── unified-backup-*.tar.gz # Complete system backups + ├── key-backup-*.tar.gz # Key-only backups + ├── dotfile-backup.tar.gz # Dotfile backups + └── restored_packages_install.sh # Generated install script +``` + +## Technical Details + +### Encryption +- **Algorithm**: AES-256-GCM +- **Key Generation**: Cryptographically secure random 32-byte keys +- **Data Format**: Base64-encoded encrypted data with nonce +- **Scope**: Only SSH/GPG keys are encrypted; dotfiles and packages are stored in plaintext + +### Backup Format +- **Container**: Compressed tarball (.tar.gz) +- 
**Metadata**: JSON file containing system information and file lists +- **Structure**: + - `unified_backup.json`: Main metadata and encrypted keys + - `dotfiles/`: Directory containing dotfile contents + - `automation/`: Directory containing automation files + +### Distribution Detection +The tool detects Linux distributions by reading `/etc/os-release` and extracting: +- `ID`: Specific distribution name +- `ID_LIKE`: Base distribution family + +### Package Detection +Package lists are fetched based on the base distribution: +- **Debian-based**: Uses `dpkg` and `apt` commands +- **Arch-based**: Uses `pacman` and `yay` commands +- **Red Hat-based**: Uses `dnf` commands +- **Void**: Uses `xbps` commands + +### Automation Detection +The tool detects and backs up: +- **SystemD Services**: Custom services in `/etc/systemd/system/` +- **SystemD Timers**: Custom timers in `/etc/systemd/system/` +- **User Cronjobs**: User-specific cron jobs +- **System Cronjobs**: System-wide cron jobs + +## Security Considerations + +- SSH and GPG keys are encrypted with AES-256-GCM before storage +- Encryption keys are generated randomly for each backup +- Backup files should be stored securely as they contain sensitive data +- The tool does not require user passwords for encryption (uses random keys) + +## Limitations + +- Only supports Linux operating systems +- Requires appropriate permissions to read system files +- Package restoration may fail if packages are not available in target distribution repositories +- Some automation files may require manual configuration after restoration + +## Output Files + +### Backup Files +- `unified-backup-YYYY-MM-DD-HH-MM-SS.tar.gz`: Complete system backup +- `key-backup-YYYY-MM-DD-HH-MM-SS.tar.gz`: SSH/GPG keys only +- `dotfile-backup.tar.gz`: Dotfiles only + +### Generated Scripts +- `restored_packages_install.sh`: Package installation script for restoration +- `setup.sh`: Package installation script for replication only + +### Metadata Files +- 
`package.json`: System and package information in JSON format + diff --git a/sysreplicate b/sysreplicate index e3e54b1..aa11074 100755 Binary files a/sysreplicate and b/sysreplicate differ diff --git a/system/automation/automation.go b/system/automation/automation.go new file mode 100644 index 0000000..726cbb7 --- /dev/null +++ b/system/automation/automation.go @@ -0,0 +1,157 @@ +package automation + +import ( + "fmt" + "os" + "strings" +) + +type AutomationData struct { + SystemDServices []SystemDUnit `json:"systemd_services"` + SystemDTimers []SystemDUnit `json:"systemd_timers"` + UserCronjobs []Cronjob `json:"user_cronjobs"` + SystemCronjobs []Cronjob `json:"system_cronjobs"` +} +type SystemDUnit struct { + Name string `json:"name"` + Path string `json:"path"` + Content string `json:"content"` + UnitType string `json:"unit_type"` ////saare service, timer and target available ere + IsEnabled bool `json:"is_enabled"` + IsActive bool `json:"is_active"` +} +type Cronjob struct { + Path string `json:"path"` + Content string `json:"content"` + Type string `json:"type"` //.//user, system, cron_d +} +type AutomationManager struct { + username string +} +func NewAutomationManager() *AutomationManager { + username := os.Getenv("USER") + if username == "" { + username = os.Getenv("USERNAME") + } + + return &AutomationManager{ + username: username, + } +} +func (am *AutomationManager) DetectAutomation() (*AutomationData, error) { + fmt.Println("Detecting automation files...") + + data := &AutomationData{ + SystemDServices: make([]SystemDUnit, 0), + SystemDTimers: make([]SystemDUnit, 0), + UserCronjobs: make([]Cronjob, 0), + SystemCronjobs: make([]Cronjob, 0), + } + + systemdServices, systemdTimers, err := am.detectSystemDUnits() + if err != nil { + fmt.Printf("Warning: Failed to detect SystemD units: %v\n", err) + } else { + data.SystemDServices = systemdServices + data.SystemDTimers = systemdTimers + } + + // usercustom, systemCronjobs, err := am.detectCronjobs() + // if 
err != nil { + // fmt.Printf("Warning: Failed to detect customs: %v\n", err) + // } else { + // data.UserCronjobs = usercustoms + // data.SystemCronjobs = + // } + + userCronjobs, systemCronjobs, err := am.detectCronjobs() + if err != nil { + fmt.Printf("Warning: Failed to detect cronjobs: %v\n", err) + } else { + data.UserCronjobs = userCronjobs + data.SystemCronjobs = systemCronjobs + } + + fmt.Printf("Detected %d SystemD services, %d SystemD timers, %d user cronjobs, %d system cronjobs\n", + len(data.SystemDServices), len(data.SystemDTimers), len(data.UserCronjobs), len(data.SystemCronjobs)) + + + + if len(data.SystemDServices) > 0 { + fmt.Println(" SystemD Services found:") + for _, service := range data.SystemDServices { + fmt.Printf(" - %s (%s)\n", service.Name, service.Path) + } + } + + if len(data.SystemDTimers) > 0 { + fmt.Println(" SystemD Timers found:") + for _, timer := range data.SystemDTimers { + fmt.Printf(" - %s (%s)\n", timer.Name, timer.Path) + } + } + + if len(data.UserCronjobs) > 0 { + fmt.Println(" User Cronjobs found:") + for _, cronjob := range data.UserCronjobs { + fmt.Printf(" - %s\n", cronjob.Path) + } + } + + if len(data.SystemCronjobs) > 0 { + fmt.Println(" System Cronjobs found:") + for _, cronjob := range data.SystemCronjobs { + fmt.Printf(" - %s\n", cronjob.Path) + } + } + + return data, nil +} +/////symlink logicc +func (am *AutomationManager) isCustomSystemDUnit(filePath string) bool { + // Check if it's a symlink + linkInfo, err := os.Lstat(filePath) + if err != nil { + return false + } + + // If it's not a symlink, it's custom + if linkInfo.Mode()&os.ModeSymlink == 0 { + return true + } + + // If it's a symlink, check if it points to package-managed directory + target, err := os.Readlink(filePath) + if err != nil { + return false + } + // Package-managed directories that we want to exclude + packageManagedDirs := []string{ + "/usr/lib/systemd/system/", + "/lib/systemd/system/", + "/usr/share/systemd/", + } + + for _, dir := range 
packageManagedDirs { + if strings.HasPrefix(target, dir) { + return false + } + } + + return true +} + +func (am *AutomationManager) readFileContent(filePath string) (string, error) { + content, err := os.ReadFile(filePath) + if err != nil { + return "", err + } + return string(content), nil +} + +// getSystemDUnitStatus checks if a SystemD unit is enabled and active +func (am *AutomationManager) getSystemDUnitStatus(unitName string) (bool, bool) { + ///TODO(@jaadu): getSystemDUnitStatus checks if a SystemD unit is enabled and active: THIS IS A Simplified implementation you should use SYSTEMCTL COMMANDS in better implementation + //AS enabled services logic is different + return false, false +} diff --git a/system/automation/backup.go b/system/automation/backup.go new file mode 100644 index 0000000..742b088 --- /dev/null +++ b/system/automation/backup.go @@ -0,0 +1,136 @@ +package automation + +import ( + "archive/tar" + "fmt" + "path/filepath" + "strings" +) + +func (am *AutomationManager) BackupAutomation(data *AutomationData, tarWriter *tar.Writer) error { + fmt.Println("Adding automation files to backup...") + + for _, service := range data.SystemDServices { + if err := am.addFileToTarball(service.Path, service.Content, "automation/systemd/", tarWriter); err != nil { + fmt.Printf("Warning: Failed to add SystemD service %s: %v\n", service.Name, err) + } + } + for _, timer := range data.SystemDTimers { + if err := am.addFileToTarball(timer.Path, timer.Content, "automation/systemd/", tarWriter); err != nil { + fmt.Printf("Warning: Failed to add SystemD timer %s: %v\n", timer.Name, err) + } + } + + for _, cronjob := range data.UserCronjobs { + if err := am.addFileToTarball(cronjob.Path, cronjob.Content, "automation/cron/", tarWriter); err != nil { + fmt.Printf("Warning: Failed to add user cronjob %s: %v\n", cronjob.Path, err) + } + } + + for _, cronjob := range data.SystemCronjobs { + if err := am.addFileToTarball(cronjob.Path, cronjob.Content, "automation/cron/", 
tarWriter); err != nil { + fmt.Printf("Warning: Failed to add system cronjob %s: %v\n", cronjob.Path, err) + } + } + + return nil +} +func (am *AutomationManager) addFileToTarball(originalPath, content, tarballPrefix string, tarWriter *tar.Writer) error { + tarballPath := tarballPrefix + filepath.Base(originalPath) + header := &tar.Header{ + Name: tarballPath, + Mode: 0644, + Size: int64(len(content)), + } + if err := tarWriter.WriteHeader(header); err != nil { + return fmt.Errorf("failed to write tar header for %s: %w", tarballPath, err) + } + if _, err := tarWriter.Write([]byte(content)); err != nil { + return fmt.Errorf("failed to write content for %s: %w", tarballPath, err) + } + + return nil +} +///TODO(@jaadu): IMPROVE THE RESTORE LOGIC AND RESTORATION COMMAND +func (am *AutomationManager) GenerateRestorationCommands(data *AutomationData) []string { + var commands []string + if len(data.SystemDServices) > 0 || len(data.SystemDTimers) > 0 { + commands = append(commands, "echo 'Restoring SystemD units...'") + + for _, service := range data.SystemDServices { + commands = append(commands, fmt.Sprintf("sudo cp automation/systemd/%s %s", + filepath.Base(service.Path), service.Path)) + } + for _, timer := range data.SystemDTimers { + commands = append(commands, fmt.Sprintf("sudo cp automation/systemd/%s %s", + filepath.Base(timer.Path), timer.Path)) + } + + + + // Reload SystemD daemon + commands = append(commands, "sudo systemctl daemon-reload") + // Enable and start services + for _, service := range data.SystemDServices { + if service.UnitType == "service" { + commands = append(commands, fmt.Sprintf("sudo systemctl enable --now %s || true", + strings.TrimSuffix(service.Name, ".service"))) + } + } + // Enable and start timers + for _, timer := range data.SystemDTimers { + commands = append(commands, fmt.Sprintf("sudo systemctl enable --now %s || true", + strings.TrimSuffix(timer.Name, ".timer"))) + } + } + if len(data.UserCronjobs) > 0 || len(data.SystemCronjobs) 
> 0 { + commands = append(commands, "echo 'Restoring cronjobs...'") + for _, cronjob := range data.UserCronjobs { + if cronjob.Type == "user" { + commands = append(commands, fmt.Sprintf("crontab automation/cron/%s || true", + filepath.Base(cronjob.Path))) + } + } + for _, cronjob := range data.SystemCronjobs { + if cronjob.Type == "system" { + commands = append(commands, fmt.Sprintf("sudo cp automation/cron/%s %s", + filepath.Base(cronjob.Path), cronjob.Path)) + } else if cronjob.Type == "cron_d" { + commands = append(commands, fmt.Sprintf("sudo cp automation/cron/%s %s", + filepath.Base(cronjob.Path), cronjob.Path)) + } + } + } + + return commands +} +func (am *AutomationManager) ValidateAutomationData(data *AutomationData) error { + unitNames := make(map[string]bool) + + for _, service := range data.SystemDServices { + if unitNames[service.Name] { + return fmt.Errorf("duplicate SystemD unit name: %s", service.Name) + } + unitNames[service.Name] = true + } + + for _, timer := range data.SystemDTimers { + if unitNames[timer.Name] { + return fmt.Errorf("duplicate SystemD unit name: %s", timer.Name) + } + unitNames[timer.Name] = true + } + for _, service := range data.SystemDServices { + if service.UnitType != "service" && service.UnitType != "target" { + return fmt.Errorf("invalid unit type for service %s: %s", service.Name, service.UnitType) + } + } + + for _, timer := range data.SystemDTimers { + if timer.UnitType != "timer" { + return fmt.Errorf("invalid unit type for timer %s: %s", timer.Name, timer.UnitType) + } + } + + return nil +} diff --git a/system/automation/detect.go b/system/automation/detect.go new file mode 100644 index 0000000..5d9a0a8 --- /dev/null +++ b/system/automation/detect.go @@ -0,0 +1,173 @@ +package automation + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) +func (am *AutomationManager) detectSystemDUnits() ([]SystemDUnit, []SystemDUnit, error) { + var services []SystemDUnit + var timers []SystemDUnit + + systemdDir := 
"/etc/systemd/system" + if _, err := os.Stat(systemdDir); os.IsNotExist(err) { + fmt.Printf("SystemD directory %s does not exist, skipping SystemD detection\n", systemdDir) + return services, timers, nil + } + err := filepath.Walk(systemdDir, func(path string, info os.FileInfo, err error) error { + if err != nil {return err} + if info.IsDir() {return nil} + if !am.isCustomSystemDUnit(path) {return nil} + + ext := filepath.Ext(path) + unitName := filepath.Base(path) + + content, err := am.readFileContent(path) + if err != nil { + fmt.Printf("Warning: Could not read SystemD unit %s: %v\n", path, err) + return nil + } + isEnabled, isActive := am.getSystemDUnitStatus(unitName) + + unit := SystemDUnit{ + Name: unitName, + Path: path, + Content: content, + UnitType: ext[1:], // Remove the dot + IsEnabled: isEnabled, + IsActive: isActive, + } + + switch ext { + case ".service": + services = append(services, unit) + case ".timer": + timers = append(timers, unit) + case ".target": + services = append(services, unit) + } + + return nil + }) + + if err != nil { + return nil, nil, fmt.Errorf("failed to scan SystemD directory: %w", err) + } + + return services, timers, nil +} + +/////detectCronjobs scans for cron job files +func (am *AutomationManager) detectCronjobs() ([]Cronjob, []Cronjob, error) { + var userCronjobs []Cronjob + var systemCronjobs []Cronjob + + userCronPath := fmt.Sprintf("/var/spool/cron/crontabs/%s", am.username) + if content, err := am.readFileContent(userCronPath); err == nil { + lines := strings.Split(content, "\n") + var filteredLines []string + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" && !strings.HasPrefix(line, "#") { + filteredLines = append(filteredLines, line) + } + } + + if len(filteredLines) > 0 { + userCronjobs = append(userCronjobs, Cronjob{ + Path: userCronPath, + Content: content, + Type: "user", + }) + } + } + + + systemCronPaths := []string{ + "/etc/crontab", + } + + if cronDDir := "/etc/cron.d"; 
am.dirExists(cronDDir) { + if files, err := filepath.Glob(filepath.Join(cronDDir, "*")); err == nil { + for _, file := range files { + if info, err := os.Stat(file); err == nil && !info.IsDir() { + systemCronPaths = append(systemCronPaths, file) + } + } + } + } + + for _, cronPath := range systemCronPaths { + if content, err := am.readFileContent(cronPath); err == nil { + lines := strings.Split(content, "\n") + var filteredLines []string + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" && !strings.HasPrefix(line, "#") { + filteredLines = append(filteredLines, line) + } + } + + if len(filteredLines) > 0 { + cronType := "system" + if strings.Contains(cronPath, "/etc/cron.d/") { + cronType = "cron_d" + } + + systemCronjobs = append(systemCronjobs, Cronjob{ + Path: cronPath, + Content: content, + Type: cronType, + }) + } + } + } + + return userCronjobs, systemCronjobs, nil +} + +func (am *AutomationManager) dirExists(path string) bool { + info, err := os.Stat(path) + return err == nil && info.IsDir() +} + +func (am *AutomationManager) GetAutomationSummary(data *AutomationData) string { + var summary strings.Builder + + summary.WriteString("Automation Detection Summary:\n") + summary.WriteString(fmt.Sprintf("- SystemD Services: %d\n", len(data.SystemDServices))) + summary.WriteString(fmt.Sprintf("- SystemD Timers: %d\n", len(data.SystemDTimers))) + summary.WriteString(fmt.Sprintf("- User Cronjobs: %d\n", len(data.UserCronjobs))) + summary.WriteString(fmt.Sprintf("- System Cronjobs: %d\n", len(data.SystemCronjobs))) + + if len(data.SystemDServices) > 0 { + summary.WriteString("\nSystemD Services found:\n") + for _, service := range data.SystemDServices { + summary.WriteString(fmt.Sprintf(" - %s (%s)\n", service.Name, service.Path)) + } + } + + if len(data.SystemDTimers) > 0 { + summary.WriteString("\nSystemD Timers found:\n") + for _, timer := range data.SystemDTimers { + summary.WriteString(fmt.Sprintf(" - %s (%s)\n", timer.Name, 
timer.Path)) + } + } + + if len(data.UserCronjobs) > 0 { + summary.WriteString("\nUser Cronjobs found:\n") + for _, cronjob := range data.UserCronjobs { + summary.WriteString(fmt.Sprintf(" - %s\n", cronjob.Path)) + } + } + + if len(data.SystemCronjobs) > 0 { + summary.WriteString("\nSystem Cronjobs found:\n") + for _, cronjob := range data.SystemCronjobs { + summary.WriteString(fmt.Sprintf(" - %s\n", cronjob.Path)) + } + } + + return summary.String() +} diff --git a/system/backup/restore.go b/system/backup/restore.go new file mode 100644 index 0000000..4e75188 --- /dev/null +++ b/system/backup/restore.go @@ -0,0 +1,302 @@ +package backup + +import ( + "archive/tar" + "compress/gzip" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/mdgspace/sysreplicate/system/output" +) + +type RestoreManager struct { + backupData *UnifiedBackupData +} + +func NewRestoreManager() *RestoreManager { + return &RestoreManager{} +} + +// restoring the complete system from a unified backup +func (rm *RestoreManager) RestoreFromBackup(tarballPath string) error { + fmt.Printf("Starting system restore from: %s\n", tarballPath) + + // extract and parse backup data + err := rm.extractBackupData(tarballPath) + if err != nil { + return fmt.Errorf("failed to extract backup data: %w", err) + } + + // backup information and furthur instructions + ///s + fmt.Printf("Backup created on: %s\n", rm.backupData.Timestamp.Format("2006-01-09 15:04:05")) + fmt.Printf("Original system: %s@%s (%s)\n", + rm.backupData.SystemInfo.Username, + rm.backupData.SystemInfo.Hostname, + rm.backupData.Distro) + + // 1. Restore SSH/GPG keys + fmt.Println("Restoring SSH/GPG keys...") + err = rm.restoreKeys() + if err != nil { + fmt.Printf("Warning: Key restoration failed: %v\n", err) + } + fmt.Println() + + // 2. 
Restore dotfiles + fmt.Println("Restoring dotfiles...") + err = rm.restoreDotfiles(tarballPath) + if err != nil { + fmt.Printf("Warning: Dotfile restoration failed: %v\n", err) + } + fmt.Println() + + // 3. Generate package installation script + fmt.Println("Generating package installation script...") + err = rm.generateInstallScript() + if err != nil { + fmt.Printf("Warning: Package script generation failed: %v\n", err) + } + fmt.Println() + + fmt.Println("System restore completed successfully!") + fmt.Println() + fmt.Printf("Restore Summary:\n") + fmt.Printf(" Keys restored: %d\n", len(rm.backupData.EncryptedKeys)) + fmt.Printf(" Dotfiles restored: %d\n", len(rm.backupData.Dotfiles)) + fmt.Printf(" Package categories: %d\n", len(rm.backupData.Packages)) + + if rm.backupData.Automation != nil { + automationCount := len(rm.backupData.Automation.SystemDServices) + len(rm.backupData.Automation.SystemDTimers) + + len(rm.backupData.Automation.UserCronjobs) + len(rm.backupData.Automation.SystemCronjobs) + if automationCount > 0 { + fmt.Printf(" Automation files: %d (%d services, %d timers, %d user cronjobs, %d system cronjobs)\n", + automationCount, len(rm.backupData.Automation.SystemDServices), len(rm.backupData.Automation.SystemDTimers), + len(rm.backupData.Automation.UserCronjobs), len(rm.backupData.Automation.SystemCronjobs)) + } + } + + fmt.Println() + fmt.Println("Note: Run the generated install script to restore packages and automation:") + fmt.Println(" chmod +x dist/restored_packages_install.sh") + fmt.Println(" ./dist/restored_packages_install.sh") + + return nil +} + +// extractBackupData extracts and parses the main backup JSON from tarball +func (rm *RestoreManager) extractBackupData(tarballPath string) error { + file, err := os.Open(tarballPath) + if err != nil { + return fmt.Errorf("failed to open tarball: %w", err) + } + defer file.Close() + + gzipReader, err := gzip.NewReader(file) + if err != nil { + return fmt.Errorf("failed to create gzip reader: 
%w", err) + } + defer gzipReader.Close() + + tarReader := tar.NewReader(gzipReader) + + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("failed to read tar entry: %w", err) + } + + if header.Name == "unified_backup.json" { + data, err := io.ReadAll(tarReader) + if err != nil { + return fmt.Errorf("failed to read backup data: %w", err) + } + + rm.backupData = &UnifiedBackupData{} + err = json.Unmarshal(data, rm.backupData) + if err != nil { + return fmt.Errorf("failed to parse backup data: %w", err) + } + + return nil + } + } + + return fmt.Errorf("backup data not found in tarball") +} + +// decryptiug and restoring SSH/GPG keys to their original locations +func (rm *RestoreManager) restoreKeys() error { + config := &EncryptionConfig{ + Key: rm.backupData.EncryptionKey, + } + + restoredCount := 0 + for keyID, encKey := range rm.backupData.EncryptedKeys { + fmt.Printf("Restoring key: %s -> %s\n", keyID, encKey.OriginalPath) + + // Decrypt the key data + decryptedData, err := rm.decryptData(encKey.EncryptedData, config) + if err != nil { + fmt.Printf("Warning: Failed to decrypt key %s: %v\n", keyID, err) + continue + } + + //// Ensure directory exists + dir := filepath.Dir(encKey.OriginalPath) + if err := os.MkdirAll(dir, 0755); err != nil { + fmt.Printf("Warning: Failed to create directory %s: %v\n", dir, err) + continue + } + + // Write decrypted data to original location + err = os.WriteFile(encKey.OriginalPath, decryptedData, os.FileMode(encKey.Permissions)) + if err != nil { + fmt.Printf("Warning: Failed to write key to %s: %v\n", encKey.OriginalPath, err) + continue + } + + restoredCount++ + } + + if restoredCount > 0 { + fmt.Printf("Successfully restored %d keys\n", restoredCount) + } else { + fmt.Println("No keys were restored") + } + return nil +} + +// extract from tarbell +func (rm *RestoreManager) restoreDotfiles(tarballPath string) error { + file, err := os.Open(tarballPath) + if err != nil { 
+ return fmt.Errorf("failed to open tarball: %w", err) + } + defer file.Close() + + gzipReader, err := gzip.NewReader(file) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzipReader.Close() + + tarReader := tar.NewReader(gzipReader) + + homeDir, _ := os.UserHomeDir() + restoredCount := 0 + + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("failed to read tar entry: %w", err) + } + + // Process dotfiles + if strings.HasPrefix(header.Name, "dotfiles/") { + relativePath := strings.TrimPrefix(header.Name, "dotfiles/") + targetPath := filepath.Join(homeDir, relativePath) + + fmt.Printf("Restoring dotfile: %s -> %s\n", header.Name, targetPath) + + // Ensure directory exists + dir := filepath.Dir(targetPath) + if err := os.MkdirAll(dir, 0755); err != nil { + fmt.Printf("Warning: Failed to create directory %s: %v\n", dir, err) + continue + } + + // creates thje target file and copies content to it + targetFile, err := os.Create(targetPath) + if err != nil { + fmt.Printf("Warning: Failed to create file %s: %v\n", targetPath, err) + continue + } + _, err = io.Copy(targetFile, tarReader) + targetFile.Close() + + if err != nil { + fmt.Printf("Warning: Failed to copy dotfile content: %v\n", err) + continue + } + + // Set permissions + err = os.Chmod(targetPath, header.FileInfo().Mode()) + if err != nil { + fmt.Printf("Warning: Failed to set permissions for %s: %v\n", targetPath, err) + } + + restoredCount++ + } + } + + if restoredCount > 0 { + fmt.Printf("Successfully restored %d dotfiles\n", restoredCount) + } else { + fmt.Println("No dotfiles were restored") + } + return nil +} + +// generateInstallScript creates a script to reinstall packages +func (rm *RestoreManager) generateInstallScript() error { + scriptPath := "dist/restored_packages_install.sh" + + // dir check + if err := os.MkdirAll(filepath.Dir(scriptPath), 0755); err != nil { + return fmt.Errorf("failed to 
create directory: %w", err) + } + + return output.GenerateInstallScript(rm.backupData.BaseDistro, rm.backupData.Packages, rm.backupData.Automation, scriptPath) +} + +// decryptData decrypts base64 encoded data using AES-GCM +func (rm *RestoreManager) decryptData(encryptedBase64 string, config *EncryptionConfig) ([]byte, error) { + // Decode base64 + ciphertext, err := base64.StdEncoding.DecodeString(encryptedBase64) + if err != nil { + return nil, fmt.Errorf("failed to decode base64: %w", err) + } + + // creating cipher + block, err := aes.NewCipher(config.Key) + if err != nil { + return nil, fmt.Errorf("failed to create cipher: %w", err) + } + + // creating GCM + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, fmt.Errorf("failed to create GCM: %w", err) + } + + // Extract nonce and encrypted data + nonceSize := gcm.NonceSize() + if len(ciphertext) < nonceSize { + return nil, fmt.Errorf("ciphertext too short") + } + + nonce := ciphertext[:nonceSize] + encryptedData := ciphertext[nonceSize:] + + // Decrypt + plaintext, err := gcm.Open(nil, nonce, encryptedData, nil) + if err != nil { + return nil, fmt.Errorf("failed to decrypt: %w", err) + } + + return plaintext, nil +} diff --git a/system/backup/unified_backup.go b/system/backup/unified_backup.go new file mode 100644 index 0000000..472a7c1 --- /dev/null +++ b/system/backup/unified_backup.go @@ -0,0 +1,338 @@ +package backup + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/mdgspace/sysreplicate/system/automation" + "github.com/mdgspace/sysreplicate/system/output" + "github.com/mdgspace/sysreplicate/system/utils" +) + +// all backup information in one structure +type UnifiedBackupData struct { + Timestamp time.Time `json:"timestamp"` + SystemInfo output.SystemInfo `json:"system_info"` + EncryptedKeys map[string]output.EncryptedKey `json:"encrypted_keys"` + Dotfiles []output.Dotfile `json:"dotfiles"` + Packages 
map[string][]string `json:"packages"` + Automation *automation.AutomationData `json:"automation"` + EncryptionKey []byte `json:"encryption_key"` + Distro string `json:"distro"` + BaseDistro string `json:"base_distro"` +} + +// complete system backup +type UnifiedBackupManager struct { + config *EncryptionConfig +} + +func NewUnifiedBackupManager() *UnifiedBackupManager { + return &UnifiedBackupManager{} +} + +// complete system backup including keys, dotfiles, and packages +func (ubm *UnifiedBackupManager) CreateUnifiedBackup(customPaths []string) error { + fmt.Println("Starting unified system backup...") + + // Generate encryption key + key, err := GenerateKey() + if err != nil { + return fmt.Errorf("failed to generate encryption key: %w", err) + } + + ubm.config = &EncryptionConfig{ + Key: key, + } + + // Get system information + hostname, _ := os.Hostname() + username := os.Getenv("USER") + if username == "" { + username = os.Getenv("USERNAME") + } + + // Detect distro and get packages + distro, baseDistro := utils.DetectDistro() + packages := utils.FetchPackages(baseDistro) + + fmt.Printf("Detected distro: %s (%s)\n", distro, baseDistro) + totalPackages := 0 + for repo, pkgs := range packages { + if len(pkgs) > 0 { + fmt.Printf(" %s packages: %d\n", repo, len(pkgs)) + totalPackages += len(pkgs) + } + } + fmt.Printf(" Total packages to restore: %d\n", totalPackages) + fmt.Println() + + // Create unified backup data + backupData := &UnifiedBackupData{ + Timestamp: time.Now(), + SystemInfo: output.SystemInfo{ + Hostname: hostname, + Username: username, + OS: "linux", + }, + EncryptedKeys: make(map[string]output.EncryptedKey), + EncryptionKey: key, + Packages: packages, + Distro: distro, + BaseDistro: baseDistro, + } + + // 1. Backup SSH/GPG keys + fmt.Println("Backing up SSH/GPG keys...") + err = ubm.backupKeys(customPaths, backupData) + if err != nil { + fmt.Printf("Warning: Key backup failed: %v\n", err) + } + fmt.Println() + + // 2. 
Backup dotfiles + fmt.Println("Backing up dotfiles...") + err = ubm.backupDotfiles(backupData) + if err != nil { + fmt.Printf("Warning: Dotfile backup failed: %v\n", err) + } + fmt.Println() + + // 3. Backup automation files + fmt.Println("Backing up automation files...") + err = ubm.backupAutomation(backupData) + if err != nil { + fmt.Printf("Warning: Automation backup failed: %v\n", err) + } + fmt.Println() + + // 4. Create unified tarball + fmt.Println("Creating unified backup tarball...") + tarballPath := fmt.Sprintf("dist/unified-backup-%s.tar.gz", + time.Now().Format("2006-01-02-15-04-05")) + + err = ubm.createUnifiedTarball(backupData, tarballPath) + if err != nil { + return fmt.Errorf("failed to create unified tarball: %w", err) + } + + fmt.Printf("Unified backup completed successfully: %s\n", tarballPath) + fmt.Println() + fmt.Printf("Backup Summary:\n") + fmt.Printf(" Keys: %d files\n", len(backupData.EncryptedKeys)) + fmt.Printf(" Dotfiles: %d files\n", len(backupData.Dotfiles)) + fmt.Printf(" Packages: %d categories\n", len(backupData.Packages)) + + if backupData.Automation != nil { + automationCount := len(backupData.Automation.SystemDServices) + len(backupData.Automation.SystemDTimers) + + len(backupData.Automation.UserCronjobs) + len(backupData.Automation.SystemCronjobs) + fmt.Printf(" Automation: %d files (%d services, %d timers, %d user cronjobs, %d system cronjobs)\n", + automationCount, len(backupData.Automation.SystemDServices), len(backupData.Automation.SystemDTimers), + len(backupData.Automation.UserCronjobs), len(backupData.Automation.SystemCronjobs)) + } + + return nil +} + +// SSH/GPG key backup +func (ubm *UnifiedBackupManager) backupKeys(customPaths []string, backupData *UnifiedBackupData) error { + // Search standard locations + standardLocations, err := searchStandardLocations() + if err != nil { + return fmt.Errorf("failed to search standard locations: %w", err) + } + + // process the custom paths user might have given while backup + 
bm := &BackupManager{} + customLocations := bm.processCustomPaths(customPaths) + + // Combine all locations + allLocations := append(standardLocations, customLocations...) + + // encrypt and store keys + keyCount := 0 + for _, location := range allLocations { + if len(location.Files) > 0 { + fmt.Printf(" %s keys found:\n", location.Type) + for _, filePath := range location.Files { + fileInfo, err := os.Stat(filePath) + if err != nil { + continue + } + + encryptedData, err := EncryptFile(filePath, ubm.config) + if err != nil { + fmt.Printf(" Warning: Failed to encrypt %s: %v\n", filePath, err) + continue + } + + fmt.Printf(" - %s\n", filePath) + keyID := filepath.Base(filePath) + "_" + strings.ReplaceAll(filePath, "/", "_") + backupData.EncryptedKeys[keyID] = output.EncryptedKey{ + OriginalPath: filePath, + KeyType: location.Type, + EncryptedData: encryptedData, + Permissions: uint32(fileInfo.Mode()), + } + keyCount++ + } + } + } + + if keyCount == 0 { + fmt.Println(" No SSH/GPG keys found") + } else { + fmt.Printf(" Total keys backed up: %d\n", keyCount) + } + + return nil +} + +// dotfile backup logic +func (ubm *UnifiedBackupManager) backupDotfiles(backupData *UnifiedBackupData) error { + files, err := ScanDotfiles() + if err != nil { + return fmt.Errorf("error scanning dotfiles: %w", err) + } + + // Convert to output format and show details + outputFiles := make([]output.Dotfile, len(files)) + dotfileCount := 0 + + for i, file := range files { + if !file.IsDir && !file.IsBinary { + fmt.Printf(" - %s\n", file.Path) + dotfileCount++ + } + + outputFiles[i] = output.Dotfile{ + Path: file.Path, + RealPath: file.RealPath, + IsDir: file.IsDir, + IsBinary: file.IsBinary, + Mode: file.Mode, + Content: file.Content, + } + } + + if dotfileCount == 0 { + fmt.Println(" No dotfiles found") + } else { + fmt.Printf(" Total dotfiles backed up: %d\n", dotfileCount) + } + + backupData.Dotfiles = outputFiles + return nil +} + +func (ubm *UnifiedBackupManager) 
backupAutomation(backupData *UnifiedBackupData) error { + am := automation.NewAutomationManager() + + data, err := am.DetectAutomation() + if err != nil { + return fmt.Errorf("failed to detect automation: %w", err) + } + + if err := am.ValidateAutomationData(data); err != nil { + return fmt.Errorf("invalid automation data: %w", err) + } + + backupData.Automation = data + return nil +} + +// creating one single tarball containing all backup data +func (ubm *UnifiedBackupManager) createUnifiedTarball(backupData *UnifiedBackupData, tarballPath string) error { + // Ensure directory exists + if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + + file, err := os.Create(tarballPath) + if err != nil { + return fmt.Errorf("failed to create tarball: %w", err) + } + defer file.Close() + + gzipWriter := gzip.NewWriter(file) + defer gzipWriter.Close() + + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() + + //// Add main backup metadata + jsonData, err := json.MarshalIndent(backupData, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal backup data: %w", err) + } + + header := &tar.Header{ + Name: "unified_backup.json", + Mode: 0644, + Size: int64(len(jsonData)), + } + + if err := tarWriter.WriteHeader(header); err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + if _, err := tarWriter.Write(jsonData); err != nil { + return fmt.Errorf("failed to write backup data: %w", err) + } + + // Add dotfiles as separate entries (non-binary only) + for _, dotfile := range backupData.Dotfiles { + if dotfile.IsDir || dotfile.IsBinary { + continue + } + + file, err := os.Open(dotfile.Path) + if err != nil { + fmt.Printf("Warning: Could not open dotfile %s: %v\n", dotfile.Path, err) + continue + } + + info, err := file.Stat() + if err != nil { + file.Close() + continue + } + + hdr, err := tar.FileInfoHeader(info, "") + if err != nil { + file.Close() + continue + 
} + + hdr.Name = "dotfiles/" + dotfile.RealPath + + if err := tarWriter.WriteHeader(hdr); err != nil { + file.Close() + continue + } + + _, err = io.Copy(tarWriter, file) + file.Close() + + if err != nil { + fmt.Printf("Warning: Failed to add dotfile %s to tarball: %v\n", dotfile.Path, err) + } + } + + if backupData.Automation != nil { + am := automation.NewAutomationManager() + err := am.BackupAutomation(backupData.Automation, tarWriter) + if err != nil { + fmt.Printf("Warning: Failed to add automation files to tarball: %v\n", err) + } + } + + return nil +} diff --git a/system/backup_integration.go b/system/backup_integration.go index c84ae5d..fc77f4e 100644 --- a/system/backup_integration.go +++ b/system/backup_integration.go @@ -1,9 +1,9 @@ package system import ( + "bufio" "fmt" "log" - "bufio" "os" "strings" @@ -11,6 +11,103 @@ import ( ) // handle backup integration +func RunUnifiedBackup() { + fmt.Println("=== Unified System Backup (Keys + Dotfiles + Packages) ===") + + // unified backup manager + ubm := backup.NewUnifiedBackupManager() + + // get all the custom key paths from user + fmt.Println("\nOptional: Add custom key locations") + customPaths := backup.GetCustomPaths() + + // Create unified backup + err := ubm.CreateUnifiedBackup(customPaths) + if err != nil { + log.Printf("Unified backup failed: %v", err) + return + } + + fmt.Println("Complete system backup completed successfully!") + fmt.Println() + fmt.Println("Your backup includes:") + fmt.Println("- SSH/GPG keys (encrypted)") + fmt.Println("- Dotfiles (.bashrc, .vimrc, .gitconfig, etc.)") + fmt.Println("- Package lists for reinstallation") + fmt.Println("- System automation files (SystemD services, timers, cronjobs)") +} + +// system restoration from backup +func RunRestore() { + fmt.Println("=== System Restore from Backup ===") + + scanner := bufio.NewScanner(os.Stdin) + fmt.Print("Enter backup tarball path: ") + + if !scanner.Scan() { + fmt.Println("Failed to read input") + return + } + + 
tarballPath := strings.TrimSpace(scanner.Text()) + if tarballPath == "" { + fmt.Println("No tarball path provided") + return + } + + // Normalize path separators — NOTE(review): backslash is a legal character in Linux filenames, and this tool is Linux-only, so this rewrite can mangle valid paths; confirm it is wanted + normalizedPath := strings.ReplaceAll(tarballPath, "\\", "/") + + // Check if file exists and is a file (not directory) + fileInfo, err := os.Stat(normalizedPath) + if os.IsNotExist(err) { + fmt.Printf("Backup file does not exist: %s\n", normalizedPath) + return + } + if err != nil { + fmt.Printf("Error checking backup file: %v\n", err) + return + } + if fileInfo.IsDir() { + fmt.Printf("Path is a directory, not a file: %s\n", normalizedPath) + return + } + + // Use normalized path for restoration + tarballPath = normalizedPath + + // Confirm restoration + fmt.Printf("This will restore your system from: %s\n", tarballPath) + fmt.Print("WARNING: This will overwrite existing files. Continue? (y/N): ") + + if !scanner.Scan() { + return + } + + confirm := strings.ToLower(strings.TrimSpace(scanner.Text())) + if confirm != "y" && confirm != "yes" { + fmt.Println("Restoration cancelled") + return + } + + // create the restore manager and run the restoration + rm := backup.NewRestoreManager() + err = rm.RestoreFromBackup(tarballPath) + if err != nil { + log.Printf("Restoration failed: %v", err) + return + } + + fmt.Println("\nSystem restoration completed!") + fmt.Println() + fmt.Println("Next steps:") + fmt.Println("1. Run the generated package installation script") + fmt.Println("2. Restart your shell or run 'source ~/.bashrc' (or ~/.zshrc)") + fmt.Println("3. Check that your SSH keys work: 'ssh-add -l'") + fmt.Println("4. 
Verify automation files were restored correctly") +} + +// rest of the options func RunBackup() { fmt.Println("=== Key Backup Process ===") @@ -54,13 +151,3 @@ func RunDotfileBackup() { fmt.Println("Backup complete!") } -func restoreBackup() { - fmt.Println("Restoring Backup") - fmt.Println("Enter backup tarball path") - - reader := bufio.NewReader(os.Stdin) - name, _ := reader.ReadString('\n') // reads until newline - name = strings.TrimSpace(name) - - -} diff --git a/system/output/script.go b/system/output/script.go index d253ce6..2342805 100644 --- a/system/output/script.go +++ b/system/output/script.go @@ -3,11 +3,13 @@ package output import ( "fmt" "os" + + "github.com/mdgspace/sysreplicate/system/automation" ) // generateInstallScript creates a shell script to install all packages for the given distro. // Returns an error if the script cannot be created or written. -func GenerateInstallScript(baseDistro string, packages map[string][]string, scriptPath string) error { +func GenerateInstallScript(baseDistro string, packages map[string][]string, automationData *automation.AutomationData, scriptPath string) error { f, err := os.Create(scriptPath) if err != nil { return err @@ -111,6 +113,25 @@ func GenerateInstallScript(baseDistro string, packages map[string][]string, scri } } + if automationData != nil { + am := automation.NewAutomationManager() + automationCommands := am.GenerateRestorationCommands(automationData) + + if len(automationCommands) > 0 { + _, err = fmt.Fprintln(f, "\necho 'Restoring automation files...'") + if err != nil { + return err + } + + for _, cmd := range automationCommands { + _, err = fmt.Fprintf(f, "%s\n", cmd) + if err != nil { + return err + } + } + } + } + return nil } diff --git a/system/output/tarball.go b/system/output/tarball.go index 7683da2..e1b929c 100644 --- a/system/output/tarball.go +++ b/system/output/tarball.go @@ -32,13 +32,14 @@ type EncryptedKey struct { } type Dotfile struct { - Path string - RealPath string - IsDir 
bool - IsBinary bool - Mode os.FileMode - Content string // ignore for the binary files + Path string `json:"path"` + RealPath string `json:"real_path"` + IsDir bool `json:"is_dir"` + IsBinary bool `json:"is_binary"` + Mode os.FileMode `json:"mode"` + Content string `json:"content"` // ignore for the binary files } + type BackupMetadata struct { Timestamp time.Time `json:"timestamp"` Hostname string `json:"hostname"` diff --git a/system/run.go b/system/run.go index 712b91d..1a8bad7 100644 --- a/system/run.go +++ b/system/run.go @@ -32,17 +32,18 @@ func Run() { } // showMenu displays the main menu for Linux users -// MUST BE CHANGED IN THE FUTURE func showMenu() { scanner := bufio.NewScanner(os.Stdin) for { fmt.Println("\n=== SysReplicate - Distro Hopping Tool ===") - fmt.Println("1. Generate package replication files") - fmt.Println("2. Backup SSH/GPG keys") - fmt.Println("3. Backup dotfiles") - fmt.Println("4. Exit") - fmt.Print("Choose an option (1-4): ") + fmt.Println("1. Create Complete System Backup (Recommended)") + fmt.Println("2. Restore System from Backup") + fmt.Println("3. Generate package replication files only") + fmt.Println("4. Backup SSH/GPG keys only") + fmt.Println("5. Backup dotfiles only") + fmt.Println("6. Exit") + fmt.Print("Choose an option (1-6): ") if !scanner.Scan() { break @@ -52,16 +53,20 @@ func showMenu() { switch choice { case "1": - runPackageReplication() + RunUnifiedBackup() case "2": - RunBackup() + RunRestore() case "3": - RunDotfileBackup() + runPackageReplication() case "4": - fmt.Println() //exit + RunBackup() + case "5": + RunDotfileBackup() + case "6": + fmt.Println("Goodbye Captain!") return default: - fmt.Println("Invalid choice. Please select 1, 2, or 3.") + fmt.Println("Invalid choice. 
Please select 1-6.") } } } @@ -99,7 +104,7 @@ func runPackageReplication() { return } - if err := output.GenerateInstallScript(baseDistro, packages, scriptOutputPath); err != nil { + if err := output.GenerateInstallScript(baseDistro, packages, nil, scriptOutputPath); err != nil { log.Println("Error generating install script:", err) } else { fmt.Println("Script generated successfully at:", scriptOutputPath)