diff --git a/.gitignore b/.gitignore
index 66aa1d2..f4d99cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,6 @@ scabiosa
 
 # GoLand IDE files
 .idea/
+
+#tmp folder
+tmp/
diff --git a/Compressor/Compression.go b/Compressor/Compression.go
index fd8f25b..063276c 100644
--- a/Compressor/Compression.go
+++ b/Compressor/Compression.go
@@ -15,15 +15,18 @@ import (
 	"time"
 )
 
-func CreateBakFile(filename string, folderPath string, destinationPath string) string {
+// CreateBakFile compresses folderPath into a gzip tarball at
+// <destinationPath>/<fileName>.bak and returns the archive's full path.
+func CreateBakFile(fileName string, folderPath string, destinationPath string, backupName string) string {
 	logger := Logging.DetailedLogger("Compression", "CreateBakFile")
 
 	var buf bytes.Buffer
-	compress(folderPath, &buf)
+	compress(folderPath, &buf, backupName)
 
-	fileName := filename + ".bak"
+	pathToFile := destinationPath + string(os.PathSeparator) + fileName + ".bak"
 
-	fileToWrite, err := os.OpenFile(destinationPath + string(os.PathSeparator) + fileName, os.O_CREATE|os.O_RDWR, os.FileMode(600))
+	// 0600 (octal): owner read/write only. Decimal 600 would set bogus mode bits.
+	fileToWrite, err := os.OpenFile(pathToFile, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
 	if err != nil {
 		logger.Fatal(err)
 	}
@@ -32,21 +35,23 @@ func CreateBakFile(filename string, folderPath string, destinationPath string) s
 		logger.Fatal(err)
 	}
 
-	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "File successfully written.", time.Now())
+	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "File successfully written.", time.Now())
 
-	return fileName
+	// Return the full path: callers pass this straight to upload/remove.
+	return pathToFile
 }
 
-func compress(folderPath string, buf io.Writer){
+// compress writes folderPath as a tar.gz stream into buf, logging under backupName.
+func compress(folderPath string, buf io.Writer, backupName string){
 	logger := Logging.DetailedLogger("Gzip", "compress")
 
 	zr, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
 	tw := tar.NewWriter(zr)
 
 	fmt.Printf("[%s] Start compression...\n", filepath.Base(folderPath))
-	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Start compression", time.Now())
+	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Start compression", time.Now())
 
 	filepath.Walk(folderPath, func(file string, fi os.FileInfo, err error) error {
 		header, err := tar.FileInfoHeader(fi, file)
 		if err != nil {
@@ -84,5 +89,5 @@ func compress(folderPath string, buf io.Writer){
 	}
 
 	fmt.Printf("[%s] Compression Done.\n", filepath.Base(folderPath))
-	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Compression complete.", time.Now())
+	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Compression complete.", time.Now())
 }
diff --git a/SQL/MariaDBConnector.go b/SQL/MariaDBConnector.go
index bd4b0c4..ccd3ad3 100644
--- a/SQL/MariaDBConnector.go
+++ b/SQL/MariaDBConnector.go
@@ -43,6 +43,17 @@ func checkIfBackupTableExist(db *sql.DB, mariadb MariaDBConnector) bool {
 	return true
 }
 
+// checkIfBackupEntryExist reports whether a Backups row with the given name exists.
+func checkIfBackupEntryExist(db *sql.DB, mariadb MariaDBConnector, backupName string) bool {
+	// Parameterized query: never concatenate backupName into the SQL string.
+	rows, err := db.Query("SELECT 1 FROM `"+mariadb.Database+"`.Backups WHERE BackupName = ?;", backupName)
+	if err != nil {
+		return false
+	}
+	defer rows.Close()
+	return rows.Next()
+}
+
 func createMariaDBConnection(mariadb MariaDBConnector) *sql.DB{
 	logger := Logging.DetailedLogger("MariaDB", "createConnection")
 	db, err := sql.Open("mysql", mariadb.DbUser + ":" + mariadb.DbPassword + "@(" + mariadb.Address + ":" +strconv.Itoa(int(mariadb.Port))+ ")/" + mariadb.Database)
@@ -56,7 +67,7 @@ func (mariadb MariaDBConnector) createDefaultTables(){
 	logger := Logging.DetailedLogger("MariaDB", "createDefaultTables")
 
 	eventLogSQL := "create table " + mariadb.Database +".EventLog\n(\n UUID text null,\n LogType enum ('INFO', 'WARNING', 'ERROR', 'FATAL') null,\n BackupName varchar(256) null,\n Stage enum ('COMPRESS', 'UPLOAD', 'DELETE TMP') null,\n RemoteStorage enum ('AZURE-FILE', 'AZURE-BLOB', 'NONE') null,\n Description text null,\n Timestamp datetime null\n);"
-	backupSQL := "create table " + mariadb.Database +".Backups\n(\n UUID text null,\n BackupName varchar(256) null,\n LastBackup datetime null,\n LocalBackup tinyint(1) null,\n FilePath varchar(256) null,\n RemoteStorage enum ('AZURE-FILE', 'AZURE-BLOB', 'NONE') null,\n RemotePath varchar(256) null,\n `DurationToBackup (s)` double null,\n HadErrors tinyint(1) null\n);\n\n"
+	backupSQL := "create table " + mariadb.Database +".Backups\n(\n UUID text null,\n BackupName varchar(256) null,\n LastBackup datetime null,\n LocalBackup tinyint(1) null,\n FilePath varchar(256) null,\n RemoteStorage enum ('AZURE-FILE', 'AZURE-BLOB', 'NONE') null,\n RemotePath varchar(256) null\n);\n\n"
 
 	db := createMariaDBConnection(mariadb)
@@ -80,7 +91,7 @@ func (mariadb MariaDBConnector) createDefaultTables(){
 func (mariadb MariaDBConnector) newLogEntry(uuid uuid.UUID, logType LogType, backupName string, stage SQLStage, storageType RemoteStorageType, description string, timestamp time.Time){
 	logger := Logging.DetailedLogger("MariaDB", "newLogEntry")
-
 	db := createMariaDBConnection(mariadb)
 
-	_, err := db.Query("INSERT INTO `" + mariadb.Database + "`.EventLog VALUES (?, ?, ?, ?, ?, ?, ?);", uuid.String(), strconv.FormatInt(int64(logType), 10), backupName, stage, strconv.FormatInt(int64(storageType), 10), description ,timestamp)
+	// Exec, not Query: INSERT returns no result set, and Query would leak the sql.Rows handle.
+	_, err := db.Exec("INSERT INTO `" + mariadb.Database + "`.EventLog VALUES (?, ?, ?, ?, ?, ?, ?);", uuid.String(), strconv.FormatInt(int64(logType), 10), backupName, stage, strconv.FormatInt(int64(storageType), 10), description ,timestamp)
 	if err != nil {
@@ -89,4 +100,24 @@ func (mariadb MariaDBConnector) newLogEntry(uuid uuid.UUID, logType LogType, bac
 	}
 }
 
-func (mariadb MariaDBConnector) newBackupEntry(uuid uuid.UUID, backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string, durationToBackup time.Duration, hadErrors bool){}
+
+// newBackupEntry inserts a Backups row for backupName, or only refreshes
+// LastBackup when an entry with that name already exists.
+func (mariadb MariaDBConnector) newBackupEntry(backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string){
+	logger := Logging.DetailedLogger("MariaDB", "newBackupEntry")
+
+	db := createMariaDBConnection(mariadb)
+	defer db.Close()
+
+	if checkIfBackupEntryExist(db, mariadb, backupName){
+		_, err := db.Exec("UPDATE `" + mariadb.Database + "`.Backups SET LastBackup = ? WHERE BackupName = ?;", lastBackup, backupName)
+		if err != nil {
+			logger.Fatal(err)
+		}
+	} else {
+		_, err := db.Exec("INSERT INTO `" + mariadb.Database + "`.Backups VALUES (?, ?, ?, ?, ?, ?, ?);", uuid.New(), backupName, lastBackup, localBackup, filePath, strconv.FormatInt(int64(storageType), 10), remotePath)
+		if err != nil {
+			logger.Fatal(err)
+		}
+	}
+}
diff --git a/SQL/SQLInterface.go b/SQL/SQLInterface.go
index b597a51..f0cebc4 100644
--- a/SQL/SQLInterface.go
+++ b/SQL/SQLInterface.go
@@ -9,7 +9,7 @@ import (
 type SQLService interface {
 	createDefaultTables()
 	newLogEntry(uuid uuid.UUID, logType LogType, backupName string, stage SQLStage, storageType RemoteStorageType, description string, timestamp time.Time)
-	newBackupEntry(uuid uuid.UUID, backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string, durationToBackup time.Duration, hadErrors bool)
+	newBackupEntry(backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string)
 }
 
 func CreateDefaultTables(sqlService SQLService){
@@ -20,8 +20,8 @@ func NewLogEntry(sqlService SQLService, uuid uuid.UUID, logType LogType, backupN
 	sqlService.newLogEntry(uuid, logType, backupName, stage, storageType, description, timestamp)
 }
 
-func NewBackupEntry(sqlService SQLService, uuid uuid.UUID, backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string, durationToBackup time.Duration, hadErrors bool){
-	sqlService.newBackupEntry(uuid, backupName, lastBackup, localBackup, filePath, storageType, remotePath, durationToBackup, hadErrors)
+func NewBackupEntry(sqlService SQLService, backupName string, lastBackup time.Time, localBackup bool, filePath string, storageType RemoteStorageType, remotePath string){
+	sqlService.newBackupEntry(backupName, lastBackup, localBackup, filePath, storageType, remotePath)
 }
 
 func GetSQLInstance() SQLService{
diff --git a/StorageTypes/AzureFileStorage.go b/StorageTypes/AzureFileStorage.go
index 73837e1..74aad65 100644
--- a/StorageTypes/AzureFileStorage.go
+++ b/StorageTypes/AzureFileStorage.go
@@ -23,7 +23,7 @@ type AzureFileStorage struct{
 }
 
-func (azure AzureFileStorage) upload(fileName string){
+func (azure AzureFileStorage) upload(fileName string, backupName string){
 	logger := Logging.DetailedLogger("AzureFileStorage", "upload")
 
 	file, err := os.Open(fileName)
 	if err != nil {
@@ -48,7 +48,7 @@ func (azure AzureFileStorage) upload(fileName string){
 
 	ctx := context.Background()
 
-	fmt.Printf("[%s] Starting upload to Azure File Share...\n", strings.Trim(filepath.Base(fileName), ".bak"))
-	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(fileName), SQL.SQLStage_Upload, SQL.REMOTE_AZURE_FILE, "Starting upload.", time.Now())
+	fmt.Printf("[%s] Starting upload to Azure File Share...\n", backupName)
+	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Upload, SQL.REMOTE_AZURE_FILE, "Starting upload.", time.Now())
 
 	err = azfile.UploadFileToAzureFile(ctx, file, fileURL, azfile.UploadToAzureFileOptions{
@@ -58,9 +58,14 @@ func (azure AzureFileStorage) upload(fileName string){
 			CacheControl: "no-transform",
 		},
 		Progress: func(bytesTransferred int64){
-			fmt.Printf("[%s] Uploaded %d of %d bytes.\n", strings.Trim(filepath.Base(fileName), ".bak") ,bytesTransferred, fileSize.Size())
+			fmt.Printf("[%s] Uploaded %d of %d bytes.\n", strings.Trim(backupName, ".bak") ,bytesTransferred, fileSize.Size())
 		}})
 
-	fmt.Printf("[%s] Upload finished.\n", strings.Trim(filepath.Base(fileName), ".bak"))
-	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(fileName), SQL.SQLStage_Upload, SQL.REMOTE_AZURE_FILE, "Finished upload.", time.Now())
+	// Surface upload failures instead of silently dropping the error.
+	if err != nil{
+		logger.Fatal(err)
+	}
+
+	fmt.Printf("[%s] Upload finished.\n", strings.Trim(backupName, ".bak"))
+	SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Upload, SQL.REMOTE_AZURE_FILE, "Finished upload.", time.Now())
 }
diff --git a/StorageTypes/StorageInterface.go b/StorageTypes/StorageInterface.go
index b85a5dd..43e243f 100644
--- a/StorageTypes/StorageInterface.go
+++ b/StorageTypes/StorageInterface.go
@@ -1,11 +1,13 @@
 package StorageTypes
 
+import "scabiosa/SQL"
+
 type Storage interface {
-	upload(fileName string)
+	upload(fileName string, backupName string)
 }
 
-func UploadFile(storage Storage, fileName string){
-	storage.upload(fileName)
+func UploadFile(storage Storage, fileName string, backupName string){
+	storage.upload(fileName, backupName)
 }
 
 func CheckStorageType(storageType string) Storage{
@@ -13,6 +15,15 @@ func CheckStorageType(storageType string) Storage{
 
 	if storageType == "azure-fileshare"{
 		return GetAzureStorage()
 	}
 	return nil
 }
+
+// CheckRemoteStorageType maps a config storage-type string to its SQL enum,
+// defaulting to REMOTE_NONE for unknown types.
+func CheckRemoteStorageType(storageType string) SQL.RemoteStorageType {
+	if storageType == "azure-fileshare"{
+		return SQL.REMOTE_AZURE_FILE
+	}
+	return SQL.REMOTE_NONE
+}
diff --git a/main.go b/main.go
index 28e50f9..7a49edf 100644
--- a/main.go
+++ b/main.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"fmt"
 	"github.com/google/uuid"
 	"os"
 	"scabiosa/Compressor"
@@ -20,13 +21,15 @@ func main(){
 		storage := StorageTypes.CheckStorageType(backupItem.StorageType)
 		destPath := checkTmpPath(config, backupItem.CreateLocalBackup)
 
-		bakFile := Compressor.CreateBakFile(backupItem.BackupName + getTimeSuffix(), backupItem.FolderPath, destPath)
-		StorageTypes.UploadFile(storage, destPath + string(os.PathSeparator) + bakFile)
+		// CreateBakFile returns the archive's full path.
+		bakFile := Compressor.CreateBakFile(backupItem.BackupName + getTimeSuffix(), backupItem.FolderPath, destPath, backupItem.BackupName)
+		fmt.Printf("Created backup file: %s\n", bakFile)
+		StorageTypes.UploadFile(storage, bakFile, backupItem.BackupName)
 
 		if !backupItem.CreateLocalBackup {
-			_ = os.Remove(destPath + string(os.PathSeparator) + bakFile)
+			_ = os.Remove(bakFile)
 			SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupItem.BackupName, SQL.SQLStage_DeleteTmp, SQL.REMOTE_NONE, "Deleted tmp file" ,time.Now())
 		}
-
+		SQL.NewBackupEntry(SQL.GetSQLInstance(), backupItem.BackupName, time.Now(), backupItem.CreateLocalBackup, backupItem.FolderPath, StorageTypes.CheckRemoteStorageType(backupItem.StorageType), StorageTypes.GetAzureStorage().TargetDirectory)
 	}
 }