Merge pull request #2 from netbenix/develop

Added core functionality
This commit is contained in:
netbenix 2021-11-19 19:51:29 +01:00 committed by GitHub
commit 5f63aa04a6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 287 additions and 37 deletions

81
Compression.go Normal file
View file

@ -0,0 +1,81 @@
package main
import (
"archive/tar"
"bytes"
"compress/flate"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"scabiosa/Logging"
)
// CreateBakFile compresses folderPath into a gzip'd tar archive and writes it
// into destinationPath as "<filename>.bak". It returns the bare file name of
// the created archive (without the destination directory).
//
// Any I/O error is fatal: it is logged and the process exits.
func CreateBakFile(filename string, folderPath string, destinationPath string) string {
	logger := Logging.DetailedLogger("Compression", "CreateBakFile")

	var buf bytes.Buffer
	compress(folderPath, &buf)

	bakName := filename + ".bak"

	// 0600 (octal) — the original passed the decimal literal 600, which is the
	// nonsensical mode 0o1130. filepath.Join builds the path portably instead
	// of hand-concatenating with os.PathSeparator.
	fileToWrite, err := os.OpenFile(filepath.Join(destinationPath, bakName), os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		logger.Fatal(err)
	}
	// Close the handle when done — the original leaked it until process exit.
	defer fileToWrite.Close()

	if _, err := io.Copy(fileToWrite, &buf); err != nil {
		logger.Fatal(err)
	}
	return bakName
}
// compress walks folderPath recursively and streams its contents into buf as
// a gzip-compressed (best compression) tar archive. Entry names are stored
// relative to the parent of folderPath, so the archive unpacks into a single
// top-level directory. Errors are fatal: logged and the process exits.
func compress(folderPath string, buf io.Writer) {
	logger := Logging.DetailedLogger("Gzip", "compress")

	// flate.BestCompression is a valid level, but check the error instead of
	// discarding it like the original did.
	zr, err := gzip.NewWriterLevel(buf, flate.BestCompression)
	if err != nil {
		logger.Fatal(err)
	}
	tw := tar.NewWriter(zr)

	fmt.Printf("[%s] Start compression...\n", filepath.Base(folderPath))

	walkErr := filepath.Walk(folderPath, func(file string, fi os.FileInfo, err error) error {
		// Propagate errors from Walk itself (unreadable directory, etc.); the
		// original ignored this and would have dereferenced a nil fi.
		if err != nil {
			return err
		}

		header, err := tar.FileInfoHeader(fi, file)
		if err != nil {
			return err
		}

		relPath, err := filepath.Rel(filepath.Dir(folderPath), file)
		if err != nil {
			return err
		}
		header.Name = relPath

		if err := tw.WriteHeader(header); err != nil {
			return err
		}

		if fi.IsDir() {
			return nil
		}

		data, err := os.Open(file)
		if err != nil {
			return err
		}
		// Close each file as soon as it is copied; the original leaked every
		// handle until process exit.
		defer data.Close()

		fmt.Printf("[%s] Compressing: %s (%d bytes)\n", filepath.Base(folderPath), relPath, fi.Size())
		_, err = io.Copy(tw, data)
		return err
	})
	// The original discarded Walk's return value entirely.
	if walkErr != nil {
		logger.Fatal(walkErr)
	}

	// Close tar before gzip so both footers are flushed into buf.
	if err := tw.Close(); err != nil {
		logger.Fatal(err)
	}
	if err := zr.Close(); err != nil {
		logger.Fatal(err)
	}
	fmt.Printf("[%s] Compression Done.\n", filepath.Base(folderPath))
}

View file

@ -7,11 +7,27 @@ import (
)
// Config mirrors the structure of config.json.
type Config struct {
	// NOTE(review): _7zPath/_7zArgs carry no json tags and appear to be
	// leftovers from an earlier 7-Zip based flow (they were dropped from
	// config.json in this change) — confirm whether they can be removed.
	_7zPath string
	_7zArgs string
	// Directory where .bak files are kept when a local copy is requested.
	LocalBackupPath string `json:"localBackupPath"`
	// Connection settings for the SQL backup log.
	SQLConfig struct{
		SqlType string `json:"sqlType"`
		SqlAddress string `json:"sql-address"`
		SqlPort uint16 `json:"sql-port"`
		Database string `json:"database"`
		DbUser string `json:"db-user"`
		DbPassword string `json:"db-password"`
	} `json:"sqlConfig"`
	// One entry per folder to back up ("foldersToBackup" in config.json).
	FolderToBackup []struct{
		BackupName string `json:"backupName"`
		FolderPath string `json:"folderPath"`
		StorageType string `json:"storageType"`
		CreateLocalBackup bool `json:"createLocalBackup"`
	} `json:"foldersToBackup"`
}
// Backup is an internal representation of a single backup job.
// NOTE(review): localbackup/localbackupPath look superseded by
// createLocalBackup and Config.LocalBackupPath — confirm before use.
type Backup struct{
	backupName string
	folderPath string
	storageType string
	localbackup uint8
	localbackupPath string
	createLocalBackup bool
}
func readConfig() []byte {
@ -21,10 +37,10 @@ func readConfig() []byte {
if err != nil {
logger.Fatal(err)
}
return file
}
func GetConfig() Config {
logger := Logging.DetailedLogger("ConfigHandler", "GetConfig()")
var config Config

View file

@ -3,12 +3,13 @@
Please keep in mind that this project is WIP.
## What can it do?
- Back up your stuff via a dynamic configuration
- Log the Backup progress to a database
- Back up your stuff via a dynamic configuration (done!)
- Log the Backup progress to a database (planned)
- Upload the files to a remote storage of your choice (see [Storage Types](#storage-types))
## Database Types
- MariaDB (planned)
- MariaDB (soon)
- MySQL (far future)
- MS-SQL (far future)
@ -16,8 +17,32 @@ Please keep in mind that this project is WIP.
## Storage types
- Local storage (soon)
- Azure Blob Storage (planned)
- Azure File Share (planned)
- Azure File Share (done!)
- S3 Bucket (far future)
- Dropbox (far future)
- OneDrive (far future)
- GDrive (far future)
| Storage Type | Config Type |
|-------------------------|--------------------------|
| Azure File Share | azure-fileshare |
## Config Explanation
### config.json
| Field | Type | Description |
|---------------------|:----------------:|------------------------------------------------|
| localBackupPath | string | Path where local backups are stored |
| **sqlConfig** | ---------------- | ---------------------------------------------- |
| sqlType | string | SQL Server Type (not yet used) |
| sql-address | string | Address to the SQL Server |
| sql-port | uint16 | SQL Server Port |
| database | string | Database name |
| db-user | string | SQL username from user which should be used |
| db-password | string | SQL password from user which should be used |
| **foldersToBackup** | ---------------- | ---------------------------------------------- |
| backupName | string | .bak file name |
| folderPath | string | Path to folder which should be backed up |
| storageType | string | See [StorageTypes](#storage-types) |
| createLocalBackup | boolean | Sets if .bak file should also be saved locally |

View file

@ -1,23 +1,63 @@
package StorageTypes
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/Azure/azure-storage-file-go/azfile"
"net/url"
"os"
"path/filepath"
"scabiosa/Logging"
"strings"
)
// AzureFileStorage holds the settings needed to upload archives to an Azure
// file share; the tagged fields are populated from the storage config JSON.
type AzureFileStorage struct{
	// NOTE(review): azcopyPath/storageAccUrl/targetDirectory/SASKey look like
	// remnants of the old azcopy-based flow (they were dropped from the
	// storage config JSON in this change) — confirm whether to remove them.
	azcopyPath string
	storageAccUrl string
	targetDirectory string
	SASKey string
	// Populated from the JSON config and used to build the upload URL and
	// shared-key credential in upload().
	FileshareName string `json:"fileshareName"`
	TargetDirectory string `json:"targetDirectory"`
	StorageAccountName string `json:"storageAccountName"`
	StorageAccountKey string `json:"storageAccountKey"`
}
func (azure AzureFileStorage) upload() error{
//Do Stuff here
return errors.New("lelek")
// upload streams the local file fileName into the configured Azure file share
// at https://<account>.file.core.windows.net/<share>/<targetDir>/<basename>,
// reporting progress to stdout. Any failure is fatal: logged and the process
// exits.
func (azure AzureFileStorage) upload(fileName string){
	logger := Logging.DetailedLogger("AzureFileStorage", "upload")

	file, err := os.Open(fileName)
	if err != nil {
		logger.Fatal(err)
	}
	defer file.Close()

	fileSize, err := file.Stat()
	if err != nil {
		logger.Fatal(err)
	}

	credential, err := azfile.NewSharedKeyCredential(azure.StorageAccountName, azure.StorageAccountKey)
	if err != nil{
		logger.Fatal(err)
	}

	// Display label: the archive name without its ".bak" extension.
	// strings.Trim(name, ".bak") was wrong — it strips any of the characters
	// '.', 'b', 'a', 'k' from both ends (e.g. "back.bak" becomes "c").
	label := strings.TrimSuffix(filepath.Base(fileName), ".bak")

	// The original discarded the url.Parse error.
	u, err := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net/%s/%s/%s", azure.StorageAccountName, azure.FileshareName, azure.TargetDirectory, filepath.Base(fileName)))
	if err != nil {
		logger.Fatal(err)
	}
	fileURL := azfile.NewFileURL(*u, azfile.NewPipeline(credential, azfile.PipelineOptions{}))
	ctx := context.Background()

	fmt.Printf("[%s] Starting upload to Azure File Share...\n", label)
	err = azfile.UploadFileToAzureFile(ctx, file, fileURL,
		azfile.UploadToAzureFileOptions{
			Parallelism: 3,
			FileHTTPHeaders: azfile.FileHTTPHeaders{
				CacheControl: "no-transform",
			},
			Progress: func(bytesTransferred int64){
				fmt.Printf("[%s] Uploaded %d of %d bytes.\n", label, bytesTransferred, fileSize.Size())
			}})
	// The original discarded this error and printed "Upload finished." even
	// when the transfer failed.
	if err != nil {
		logger.Fatal(err)
	}
	fmt.Printf("[%s] Upload finished.\n", label)
}
func readConfig() []byte {

View file

@ -1,14 +1,18 @@
package StorageTypes
import "fmt"
type Storage interface {
upload() error
upload(fileName string)
}
func UploadFile(storage Storage){
err := storage.upload()
if err != nil{
fmt.Print(err)
}
// UploadFile uploads fileName using the given storage backend.
// NOTE(review): CheckStorageType returns nil for unknown storage types, and
// this call would then panic on the nil interface — confirm callers guard
// against that.
func UploadFile(storage Storage, fileName string){
	storage.upload(fileName)
}
// CheckStorageType maps a configured storage-type string to its Storage
// implementation. Unrecognised types yield nil.
func CheckStorageType(storageType string) Storage{
	switch storageType {
	case "azure-fileshare":
		return GetAzureStorage()
	default:
		return nil
	}
}

View file

@ -1,6 +1,6 @@
{
"azcopyPath": "",
"storageAccUrl": "",
"targetDirectory": "",
"SASKey": ""
"storageAccountName": "",
"storageAccountKey": "",
"fileshareName": "",
"targetDirectory": ""
}

View file

@ -1,7 +1,19 @@
{
"_7zPath": "",
"_7zArgs": "",
"storageType": "azure",
"localbackup": 0,
"localbackupPath": ""
"localBackupPath": "",
"sqlConfig": {
"sqlType": "",
"sql-address": "",
"sql-port": 0,
"database": "",
"db-user": "",
"db-password": ""
},
"foldersToBackup": [
{
"backupName": "",
"folderPath": "",
"storageType": "",
"createLocalBackup": false
}
]
}

11
go.mod
View file

@ -4,4 +4,13 @@ go 1.17
require github.com/sirupsen/logrus v1.8.1
require golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
require github.com/Azure/azure-storage-file-go v0.8.0
require (
github.com/Azure/azure-pipeline-go v0.2.1 // indirect
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 // indirect
github.com/pkg/errors v0.9.1 // indirect
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
golang.org/x/text v0.3.0 // indirect
)

23
go.sum
View file

@ -1,10 +1,33 @@
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-storage-file-go v0.8.0 h1:OX8DGsleWLUE6Mw4R/OeWEZMvsTIpwN94J59zqKQnTI=
github.com/Azure/azure-storage-file-go v0.8.0/go.mod h1:3w3mufGcMjcOJ3w+4Gs+5wsSgkT7xDwWWqMMIrXtW4c=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

46
main.go
View file

@ -1,9 +1,49 @@
package main
import "scabiosa/StorageTypes"
import (
"os"
"scabiosa/Logging"
"scabiosa/StorageTypes"
"time"
)
func main(){
	// NOTE(review): this declaration and the matching UploadFile(azure) call
	// at the bottom look like stale lines from an older revision — UploadFile
	// as defined in StorageTypes now takes (storage, fileName). Confirm and
	// remove both.
	azure := StorageTypes.GetAzureStorage()
	config := GetConfig()
	// For each configured folder: resolve its storage backend, build the .bak
	// archive, upload it, and drop the local copy unless one was requested.
	for _, backupItem := range config.FolderToBackup{
		storage := StorageTypes.CheckStorageType(backupItem.StorageType)
		destPath := checkTmpPath(config, backupItem.CreateLocalBackup)
		bakFile := CreateBakFile(backupItem.BackupName + getTimeSuffix(), backupItem.FolderPath, destPath)
		StorageTypes.UploadFile(storage, destPath + string(os.PathSeparator) + bakFile)
		if !backupItem.CreateLocalBackup {
			// Best-effort cleanup of the temporary archive; error deliberately ignored.
			_ = os.Remove(destPath + string(os.PathSeparator) + bakFile)
		}
	}
	// NOTE(review): stale single-argument call — see the note on `azure` above.
	StorageTypes.UploadFile(azure)
}
func getTimeSuffix() string{
currTime := time.Now()
return "_" + currTime.Format("02-01-2006_15-04")
}
// checkTmpPath returns the directory a fresh .bak file should be written to.
// When the backup item keeps a local copy, that is the configured local
// backup path; otherwise a scratch "tmp" directory is created on demand and
// used instead. A failed mkdir is fatal.
func checkTmpPath(config Config, createLocalBackup bool) string{
	logger := Logging.DetailedLogger("mainThread", "checkTmpPath")

	if createLocalBackup {
		return config.LocalBackupPath
	}

	if _, err := os.Stat("tmp"); os.IsNotExist(err) {
		// 0700 (octal): the original passed the decimal literal 600, i.e.
		// mode 0o1130 — a directory missing the owner read/execute bits.
		if dirErr := os.Mkdir("tmp", 0700); dirErr != nil {
			// The original logged the stale Stat error (err) here instead of
			// the mkdir error.
			logger.Fatal(dirErr)
		}
	}
	return "tmp"
}