Reverted multithreaded compression because it was not working reliably
Me sad :c
This commit is contained in:
parent
675a8dafb3
commit
d51cf67997
1 changed file with 49 additions and 56 deletions
|
|
@ -2,6 +2,7 @@ package Compressor
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
|
|
@ -14,83 +15,75 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
func CreateBakFile(fileName string, folderPath string, destinationPath string) string {
|
||||
func CreateBakFile(fileName string, folderPath string, destinationPath string, backupName string) string {
|
||||
logger := Logging.DetailedLogger("Compression", "CreateBakFile")
|
||||
|
||||
var buf bytes.Buffer
|
||||
compress(folderPath, &buf, backupName)
|
||||
|
||||
pathToFile := destinationPath + string(os.PathSeparator) + fileName + ".bak"
|
||||
|
||||
|
||||
fileToWrite, err := os.OpenFile(pathToFile, os.O_CREATE|os.O_RDWR, os.FileMode(600))
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
compress(fileToWrite, folderPath)
|
||||
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "File successfully written.", time.Now())
|
||||
fileToWrite.Close()
|
||||
return pathToFile
|
||||
if _, err := io.Copy(fileToWrite, &buf); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "File successfully written.", time.Now())
|
||||
|
||||
|
||||
return fileName
|
||||
}
|
||||
|
||||
// compressFile writes one gzip-compressed tar entry for file (described by
// fi) into targetFile. The entry name is stored relative to the parent of
// folderPath, so extraction yields a single top-level directory. Directories
// produce a header only; regular files also stream their contents.
// NOTE(review): each call emits a complete, self-terminated gzip+tar stream;
// repeated calls on the same targetFile concatenate whole archives — confirm
// that is the intended on-disk format.
func compressFile(targetFile *os.File, file string, fi os.FileInfo, folderPath string) error {
	fileWriter, err := gzip.NewWriterLevel(targetFile, flate.BestCompression)
	if err != nil {
		// Previously discarded; an invalid level would have yielded a nil
		// writer and a panic downstream.
		return err
	}
	tw := tar.NewWriter(fileWriter)

	header, err := tar.FileInfoHeader(fi, file)
	if err != nil {
		return err
	}

	// Path relative to folderPath's parent keeps the backup root directory
	// as the archive's top-level entry.
	relPath, _ := filepath.Rel(filepath.Dir(folderPath), file)
	header.Name = relPath

	if err := tw.WriteHeader(header); err != nil {
		return err
	}

	if !fi.IsDir() {
		data, err := os.Open(file)
		if err != nil {
			return err
		}
		// Close the source file on every path (it was leaked before).
		defer data.Close()

		fmt.Printf("[%s] Compressing: %s (%d bytes)\n", filepath.Base(folderPath), relPath, fi.Size())
		if _, err := io.Copy(tw, data); err != nil {
			return err
		}
	}

	// Close tar before gzip so the end-of-archive marker is compressed too.
	if err := tw.Close(); err != nil {
		return err
	}

	return fileWriter.Close()
}
|
||||
|
||||
func compress(targetFile *os.File, folderPath string) {
|
||||
func compress(folderPath string, buf io.Writer, backupName string){
|
||||
logger := Logging.DetailedLogger("Gzip", "compress")
|
||||
|
||||
fmt.Printf("[%s] Start compression...\n", filepath.Base(folderPath))
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Start compression", time.Now())
|
||||
filepath.Walk(folderPath, func(file string, fi os.FileInfo, err error) error {
|
||||
zr, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
|
||||
tw := tar.NewWriter(zr)
|
||||
|
||||
//This delay is to ensure the files don't get a sudden "file aleady close" error
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
go func() {
|
||||
err := compressFile(targetFile, file, fi, folderPath)
|
||||
fmt.Printf("[%s] Start compression...\n", filepath.Base(folderPath))
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Start compression", time.Now())
|
||||
filepath.Walk(folderPath, func(file string, fi os.FileInfo, err error) error {
|
||||
header, err := tar.FileInfoHeader(fi, file)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
relPath, _ := filepath.Rel(filepath.Dir(folderPath), file)
|
||||
|
||||
header.Name = relPath
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
if !fi.IsDir(){
|
||||
data, err := os.Open(file)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
fmt.Printf("[%s] Compressing: %s (%d bytes)\n", filepath.Base(folderPath) ,relPath, fi.Size())
|
||||
if _, err := io.Copy(tw, data); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
//Wait until all file writes all done
|
||||
time.Sleep(5 * time.Second)
|
||||
if err := tw.Close(); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
if err := zr.Close(); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
|
||||
fmt.Printf("[%s] Compression Done.\n", filepath.Base(folderPath))
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, filepath.Base(folderPath), SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Compression complete.", time.Now())
|
||||
SQL.NewLogEntry(SQL.GetSQLInstance(), uuid.New(), SQL.LogInfo, backupName, SQL.SQLStage_Compress, SQL.REMOTE_NONE, "Compression complete.", time.Now())
|
||||
}
|
||||
Reference in a new issue