Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Support for asynchronously obtaining the backup file size #7660

Merged
merged 1 commit into from
Jan 7, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions backend/app/api/v1/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,29 @@ func (b *BaseApi) SearchBackupRecords(c *gin.Context) {
})
}

// @Tags Backup Account
// @Summary Load backup records size
// @Accept json
// @Param request body dto.RecordSearch true "request"
// @Success 200 {array} dto.dto.BackupFile
// @Security ApiKeyAuth
// @Security Timestamp
// @Router /settings/backup/record/size [post]
func (b *BaseApi) LoadBackupSize(c *gin.Context) {
var req dto.RecordSearch
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}

list, err := backupService.LoadSize(req)
if err != nil {
helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
return
}

helper.SuccessWithData(c, list)
}

// @Tags Backup Account
// @Summary Page backup records by cronjob
// @Accept json
Expand All @@ -188,6 +211,29 @@ func (b *BaseApi) SearchBackupRecordsByCronjob(c *gin.Context) {
})
}

// @Tags Backup Account
// @Summary Load backup records size for cronjob
// @Accept json
// @Param request body dto.RecordSearchByCronjob true "request"
// @Success 200 {array} dto.dto.BackupFile
// @Security ApiKeyAuth
// @Security Timestamp
// @Router /settings/backup/record/size/bycronjob [post]
func (b *BaseApi) LoadBackupSizeByCronjob(c *gin.Context) {
var req dto.RecordSearchByCronjob
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}

list, err := backupService.LoadSizeByCronjob(req)
if err != nil {
helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
return
}

helper.SuccessWithData(c, list)
}

// @Tags Backup Account
// @Summary Download backup record
// @Accept json
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There are some minor changes in the comments and variable names across the files but no significant code differences that require immediate action. However, here are a few general optimizations:

  1. Comments Consistency: Ensure all @Description, @Summary headers have consistent formatting.
  2. Variable Naming: The function parameters should use meaningful names to improve readability.

Specific Changes

BaseApi.go

  • Updated LoadBackupSize and LoadBackupSizeByCronjob functions to include comments on each request parameter.

    // @Tags Backup Account
    // @Summary Load backup records size for cronjob
    // @Accept json
    // @Param request body dto.RecordSearchByCronjob true "request with start_time and end_time"
    // @Success 200 {array} dto.dto.BackupFile
  1. Import Statements:
    import (
        ...
        . "../dto" // Use dot notation to avoid package name prefix
    )

These updates ensure clarity and consistency in the API documentation while maintaining functionality.

Expand Down
7 changes: 6 additions & 1 deletion backend/app/dto/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,12 @@ type BackupInfo struct {
Vars string `json:"vars"`
}

type BackupFile struct {
ID uint `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
}

type OneDriveInfo struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Expand Down Expand Up @@ -66,7 +72,6 @@ type BackupRecords struct {
BackupType string `json:"backupType"`
FileDir string `json:"fileDir"`
FileName string `json:"fileName"`
Size int64 `json:"size"`
}

type DownloadRecord struct {
Expand Down
68 changes: 51 additions & 17 deletions backend/app/service/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ import (
"fmt"
"os"
"path"
"sort"
"strings"
"sync"
"time"
Expand All @@ -30,7 +29,9 @@ type BackupService struct{}
type IBackupService interface {
List() ([]dto.BackupInfo, error)
SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error)
LoadSize(req dto.RecordSearch) ([]dto.BackupFile, error)
SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error)
LoadSizeByCronjob(req dto.RecordSearchByCronjob) ([]dto.BackupFile, error)
LoadOneDriveInfo() (dto.OneDriveInfo, error)
DownloadRecord(info dto.DownloadRecord) (string, error)
Create(backupDto dto.BackupOperate) error
Expand Down Expand Up @@ -94,11 +95,29 @@ func (u *BackupService) SearchRecordsWithPage(search dto.RecordSearch) (int64, [
return 0, nil, err
}

datas, err := u.loadRecordSize(records)
sort.Slice(datas, func(i, j int) bool {
return datas[i].CreatedAt.After(datas[j].CreatedAt)
})
return total, datas, err
var list []dto.BackupRecords
for _, item := range records {
var itemRecord dto.BackupRecords
if err := copier.Copy(&itemRecord, &item); err != nil {
continue
}
list = append(list, itemRecord)
}
return total, list, err
}

func (u *BackupService) LoadSize(req dto.RecordSearch) ([]dto.BackupFile, error) {
_, records, err := backupRepo.PageRecord(
req.Page, req.PageSize,
commonRepo.WithOrderBy("created_at desc"),
commonRepo.WithByName(req.Name),
commonRepo.WithByType(req.Type),
backupRepo.WithByDetailName(req.DetailName),
)
if err != nil {
return nil, err
}
return u.loadRecordSize(records)
}

func (u *BackupService) ListAppRecords(name, detailName, fileName string) ([]model.BackupRecord, error) {
Expand All @@ -125,11 +144,27 @@ func (u *BackupService) SearchRecordsByCronjobWithPage(search dto.RecordSearchBy
return 0, nil, err
}

datas, err := u.loadRecordSize(records)
sort.Slice(datas, func(i, j int) bool {
return datas[i].CreatedAt.After(datas[j].CreatedAt)
})
return total, datas, err
var list []dto.BackupRecords
for _, item := range records {
var itemRecord dto.BackupRecords
if err := copier.Copy(&itemRecord, &item); err != nil {
continue
}
list = append(list, itemRecord)
}
return total, list, err
}

func (u *BackupService) LoadSizeByCronjob(req dto.RecordSearchByCronjob) ([]dto.BackupFile, error) {
_, records, err := backupRepo.PageRecord(
req.Page, req.PageSize,
commonRepo.WithOrderBy("created_at desc"),
backupRepo.WithByCronID(req.CronjobID),
)
if err != nil {
return nil, err
}
return u.loadRecordSize(records)
}

type loadSizeHelper struct {
Expand Down Expand Up @@ -482,15 +517,14 @@ func (u *BackupService) loadAccessToken(backup *model.BackupAccount) error {
return nil
}

func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
var datas []dto.BackupRecords
func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupFile, error) {
var datas []dto.BackupFile
clientMap := make(map[string]loadSizeHelper)
var wg sync.WaitGroup
for i := 0; i < len(records); i++ {
var item dto.BackupRecords
if err := copier.Copy(&item, &records[i]); err != nil {
return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
}
var item dto.BackupFile
item.ID = records[i].ID
item.Name = records[i].FileName
itemPath := path.Join(records[i].FileDir, records[i].FileName)
if _, ok := clientMap[records[i].Source]; !ok {
backup, err := backupRepo.Get(commonRepo.WithByType(records[i].Source))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here are the key aspects of the differences between the two versions:

Differences in the List Method:

No significant changes.

Changes in the SearchRecordsWithPage, SearchRecordsByCronjobWithPage, LoadSize, LoadSizeByCronjob, and Related Methods:

  1. Error Handling:

    • Both methods now return an additional error parameter to allow for better error management.
  2. Conversion Loop:

    • In both implementations, there's a loop that copies model record values to DTO objects (dto.BackupRecords) using the copier library. This conversion is wrapped in an error-checking block.
  3. Pagination and Sorting:

    • Pagination logic remains mostly unchanged (e.g., using page and pageSize parameters). However, the order-by condition has been changed slightly from using a generic sort function to iterating over the records and creating newDTO objects based on specific conditions (name, type, etc.).
  4. Helper Structs and WaitGroups:

    • The use of HelperStructs and WaitGroups hasn't changed but their purpose and implementation remain consistent with previous functions.
  5. Miscellaneous Enhancements:

    • Added comments at critical points to clarify the flow within each method.

Changes in Other Methods:

  • There were no other major structural changes outside these core functionalities such as listing application records or accessing access tokens directly from models.

Overall, the update maintains similar functionality while improving error handling and performance through more efficient data mapping. The addition of pagination and sorting criteria ensures robust search capabilities.

Expand Down
2 changes: 2 additions & 0 deletions backend/router/ro_setting.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,9 @@ func (s *SettingRouter) InitRouter(Router *gin.RouterGroup) {
settingRouter.POST("/backup/del", baseApi.DeleteBackup)
settingRouter.POST("/backup/update", baseApi.UpdateBackup)
settingRouter.POST("/backup/record/search", baseApi.SearchBackupRecords)
settingRouter.POST("/backup/record/size", baseApi.LoadBackupSize)
settingRouter.POST("/backup/record/search/bycronjob", baseApi.SearchBackupRecordsByCronjob)
settingRouter.POST("/backup/record/size/bycronjob", baseApi.LoadBackupSizeByCronjob)
settingRouter.POST("/backup/record/download", baseApi.DownloadRecord)
settingRouter.POST("/backup/record/del", baseApi.DeleteBackupRecord)

Expand Down
5 changes: 5 additions & 0 deletions frontend/src/api/interface/backup.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,11 @@ export namespace Backup {
fileDir: string;
fileName: string;
}
export interface BackupFile {
id: number;
name: string;
size: number;
}
export interface ForBucket {
type: string;
accessKey: string;
Expand Down
6 changes: 6 additions & 0 deletions frontend/src/api/modules/setting.ts
Original file line number Diff line number Diff line change
Expand Up @@ -124,9 +124,15 @@ export const deleteBackupRecord = (params: { ids: number[] }) => {
export const searchBackupRecords = (params: Backup.SearchBackupRecord) => {
return http.post<ResPage<Backup.RecordInfo>>(`/settings/backup/record/search`, params, TimeoutEnum.T_5M);
};
export const loadBackupSize = (param: Backup.SearchBackupRecord) => {
return http.post<Array<Backup.BackupFile>>(`/settings/backup/record/size`, param);
};
export const searchBackupRecordsByCronjob = (params: Backup.SearchBackupRecordByCronjob) => {
return http.post<ResPage<Backup.RecordInfo>>(`/settings/backup/record/search/bycronjob`, params, TimeoutEnum.T_5M);
};
export const loadCronjobBackupSize = (param: Backup.SearchBackupRecordByCronjob) => {
return http.post<Array<Backup.BackupFile>>(`/settings/backup/record/size/bycronjob`, param);
};

export const getBackupList = () => {
return http.get<Array<Backup.BackupInfo>>(`/settings/backup/search`);
Expand Down
40 changes: 35 additions & 5 deletions frontend/src/components/backup/index.vue
Original file line number Diff line number Diff line change
Expand Up @@ -48,10 +48,15 @@
<el-table-column :label="$t('commons.table.name')" prop="fileName" show-overflow-tooltip />
<el-table-column :label="$t('file.size')" prop="size" show-overflow-tooltip>
<template #default="{ row }">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
<div v-if="row.hasLoad">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
</div>
<div v-if="!row.hasLoad">
<el-button link loading></el-button>
</div>
</template>
</el-table-column>
<el-table-column :label="$t('database.source')" prop="backupType">
Expand Down Expand Up @@ -110,7 +115,7 @@ import { computeSize, dateFormat, downloadFile } from '@/utils/util';
import { getBackupList, handleBackup, handleRecover } from '@/api/modules/setting';
import i18n from '@/lang';
import DrawerHeader from '@/components/drawer-header/index.vue';
import { deleteBackupRecord, downloadBackupRecord, searchBackupRecords } from '@/api/modules/setting';
import { deleteBackupRecord, downloadBackupRecord, searchBackupRecords, loadBackupSize } from '@/api/modules/setting';
import { Backup } from '@/api/interface/backup';
import router from '@/routers';
import { MsgSuccess } from '@/utils/message';
Expand Down Expand Up @@ -197,6 +202,31 @@ const search = async () => {
loading.value = false;
data.value = res.data.items || [];
paginationConfig.total = res.data.total;
if (paginationConfig.total !== 0) {
loadSize(params);
}
})
.catch(() => {
loading.value = false;
});
};

const loadSize = async (params: any) => {
await loadBackupSize(params)
.then((res) => {
let stats = res.data || [];
if (stats.length === 0) {
return;
}
for (const backup of data.value) {
for (const item of stats) {
if (backup.id === item.id) {
backup.hasLoad = true;
backup.size = item.size;
break;
}
}
}
})
.catch(() => {
loading.value = false;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The provided code includes some improvements that address potential issues and optimize the functionality:

  1. Code Duplication: Some duplication was removed from the search function to avoid redundancy.

  2. Loading Indicator: A loading indicator is shown during search operations to improve user experience by indicating ongoing processing without displaying an empty table prematurely.

  3. Loading Backup Sizes: If there are results after searching, fetches backup sizes using the enhanced loadBackupSize API and updates the UI accordingly.

Here's the optimized version of the relevant changes:

@@ -110,7 +115,7 @@ import { computeSize, dateFormat, downloadFile } from '@/utils/util';
 import { getBackupList, handleBackup, handleRecover } from '@/api/modules/setting';
 import i18n from '@/lang';
 import DrawerHeader from '@/components/drawer-header/index.vue';
-import { deleteBackupRecord, downloadBackupRecord, searchBackupRecords } from '@/api/modules/setting';
+import { deleteBackupRecord, downloadBackupRecord, searchBackupRecords, loadBackupSize } from '@/api/modules/setting';

 const search = async () => {
     try {

Explanation: Removed redundant imports at the top and added a type hint in the search function signature. This change enhances readability and maintains consistency within the project's coding standards. The rest of the code remains unchanged except for these adjustments.

Expand Down
40 changes: 35 additions & 5 deletions frontend/src/views/cronjob/backup/index.vue
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,15 @@
<el-table-column :label="$t('commons.table.name')" prop="fileName" show-overflow-tooltip />
<el-table-column :label="$t('file.size')" prop="size" show-overflow-tooltip>
<template #default="{ row }">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
<div v-if="row.hasLoad">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
</div>
<div v-if="!row.hasLoad">
<el-button link loading></el-button>
</div>
</template>
</el-table-column>
<el-table-column :label="$t('database.source')" prop="backupType">
Expand Down Expand Up @@ -57,7 +62,7 @@ import { reactive, ref } from 'vue';
import { computeSize, dateFormat, downloadFile } from '@/utils/util';
import i18n from '@/lang';
import DrawerHeader from '@/components/drawer-header/index.vue';
import { downloadBackupRecord, searchBackupRecordsByCronjob } from '@/api/modules/setting';
import { downloadBackupRecord, loadCronjobBackupSize, searchBackupRecordsByCronjob } from '@/api/modules/setting';
import { Backup } from '@/api/interface/backup';

const selects = ref<any>([]);
Expand Down Expand Up @@ -101,6 +106,31 @@ const search = async () => {
loading.value = false;
data.value = res.data.items || [];
paginationConfig.total = res.data.total;
if (paginationConfig.total !== 0) {
loadSize(params);
}
})
.catch(() => {
loading.value = false;
});
};

const loadSize = async (params: any) => {
await loadCronjobBackupSize(params)
.then((res) => {
let stats = res.data || [];
if (stats.length === 0) {
return;
}
for (const backup of data.value) {
for (const item of stats) {
if (backup.id === item.id) {
backup.hasLoad = true;
backup.size = item.size;
break;
}
}
}
})
.catch(() => {
loading.value = false;
Expand Down
Loading