From f9a2cbffa94145b84182c7939ab19a6c39106e8f Mon Sep 17 00:00:00 2001 From: Wenbo han Date: Mon, 13 Feb 2023 21:16:28 +0800 Subject: [PATCH 1/2] Add database pool --- database/gorm/gorm.go | 18 ++++ database/gorm/test_utils.go | 174 ++++++++++++++++++++---------------- 2 files changed, 113 insertions(+), 79 deletions(-) diff --git a/database/gorm/gorm.go b/database/gorm/gorm.go index 701bad446..2900358d8 100644 --- a/database/gorm/gorm.go +++ b/database/gorm/gorm.go @@ -40,6 +40,10 @@ func New(connection string) (*gorm.DB, error) { return nil, err } + if err := configurePool(instance); err != nil { + return nil, err + } + if err := readWriteSeparate(connection, instance, readConfigs, writeConfigs); err != nil { return nil, err } @@ -47,6 +51,20 @@ func New(connection string) (*gorm.DB, error) { return instance, err } +func configurePool(db *gorm.DB) error { + sqlDB, err := db.DB() + if err != nil { + return err + } + + sqlDB.SetMaxIdleConns(facades.Config.GetInt("database.pool.max_idle_conns", 10)) + sqlDB.SetMaxOpenConns(facades.Config.GetInt("database.pool.max_open_conns", 100)) + sqlDB.SetConnMaxIdleTime(time.Duration(facades.Config.GetInt("database.pool.conn_max_idletime", 3600)) * time.Second) + sqlDB.SetConnMaxLifetime(time.Duration(facades.Config.GetInt("database.pool.conn_max_lifetime", 3600)) * time.Second) + + return nil +} + func instance(dialector gorm.Dialector) (*gorm.DB, error) { var logLevel gormLogger.LogLevel if facades.Config.GetBool("app.debug") { diff --git a/database/gorm/test_utils.go b/database/gorm/test_utils.go index 3014f00fc..6fe065cce 100644 --- a/database/gorm/test_utils.go +++ b/database/gorm/test_utils.go @@ -6,6 +6,7 @@ import ( "github.com/ory/dockertest/v3" "github.com/spf13/cast" + configmocks "github.com/goravel/framework/contracts/config/mocks" "github.com/goravel/framework/contracts/database" contractsorm "github.com/goravel/framework/contracts/database/orm" testingdocker "github.com/goravel/framework/testing/docker" @@ -85,6 +86,13 @@ func SqlserverDocker() (*dockertest.Pool, *dockertest.Resource, contractsorm.DB, return pool, resource, db, nil } +func mockPool(mockConfig *configmocks.Config) { + mockConfig.On("GetInt", "database.pool.max_idle_conns", 10).Return(10) + mockConfig.On("GetInt", "database.pool.max_open_conns", 100).Return(100) + mockConfig.On("GetInt", "database.pool.conn_max_idletime", 3600).Return(3600) + mockConfig.On("GetInt", "database.pool.conn_max_lifetime", 3600).Return(3600) +} + func mockSingleMysql(port int) { mockConfig := mock.Config() mockConfig.On("Get", "database.connections.mysql.read").Return(nil) @@ -98,6 +106,7 @@ func mockSingleMysql(port int) { mockConfig.On("GetString", "database.connections.mysql.loc").Return("Local") mockConfig.On("GetString", "database.connections.mysql.database").Return("mysql") mockConfig.On("GetInt", "database.connections.mysql.port").Return(port) + mockPool(mockConfig) } func mockReadWriteMysql(readPort, writePort int) { @@ -114,6 +123,92 @@ func mockReadWriteMysql(readPort, writePort int) { mockConfig.On("GetString", "database.connections.mysql.loc").Return("Local") mockConfig.On("GetString", "database.connections.mysql.database").Return("mysql") mockConfig.On("GetString", "database.connections.mysql.database").Return(dbDatabase) + mockPool(mockConfig) +} + +func mockSinglePostgresql(port int) { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.postgresql.read").Return(nil) + mockConfig.On("Get", 
"database.connections.postgresql.write").Return(nil) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.postgresql.driver").Return(contractsorm.DriverPostgresql.String()) + mockConfig.On("GetString", "database.connections.postgresql.host").Return("localhost") + mockConfig.On("GetString", "database.connections.postgresql.username").Return(dbUser) + mockConfig.On("GetString", "database.connections.postgresql.password").Return(dbPassword) + mockConfig.On("GetString", "database.connections.postgresql.sslmode").Return("disable") + mockConfig.On("GetString", "database.connections.postgresql.timezone").Return("UTC") + mockConfig.On("GetString", "database.connections.postgresql.database").Return("postgres") + mockConfig.On("GetInt", "database.connections.postgresql.port").Return(port) + mockPool(mockConfig) +} + +func mockReadWritePostgresql(readPort, writePort int) { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.postgresql.read").Return([]database.Config{ + {Host: "localhost", Port: readPort, Username: dbUser, Password: dbPassword}, + }) + mockConfig.On("Get", "database.connections.postgresql.write").Return([]database.Config{ + {Host: "localhost", Port: writePort, Username: dbUser, Password: dbPassword}, + }) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.postgresql.driver").Return(contractsorm.DriverPostgresql.String()) + mockConfig.On("GetString", "database.connections.postgresql.sslmode").Return("disable") + mockConfig.On("GetString", "database.connections.postgresql.timezone").Return("UTC") + mockConfig.On("GetString", "database.connections.postgresql.database").Return("postgres") + mockPool(mockConfig) +} + +func mockSingleSqlite(dbName string) { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.sqlite.read").Return(nil) + mockConfig.On("Get", "database.connections.sqlite.write").Return(nil) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.sqlite.driver").Return(contractsorm.DriverSqlite.String()) + mockConfig.On("GetString", "database.connections.sqlite.database").Return(dbName) + mockPool(mockConfig) +} + +func mockReadWriteSqlite() { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.sqlite.read").Return([]database.Config{ + {Database: dbDatabase}, + }) + mockConfig.On("Get", "database.connections.sqlite.write").Return([]database.Config{ + {Database: dbDatabase1}, + }) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.sqlite.driver").Return(contractsorm.DriverSqlite.String()) + mockPool(mockConfig) +} + +func mockSingleSqlserver(port int) { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.sqlserver.read").Return(nil) + mockConfig.On("Get", "database.connections.sqlserver.write").Return(nil) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.sqlserver.driver").Return(contractsorm.DriverSqlserver.String()) + mockConfig.On("GetString", "database.connections.sqlserver.host").Return("localhost") + mockConfig.On("GetString", "database.connections.sqlserver.username").Return(dbUser1) + mockConfig.On("GetString", "database.connections.sqlserver.password").Return(dbPassword) + mockConfig.On("GetString", "database.connections.sqlserver.database").Return("msdb") + mockConfig.On("GetString", 
"database.connections.sqlserver.charset").Return("utf8mb4") + mockConfig.On("GetInt", "database.connections.sqlserver.port").Return(port) + mockPool(mockConfig) +} + +func mockReadWriteSqlserver(readPort, writePort int) { + mockConfig := mock.Config() + mockConfig.On("Get", "database.connections.sqlserver.read").Return([]database.Config{ + {Host: "localhost", Port: readPort, Username: dbUser1, Password: dbPassword}, + }) + mockConfig.On("Get", "database.connections.sqlserver.write").Return([]database.Config{ + {Host: "localhost", Port: writePort, Username: dbUser1, Password: dbPassword}, + }) + mockConfig.On("GetBool", "app.debug").Return(true) + mockConfig.On("GetString", "database.connections.sqlserver.driver").Return(contractsorm.DriverSqlserver.String()) + mockConfig.On("GetString", "database.connections.sqlserver.database").Return("msdb") + mockConfig.On("GetString", "database.connections.sqlserver.charset").Return("utf8mb4") + mockPool(mockConfig) } func mysqlDockerDB(pool *dockertest.Pool, createTable bool) (contractsorm.DB, error) { @@ -169,36 +264,6 @@ func initMysql(pool *dockertest.Pool) (contractsorm.DB, error) { return db, nil } -func mockSinglePostgresql(port int) { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.postgresql.read").Return(nil) - mockConfig.On("Get", "database.connections.postgresql.write").Return(nil) - mockConfig.On("GetBool", "app.debug").Return(true) - mockConfig.On("GetString", "database.connections.postgresql.driver").Return(contractsorm.DriverPostgresql.String()) - mockConfig.On("GetString", "database.connections.postgresql.host").Return("localhost") - mockConfig.On("GetString", "database.connections.postgresql.username").Return(dbUser) - mockConfig.On("GetString", "database.connections.postgresql.password").Return(dbPassword) - mockConfig.On("GetString", "database.connections.postgresql.sslmode").Return("disable") - mockConfig.On("GetString", "database.connections.postgresql.timezone").Return("UTC") - mockConfig.On("GetString", "database.connections.postgresql.database").Return("postgres") - mockConfig.On("GetInt", "database.connections.postgresql.port").Return(port) -} - -func mockReadWritePostgresql(readPort, writePort int) { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.postgresql.read").Return([]database.Config{ - {Host: "localhost", Port: readPort, Username: dbUser, Password: dbPassword}, - }) - mockConfig.On("Get", "database.connections.postgresql.write").Return([]database.Config{ - {Host: "localhost", Port: writePort, Username: dbUser, Password: dbPassword}, - }) - mockConfig.On("GetBool", "app.debug").Return(true) - mockConfig.On("GetString", "database.connections.postgresql.driver").Return(contractsorm.DriverPostgresql.String()) - mockConfig.On("GetString", "database.connections.postgresql.sslmode").Return("disable") - mockConfig.On("GetString", "database.connections.postgresql.timezone").Return("UTC") - mockConfig.On("GetString", "database.connections.postgresql.database").Return("postgres") -} - func postgresqlDockerDB(pool *dockertest.Pool, createTable bool) (contractsorm.DB, error) { db, err := initPostgresql(pool) if err != nil { @@ -254,27 +319,6 @@ func initPostgresql(pool *dockertest.Pool) (contractsorm.DB, error) { return db, nil } -func mockSingleSqlite(dbName string) { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.sqlite.read").Return(nil) - mockConfig.On("Get", "database.connections.sqlite.write").Return(nil) - mockConfig.On("GetBool", 
"app.debug").Return(true) - mockConfig.On("GetString", "database.connections.sqlite.driver").Return(contractsorm.DriverSqlite.String()) - mockConfig.On("GetString", "database.connections.sqlite.database").Return(dbName) -} - -func mockReadWriteSqlite() { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.sqlite.read").Return([]database.Config{ - {Database: dbDatabase}, - }) - mockConfig.On("Get", "database.connections.sqlite.write").Return([]database.Config{ - {Database: dbDatabase1}, - }) - mockConfig.On("GetBool", "app.debug").Return(true) - mockConfig.On("GetString", "database.connections.sqlite.driver").Return(contractsorm.DriverSqlite.String()) -} - func sqliteDockerDB(pool *dockertest.Pool, createTable bool) (contractsorm.DB, error) { db, err := initSqlite(pool) if err != nil { @@ -323,34 +367,6 @@ func initSqlite(pool *dockertest.Pool) (contractsorm.DB, error) { return db, nil } -func mockSingleSqlserver(port int) { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.sqlserver.read").Return(nil) - mockConfig.On("Get", "database.connections.sqlserver.write").Return(nil) - mockConfig.On("GetBool", "app.debug").Return(true) - mockConfig.On("GetString", "database.connections.sqlserver.driver").Return(contractsorm.DriverSqlserver.String()) - mockConfig.On("GetString", "database.connections.sqlserver.host").Return("localhost") - mockConfig.On("GetString", "database.connections.sqlserver.username").Return(dbUser1) - mockConfig.On("GetString", "database.connections.sqlserver.password").Return(dbPassword) - mockConfig.On("GetString", "database.connections.sqlserver.database").Return("msdb") - mockConfig.On("GetString", "database.connections.sqlserver.charset").Return("utf8mb4") - mockConfig.On("GetInt", "database.connections.sqlserver.port").Return(port) -} - -func mockReadWriteSqlserver(readPort, writePort int) { - mockConfig := mock.Config() - mockConfig.On("Get", "database.connections.sqlserver.read").Return([]database.Config{ - {Host: "localhost", Port: readPort, Username: dbUser1, Password: dbPassword}, - }) - mockConfig.On("Get", "database.connections.sqlserver.write").Return([]database.Config{ - {Host: "localhost", Port: writePort, Username: dbUser1, Password: dbPassword}, - }) - mockConfig.On("GetBool", "app.debug").Return(true) - mockConfig.On("GetString", "database.connections.sqlserver.driver").Return(contractsorm.DriverSqlserver.String()) - mockConfig.On("GetString", "database.connections.sqlserver.database").Return("msdb") - mockConfig.On("GetString", "database.connections.sqlserver.charset").Return("utf8mb4") -} - func sqlserverDockerDB(pool *dockertest.Pool, createTable bool) (contractsorm.DB, error) { db, err := initSqlserver(pool) if err != nil { From ac7ba2df6fdd27b19ca4d6e2c061fb4c17becd5b Mon Sep 17 00:00:00 2001 From: Wenbo han Date: Mon, 13 Feb 2023 22:20:59 +0800 Subject: [PATCH 2/2] Optimize air --- contracts/filesystem/storage.go | 32 +-- contracts/http/mocks/Response.go | 10 + contracts/http/mocks/ResponseSuccess.go | 5 + filesystem/cos.go | 364 ++++++++++++------------ filesystem/local.go | 258 ++++++++--------- filesystem/oss.go | 276 +++++++++--------- filesystem/s3.go | 312 ++++++++++---------- foundation/application.go | 11 +- 8 files changed, 646 insertions(+), 622 deletions(-) diff --git a/contracts/filesystem/storage.go b/contracts/filesystem/storage.go index f81f3977f..e6de89a0c 100644 --- a/contracts/filesystem/storage.go +++ b/contracts/filesystem/storage.go @@ -13,27 +13,27 @@ type Storage interface { 
//go:generate mockery --name=Driver type Driver interface { - WithContext(ctx context.Context) Driver + AllDirectories(path string) ([]string, error) + AllFiles(path string) ([]string, error) + Copy(oldFile, newFile string) error + Delete(file ...string) error + DeleteDirectory(directory string) error + Directories(path string) ([]string, error) + // Download(path string) + Exists(file string) bool + Files(path string) ([]string, error) + Get(file string) (string, error) + MakeDirectory(directory string) error + Missing(file string) bool + Move(oldFile, newFile string) error + Path(file string) string Put(file, content string) error PutFile(path string, source File) (string, error) PutFileAs(path string, source File, name string) (string, error) - Get(file string) (string, error) Size(file string) (int64, error) - Path(file string) string - Exists(file string) bool - Missing(file string) bool - // Download(path string) - Url(file string) string TemporaryUrl(file string, time time.Time) (string, error) - Copy(oldFile, newFile string) error - Move(oldFile, newFile string) error - Delete(file ...string) error - Files(path string) ([]string, error) - AllFiles(path string) ([]string, error) - Directories(path string) ([]string, error) - AllDirectories(path string) ([]string, error) - MakeDirectory(directory string) error - DeleteDirectory(directory string) error + WithContext(ctx context.Context) Driver + Url(file string) string } //go:generate mockery --name=File diff --git a/contracts/http/mocks/Response.go b/contracts/http/mocks/Response.go index 7cfe43fee..8ec9659a0 100644 --- a/contracts/http/mocks/Response.go +++ b/contracts/http/mocks/Response.go @@ -14,6 +14,11 @@ type Response struct { mock.Mock } +// Data provides a mock function with given fields: code, contentType, data +func (_m *Response) Data(code int, contentType string, data []byte) { + _m.Called(code, contentType, data) +} + // Download provides a mock function with given fields: filepath, filename func (_m *Response) Download(filepath string, filename string) { _m.Called(filepath, filename) @@ -61,6 +66,11 @@ func (_m *Response) Origin() http.ResponseOrigin { return r0 } +// Redirect provides a mock function with given fields: code, location +func (_m *Response) Redirect(code int, location string) { + _m.Called(code, location) +} + // String provides a mock function with given fields: code, format, values func (_m *Response) String(code int, format string, values ...interface{}) { var _ca []interface{} diff --git a/contracts/http/mocks/ResponseSuccess.go b/contracts/http/mocks/ResponseSuccess.go index 4ef1ff074..2710c3154 100644 --- a/contracts/http/mocks/ResponseSuccess.go +++ b/contracts/http/mocks/ResponseSuccess.go @@ -9,6 +9,11 @@ type ResponseSuccess struct { mock.Mock } +// Data provides a mock function with given fields: contentType, data +func (_m *ResponseSuccess) Data(contentType string, data []byte) { + _m.Called(contentType, data) +} + // Json provides a mock function with given fields: obj func (_m *ResponseSuccess) Json(obj interface{}) { _m.Called(obj) diff --git a/filesystem/cos.go b/filesystem/cos.go index d605e89cb..53c4f7e07 100644 --- a/filesystem/cos.go +++ b/filesystem/cos.go @@ -63,109 +63,70 @@ func NewCos(ctx context.Context, disk string) (*Cos, error) { }, nil } -func (r *Cos) WithContext(ctx context.Context) filesystem.Driver { - driver, err := NewCos(ctx, r.disk) - if err != nil { - facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) - } - - return driver -} - -func (r *Cos) Put(file string, 
content string) error { - tempFile, err := r.tempFile(content) - defer os.Remove(tempFile.Name()) - if err != nil { - return err - } - - _, _, err = r.instance.Object.Upload( - r.ctx, file, tempFile.Name(), nil, - ) - - return err -} - -func (r *Cos) PutFile(filePath string, source filesystem.File) (string, error) { - return r.PutFileAs(filePath, source, str.Random(40)) -} - -func (r *Cos) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { - fullPath, err := fullPathOfFile(filePath, source, name) - if err != nil { - return "", err - } - - if _, _, err := r.instance.Object.Upload( - r.ctx, fullPath, source.File(), nil, - ); err != nil { - return "", err - } - - return fullPath, nil -} - -func (r *Cos) Get(file string) (string, error) { - opt := &cos.ObjectGetOptions{ - ResponseContentType: "text/html", - } - resp, err := r.instance.Object.Get(r.ctx, file, opt) - if err != nil { - return "", err - } - - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - - return string(data), nil -} - -func (r *Cos) Size(file string) (int64, error) { - resp, err := r.instance.Object.Head(r.ctx, file, nil) - if err != nil { - return 0, err +func (r *Cos) AllDirectories(path string) ([]string, error) { + var directories []string + var marker string + validPath := validPath(path) + opt := &cos.BucketGetOptions{ + Prefix: validPath, + Delimiter: "/", + MaxKeys: 1000, } - - contentLength := resp.Header.Get("Content-Length") - contentLengthInt, err := strconv.ParseInt(contentLength, 10, 64) - if err != nil { - return 0, err + isTruncated := true + for isTruncated { + opt.Marker = marker + v, _, err := r.instance.Bucket.Get(context.Background(), opt) + if err != nil { + return nil, err + } + wg := sync.WaitGroup{} + for _, commonPrefix := range v.CommonPrefixes { + directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) + wg.Add(1) + subDirectories, err := r.AllDirectories(commonPrefix) + if err != nil { + return nil, err + } + for _, subDirectory := range subDirectories { + if strings.HasSuffix(subDirectory, "/") { + directories = append(directories, strings.ReplaceAll(commonPrefix+subDirectory, validPath, "")) + } + } + wg.Done() + } + wg.Wait() + isTruncated = v.IsTruncated + marker = v.NextMarker } - return contentLengthInt, nil -} - -func (r *Cos) Path(file string) string { - return file + return directories, nil } -func (r *Cos) Exists(file string) bool { - ok, err := r.instance.Object.IsExist(r.ctx, file) - if err != nil { - return false +func (r *Cos) AllFiles(path string) ([]string, error) { + var files []string + var marker string + validPath := validPath(path) + opt := &cos.BucketGetOptions{ + Prefix: validPath, + MaxKeys: 1000, } - - return ok -} - -func (r *Cos) Missing(file string) bool { - return !r.Exists(file) -} - -func (r *Cos) Url(file string) string { - objectUrl := r.instance.Object.GetObjectURL(file) - - return objectUrl.String() -} - -func (r *Cos) TemporaryUrl(file string, time time.Time) (string, error) { - // 获取预签名URL - presignedURL, err := r.instance.Object.GetPresignedURL(r.ctx, http.MethodGet, file, r.accessKeyId, r.accessKeySecret, time.Sub(supporttime.Now()), nil) - if err != nil { - return "", err + isTruncated := true + for isTruncated { + opt.Marker = marker + v, _, err := r.instance.Bucket.Get(r.ctx, opt) + if err != nil { + return nil, err + } + for _, content := range v.Contents { + if !strings.HasSuffix(content.Key, "/") { + files = append(files, strings.ReplaceAll(content.Key, validPath, "")) + } + } + 
isTruncated = v.IsTruncated + marker = v.NextMarker } - return presignedURL.String(), nil + return files, nil } func (r *Cos) Copy(originFile, targetFile string) error { @@ -177,14 +138,6 @@ func (r *Cos) Copy(originFile, targetFile string) error { return nil } -func (r *Cos) Move(oldFile, newFile string) error { - if err := r.Copy(oldFile, newFile); err != nil { - return err - } - - return r.Delete(oldFile) -} - func (r *Cos) Delete(files ...string) error { var obs []cos.Object for _, v := range files { @@ -202,18 +155,6 @@ func (r *Cos) Delete(files ...string) error { return nil } -func (r *Cos) MakeDirectory(directory string) error { - if !strings.HasSuffix(directory, "/") { - directory += "/" - } - - if _, err := r.instance.Object.Put(r.ctx, directory, strings.NewReader(""), nil); err != nil { - return err - } - - return nil -} - func (r *Cos) DeleteDirectory(directory string) error { if !strings.HasSuffix(directory, "/") { directory += "/" @@ -248,8 +189,8 @@ func (r *Cos) DeleteDirectory(directory string) error { return nil } -func (r *Cos) Files(path string) ([]string, error) { - var files []string +func (r *Cos) Directories(path string) ([]string, error) { + var directories []string var marker string validPath := validPath(path) opt := &cos.BucketGetOptions{ @@ -260,27 +201,37 @@ func (r *Cos) Files(path string) ([]string, error) { isTruncated := true for isTruncated { opt.Marker = marker - v, _, err := r.instance.Bucket.Get(r.ctx, opt) + v, _, err := r.instance.Bucket.Get(context.Background(), opt) if err != nil { return nil, err } - for _, content := range v.Contents { - files = append(files, strings.ReplaceAll(content.Key, validPath, "")) + for _, commonPrefix := range v.CommonPrefixes { + directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) } isTruncated = v.IsTruncated marker = v.NextMarker } - return files, nil + return directories, nil } -func (r *Cos) AllFiles(path string) ([]string, error) { +func (r *Cos) Exists(file string) bool { + ok, err := r.instance.Object.IsExist(r.ctx, file) + if err != nil { + return false + } + + return ok +} + +func (r *Cos) Files(path string) ([]string, error) { var files []string var marker string validPath := validPath(path) opt := &cos.BucketGetOptions{ - Prefix: validPath, - MaxKeys: 1000, + Prefix: validPath, + Delimiter: "/", + MaxKeys: 1000, } isTruncated := true for isTruncated { @@ -290,9 +241,7 @@ func (r *Cos) AllFiles(path string) ([]string, error) { return nil, err } for _, content := range v.Contents { - if !strings.HasSuffix(content.Key, "/") { - files = append(files, strings.ReplaceAll(content.Key, validPath, "")) - } + files = append(files, strings.ReplaceAll(content.Key, validPath, "")) } isTruncated = v.IsTruncated marker = v.NextMarker @@ -301,69 +250,120 @@ func (r *Cos) AllFiles(path string) ([]string, error) { return files, nil } -func (r *Cos) Directories(path string) ([]string, error) { - var directories []string - var marker string - validPath := validPath(path) - opt := &cos.BucketGetOptions{ - Prefix: validPath, - Delimiter: "/", - MaxKeys: 1000, +func (r *Cos) Get(file string) (string, error) { + opt := &cos.ObjectGetOptions{ + ResponseContentType: "text/html", } - isTruncated := true - for isTruncated { - opt.Marker = marker - v, _, err := r.instance.Bucket.Get(context.Background(), opt) - if err != nil { - return nil, err - } - for _, commonPrefix := range v.CommonPrefixes { - directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) - } - isTruncated = 
v.IsTruncated - marker = v.NextMarker + resp, err := r.instance.Object.Get(r.ctx, file, opt) + if err != nil { + return "", err } - return directories, nil + data, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + + return string(data), nil } -func (r *Cos) AllDirectories(path string) ([]string, error) { - var directories []string - var marker string - validPath := validPath(path) - opt := &cos.BucketGetOptions{ - Prefix: validPath, - Delimiter: "/", - MaxKeys: 1000, +func (r *Cos) MakeDirectory(directory string) error { + if !strings.HasSuffix(directory, "/") { + directory += "/" } - isTruncated := true - for isTruncated { - opt.Marker = marker - v, _, err := r.instance.Bucket.Get(context.Background(), opt) - if err != nil { - return nil, err - } - wg := sync.WaitGroup{} - for _, commonPrefix := range v.CommonPrefixes { - directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) - wg.Add(1) - subDirectories, err := r.AllDirectories(commonPrefix) - if err != nil { - return nil, err - } - for _, subDirectory := range subDirectories { - if strings.HasSuffix(subDirectory, "/") { - directories = append(directories, strings.ReplaceAll(commonPrefix+subDirectory, validPath, "")) - } - } - wg.Done() - } - wg.Wait() - isTruncated = v.IsTruncated - marker = v.NextMarker + + if _, err := r.instance.Object.Put(r.ctx, directory, strings.NewReader(""), nil); err != nil { + return err } - return directories, nil + return nil +} + +func (r *Cos) Missing(file string) bool { + return !r.Exists(file) +} + +func (r *Cos) Move(oldFile, newFile string) error { + if err := r.Copy(oldFile, newFile); err != nil { + return err + } + + return r.Delete(oldFile) +} + +func (r *Cos) Path(file string) string { + return file +} + +func (r *Cos) Put(file string, content string) error { + tempFile, err := r.tempFile(content) + defer os.Remove(tempFile.Name()) + if err != nil { + return err + } + + _, _, err = r.instance.Object.Upload( + r.ctx, file, tempFile.Name(), nil, + ) + + return err +} + +func (r *Cos) PutFile(filePath string, source filesystem.File) (string, error) { + return r.PutFileAs(filePath, source, str.Random(40)) +} + +func (r *Cos) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { + fullPath, err := fullPathOfFile(filePath, source, name) + if err != nil { + return "", err + } + + if _, _, err := r.instance.Object.Upload( + r.ctx, fullPath, source.File(), nil, + ); err != nil { + return "", err + } + + return fullPath, nil +} + +func (r *Cos) Size(file string) (int64, error) { + resp, err := r.instance.Object.Head(r.ctx, file, nil) + if err != nil { + return 0, err + } + + contentLength := resp.Header.Get("Content-Length") + contentLengthInt, err := strconv.ParseInt(contentLength, 10, 64) + if err != nil { + return 0, err + } + + return contentLengthInt, nil +} + +func (r *Cos) TemporaryUrl(file string, time time.Time) (string, error) { + // 获取预签名URL + presignedURL, err := r.instance.Object.GetPresignedURL(r.ctx, http.MethodGet, file, r.accessKeyId, r.accessKeySecret, time.Sub(supporttime.Now()), nil) + if err != nil { + return "", err + } + + return presignedURL.String(), nil +} + +func (r *Cos) WithContext(ctx context.Context) filesystem.Driver { + driver, err := NewCos(ctx, r.disk) + if err != nil { + facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) + } + + return driver +} + +func (r *Cos) Url(file string) string { + objectUrl := r.instance.Object.GetObjectURL(file) + + return objectUrl.String() } func (r *Cos) tempFile(content 
string) (*os.File, error) { diff --git a/filesystem/local.go b/filesystem/local.go index f24a9d22e..3d239531a 100644 --- a/filesystem/local.go +++ b/filesystem/local.go @@ -29,76 +29,86 @@ func NewLocal(disk string) (*Local, error) { }, nil } -func (r *Local) WithContext(ctx context.Context) filesystem.Driver { - return r -} +func (r *Local) AllDirectories(path string) ([]string, error) { + var directories []string + err := filepath.Walk(r.fullPath(path), func(fullPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + realPath := strings.ReplaceAll(fullPath, r.fullPath(path), "") + realPath = strings.TrimPrefix(realPath, "/") + if realPath != "" { + directories = append(directories, realPath+"/") + } + } -func (r *Local) Put(file, content string) error { - file = r.fullPath(file) - if err := os.MkdirAll(path.Dir(file), os.ModePerm); err != nil { - return err - } + return nil + }) - f, err := os.Create(file) - defer f.Close() - if err != nil { - return err - } + return directories, err +} - if _, err = f.WriteString(content); err != nil { - return err - } +func (r *Local) AllFiles(path string) ([]string, error) { + var files []string + err := filepath.Walk(r.fullPath(path), func(fullPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + files = append(files, strings.ReplaceAll(fullPath, r.fullPath(path)+"/", "")) + } - return nil -} + return nil + }) -func (r *Local) PutFile(filePath string, source filesystem.File) (string, error) { - return r.PutFileAs(filePath, source, str.Random(40)) + return files, err } -func (r *Local) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { - data, err := ioutil.ReadFile(source.File()) +func (r *Local) Copy(originFile, targetFile string) error { + content, err := r.Get(originFile) if err != nil { - return "", err + return err } - fullPath, err := fullPathOfFile(filePath, source, name) - if err != nil { - return "", err - } + return r.Put(targetFile, content) +} - if err := r.Put(fullPath, string(data)); err != nil { - return "", err - } +func (r *Local) Delete(files ...string) error { + for _, file := range files { + fileInfo, err := os.Stat(r.fullPath(file)) + if err != nil { + return err + } - return fullPath, nil -} + if fileInfo.IsDir() { + return errors.New("can't delete directory, please use DeleteDirectory") + } + } -func (r *Local) Get(file string) (string, error) { - data, err := ioutil.ReadFile(r.fullPath(file)) - if err != nil { - return "", err + for _, file := range files { + if err := os.Remove(r.fullPath(file)); err != nil { + return err + } } - return string(data), nil + return nil } -func (r *Local) Size(file string) (int64, error) { - fileInfo, err := os.Open(r.fullPath(file)) - if err != nil { - return 0, err - } +func (r *Local) DeleteDirectory(directory string) error { + return os.RemoveAll(r.fullPath(directory)) +} - fi, err := fileInfo.Stat() - if err != nil { - return 0, err +func (r *Local) Directories(path string) ([]string, error) { + var directories []string + fileInfo, _ := ioutil.ReadDir(r.fullPath(path)) + for _, f := range fileInfo { + if f.IsDir() { + directories = append(directories, f.Name()+"/") + } } - return fi.Size(), nil -} - -func (r *Local) Path(file string) string { - return support.RootPath + "/" + strings.TrimPrefix(strings.TrimPrefix(r.fullPath(file), "/"), "./") + return directories, nil } func (r *Local) Exists(file string) bool { @@ -109,25 +119,36 @@ func (r *Local) 
Exists(file string) bool { return true } -func (r *Local) Missing(file string) bool { - return !r.Exists(file) -} - -func (r *Local) Url(file string) string { - return strings.TrimSuffix(r.url, "/") + "/" + strings.TrimPrefix(file, "/") -} +func (r *Local) Files(path string) ([]string, error) { + var files []string + fileInfo, err := ioutil.ReadDir(r.fullPath(path)) + if err != nil { + return nil, err + } + for _, f := range fileInfo { + if !f.IsDir() { + files = append(files, f.Name()) + } + } -func (r *Local) TemporaryUrl(file string, time time.Time) (string, error) { - return r.Url(file), nil + return files, nil } -func (r *Local) Copy(originFile, targetFile string) error { - content, err := r.Get(originFile) +func (r *Local) Get(file string) (string, error) { + data, err := ioutil.ReadFile(r.fullPath(file)) if err != nil { - return err + return "", err } - return r.Put(targetFile, content) + return string(data), nil +} + +func (r *Local) MakeDirectory(directory string) error { + return os.MkdirAll(path.Dir(r.fullPath(directory)+"/"), os.ModePerm) +} + +func (r *Local) Missing(file string) bool { + return !r.Exists(file) } func (r *Local) Move(oldFile, newFile string) error { @@ -143,96 +164,75 @@ func (r *Local) Move(oldFile, newFile string) error { return nil } -func (r *Local) Delete(files ...string) error { - for _, file := range files { - fileInfo, err := os.Stat(r.fullPath(file)) - if err != nil { - return err - } +func (r *Local) Path(file string) string { + return support.RootPath + "/" + strings.TrimPrefix(strings.TrimPrefix(r.fullPath(file), "/"), "./") +} - if fileInfo.IsDir() { - return errors.New("can't delete directory, please use DeleteDirectory") - } +func (r *Local) Put(file, content string) error { + file = r.fullPath(file) + if err := os.MkdirAll(path.Dir(file), os.ModePerm); err != nil { + return err } - for _, file := range files { - if err := os.Remove(r.fullPath(file)); err != nil { - return err - } + f, err := os.Create(file) + defer f.Close() + if err != nil { + return err + } + + if _, err = f.WriteString(content); err != nil { + return err } return nil } -func (r *Local) MakeDirectory(directory string) error { - return os.MkdirAll(path.Dir(r.fullPath(directory)+"/"), os.ModePerm) +func (r *Local) PutFile(filePath string, source filesystem.File) (string, error) { + return r.PutFileAs(filePath, source, str.Random(40)) } -func (r *Local) DeleteDirectory(directory string) error { - return os.RemoveAll(r.fullPath(directory)) -} +func (r *Local) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { + data, err := ioutil.ReadFile(source.File()) + if err != nil { + return "", err + } -func (r *Local) Files(path string) ([]string, error) { - var files []string - fileInfo, err := ioutil.ReadDir(r.fullPath(path)) + fullPath, err := fullPathOfFile(filePath, source, name) if err != nil { - return nil, err + return "", err } - for _, f := range fileInfo { - if !f.IsDir() { - files = append(files, f.Name()) - } + + if err := r.Put(fullPath, string(data)); err != nil { + return "", err } - return files, nil + return fullPath, nil } -func (r *Local) AllFiles(path string) ([]string, error) { - var files []string - err := filepath.Walk(r.fullPath(path), func(fullPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - files = append(files, strings.ReplaceAll(fullPath, r.fullPath(path)+"/", "")) - } - - return nil - }) - - return files, err -} +func (r *Local) Size(file string) (int64, error) { + 
fileInfo, err := os.Open(r.fullPath(file)) + if err != nil { + return 0, err + } -func (r *Local) Directories(path string) ([]string, error) { - var directories []string - fileInfo, _ := ioutil.ReadDir(r.fullPath(path)) - for _, f := range fileInfo { - if f.IsDir() { - directories = append(directories, f.Name()+"/") - } + fi, err := fileInfo.Stat() + if err != nil { + return 0, err } - return directories, nil + return fi.Size(), nil } -func (r *Local) AllDirectories(path string) ([]string, error) { - var directories []string - err := filepath.Walk(r.fullPath(path), func(fullPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - realPath := strings.ReplaceAll(fullPath, r.fullPath(path), "") - realPath = strings.TrimPrefix(realPath, "/") - if realPath != "" { - directories = append(directories, realPath+"/") - } - } +func (r *Local) TemporaryUrl(file string, time time.Time) (string, error) { + return r.Url(file), nil +} - return nil - }) +func (r *Local) WithContext(ctx context.Context) filesystem.Driver { + return r +} - return directories, err +func (r *Local) Url(file string) string { + return strings.TrimSuffix(r.url, "/") + "/" + strings.TrimPrefix(file, "/") } func (r *Local) fullPath(path string) string { diff --git a/filesystem/oss.go b/filesystem/oss.go index 1c1135998..422b00683 100644 --- a/filesystem/oss.go +++ b/filesystem/oss.go @@ -63,101 +63,49 @@ func NewOss(ctx context.Context, disk string) (*Oss, error) { }, nil } -func (r *Oss) WithContext(ctx context.Context) filesystem.Driver { - driver, err := NewOss(ctx, r.disk) - if err != nil { - facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) - } - - return driver -} - -func (r *Oss) Put(file string, content string) error { - tempFile, err := r.tempFile(content) - defer os.Remove(tempFile.Name()) - if err != nil { - return err - } - - return r.bucketInstance.PutObjectFromFile(file, tempFile.Name()) -} - -func (r *Oss) PutFile(filePath string, source filesystem.File) (string, error) { - return r.PutFileAs(filePath, source, str.Random(40)) -} - -func (r *Oss) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { - fullPath, err := fullPathOfFile(filePath, source, name) - if err != nil { - return "", err - } - - if err := r.bucketInstance.PutObjectFromFile(fullPath, source.File()); err != nil { - return "", err - } - - return fullPath, nil -} - -func (r *Oss) Get(file string) (string, error) { - res, err := r.bucketInstance.GetObject(file) - if err != nil { - return "", err - } - defer res.Close() - - data, err := ioutil.ReadAll(res) - - return string(data), nil -} - -func (r *Oss) Size(file string) (int64, error) { - props, err := r.bucketInstance.GetObjectDetailedMeta(file) +func (r *Oss) AllDirectories(path string) ([]string, error) { + var directories []string + validPath := validPath(path) + lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath), oss.Delimiter("/")) if err != nil { - return 0, err + return nil, err } - lens := props["Content-Length"] - if len(lens) == 0 { - return 0, nil - } + wg := sync.WaitGroup{} + for _, commonPrefix := range lsRes.CommonPrefixes { + directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) - contentLengthInt, err := strconv.ParseInt(lens[0], 10, 64) - if err != nil { - return 0, err + wg.Add(1) + subDirectories, err := r.AllDirectories(commonPrefix) + if err != nil { + return nil, err + } + for _, subDirectory := range subDirectories { + if 
strings.HasSuffix(subDirectory, "/") { + directories = append(directories, strings.ReplaceAll(commonPrefix+subDirectory, validPath, "")) + } + } + wg.Done() } + wg.Wait() - return contentLengthInt, nil -} - -func (r *Oss) Path(file string) string { - return file + return directories, nil } -func (r *Oss) Exists(file string) bool { - exist, err := r.bucketInstance.IsObjectExist(file) +func (r *Oss) AllFiles(path string) ([]string, error) { + var files []string + validPath := validPath(path) + lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath)) if err != nil { - return false + return nil, err } - - return exist -} - -func (r *Oss) Missing(file string) bool { - return !r.Exists(file) -} - -func (r *Oss) Url(file string) string { - return r.url + "/" + file -} - -func (r *Oss) TemporaryUrl(file string, time time.Time) (string, error) { - signedURL, err := r.bucketInstance.SignURL(file, oss.HTTPGet, int64(time.Sub(supporttime.Now()).Seconds())) - if err != nil { - return "", err + for _, object := range lsRes.Objects { + if !strings.HasSuffix(object.Key, "/") { + files = append(files, strings.ReplaceAll(object.Key, validPath, "")) + } } - return signedURL, nil + return files, nil } func (r *Oss) Copy(originFile, targetFile string) error { @@ -168,14 +116,6 @@ func (r *Oss) Copy(originFile, targetFile string) error { return nil } -func (r *Oss) Move(oldFile, newFile string) error { - if err := r.Copy(oldFile, newFile); err != nil { - return err - } - - return r.Delete(oldFile) -} - func (r *Oss) Delete(files ...string) error { _, err := r.bucketInstance.DeleteObjects(files) if err != nil { @@ -185,14 +125,6 @@ func (r *Oss) Delete(files ...string) error { return nil } -func (r *Oss) MakeDirectory(directory string) error { - if !strings.HasSuffix(directory, "/") { - directory += "/" - } - - return r.bucketInstance.PutObject(directory, bytes.NewReader([]byte(""))) -} - func (r *Oss) DeleteDirectory(directory string) error { if !strings.HasSuffix(directory, "/") { directory += "/" @@ -228,78 +160,146 @@ func (r *Oss) DeleteDirectory(directory string) error { return nil } -func (r *Oss) Files(path string) ([]string, error) { - var files []string +func (r *Oss) Directories(path string) ([]string, error) { + var directories []string validPath := validPath(path) lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath), oss.Delimiter("/")) if err != nil { return nil, err } - for _, object := range lsRes.Objects { - files = append(files, strings.ReplaceAll(object.Key, validPath, "")) + + for _, directory := range lsRes.CommonPrefixes { + directories = append(directories, strings.ReplaceAll(directory, validPath, "")) } - return files, nil + return directories, nil } -func (r *Oss) AllFiles(path string) ([]string, error) { +func (r *Oss) Exists(file string) bool { + exist, err := r.bucketInstance.IsObjectExist(file) + if err != nil { + return false + } + + return exist +} + +func (r *Oss) Files(path string) ([]string, error) { var files []string validPath := validPath(path) - lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath)) + lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath), oss.Delimiter("/")) if err != nil { return nil, err } for _, object := range lsRes.Objects { - if !strings.HasSuffix(object.Key, "/") { - files = append(files, strings.ReplaceAll(object.Key, validPath, "")) - } + files = append(files, strings.ReplaceAll(object.Key, validPath, 
"")) } return files, nil } -func (r *Oss) Directories(path string) ([]string, error) { - var directories []string - validPath := validPath(path) - lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath), oss.Delimiter("/")) +func (r *Oss) Get(file string) (string, error) { + res, err := r.bucketInstance.GetObject(file) if err != nil { - return nil, err + return "", err } + defer res.Close() - for _, directory := range lsRes.CommonPrefixes { - directories = append(directories, strings.ReplaceAll(directory, validPath, "")) + data, err := ioutil.ReadAll(res) + + return string(data), nil +} + +func (r *Oss) MakeDirectory(directory string) error { + if !strings.HasSuffix(directory, "/") { + directory += "/" } - return directories, nil + return r.bucketInstance.PutObject(directory, bytes.NewReader([]byte(""))) } -func (r *Oss) AllDirectories(path string) ([]string, error) { - var directories []string - validPath := validPath(path) - lsRes, err := r.bucketInstance.ListObjectsV2(oss.MaxKeys(MaxFileNum), oss.Prefix(validPath), oss.Delimiter("/")) +func (r *Oss) Missing(file string) bool { + return !r.Exists(file) +} + +func (r *Oss) Move(oldFile, newFile string) error { + if err := r.Copy(oldFile, newFile); err != nil { + return err + } + + return r.Delete(oldFile) +} + +func (r *Oss) Path(file string) string { + return file +} + +func (r *Oss) Put(file string, content string) error { + tempFile, err := r.tempFile(content) + defer os.Remove(tempFile.Name()) if err != nil { - return nil, err + return err } - wg := sync.WaitGroup{} - for _, commonPrefix := range lsRes.CommonPrefixes { - directories = append(directories, strings.ReplaceAll(commonPrefix, validPath, "")) + return r.bucketInstance.PutObjectFromFile(file, tempFile.Name()) +} - wg.Add(1) - subDirectories, err := r.AllDirectories(commonPrefix) - if err != nil { - return nil, err - } - for _, subDirectory := range subDirectories { - if strings.HasSuffix(subDirectory, "/") { - directories = append(directories, strings.ReplaceAll(commonPrefix+subDirectory, validPath, "")) - } - } - wg.Done() +func (r *Oss) PutFile(filePath string, source filesystem.File) (string, error) { + return r.PutFileAs(filePath, source, str.Random(40)) +} + +func (r *Oss) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { + fullPath, err := fullPathOfFile(filePath, source, name) + if err != nil { + return "", err } - wg.Wait() - return directories, nil + if err := r.bucketInstance.PutObjectFromFile(fullPath, source.File()); err != nil { + return "", err + } + + return fullPath, nil +} + +func (r *Oss) Size(file string) (int64, error) { + props, err := r.bucketInstance.GetObjectDetailedMeta(file) + if err != nil { + return 0, err + } + + lens := props["Content-Length"] + if len(lens) == 0 { + return 0, nil + } + + contentLengthInt, err := strconv.ParseInt(lens[0], 10, 64) + if err != nil { + return 0, err + } + + return contentLengthInt, nil +} + +func (r *Oss) TemporaryUrl(file string, time time.Time) (string, error) { + signedURL, err := r.bucketInstance.SignURL(file, oss.HTTPGet, int64(time.Sub(supporttime.Now()).Seconds())) + if err != nil { + return "", err + } + + return signedURL, nil +} + +func (r *Oss) WithContext(ctx context.Context) filesystem.Driver { + driver, err := NewOss(ctx, r.disk) + if err != nil { + facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) + } + + return driver +} + +func (r *Oss) Url(file string) string { + return r.url + "/" + file } func (r *Oss) tempFile(content 
string) (*os.File, error) { diff --git a/filesystem/s3.go b/filesystem/s3.go index 530a289ba..280f5954a 100644 --- a/filesystem/s3.go +++ b/filesystem/s3.go @@ -55,113 +55,58 @@ func NewS3(ctx context.Context, disk string) (*S3, error) { }, nil } -func (r *S3) WithContext(ctx context.Context) filesystem.Driver { - driver, err := NewS3(ctx, r.disk) - if err != nil { - facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) - } - - return driver -} - -func (r *S3) Put(file string, content string) error { - _, err := r.instance.PutObject(r.ctx, &s3.PutObjectInput{ - Bucket: aws.String(r.bucket), - Key: aws.String(file), - Body: strings.NewReader(content), - }) - - return err -} - -func (r *S3) PutFile(filePath string, source filesystem.File) (string, error) { - return r.PutFileAs(filePath, source, str.Random(40)) -} - -func (r *S3) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { - fullPath, err := fullPathOfFile(filePath, source, name) - if err != nil { - return "", err - } - - data, err := ioutil.ReadFile(source.File()) - if err != nil { - return "", err - } - - if err := r.Put(fullPath, string(data)); err != nil { - return "", err - } - - return fullPath, nil -} - -func (r *S3) Get(file string) (string, error) { - resp, err := r.instance.GetObject(r.ctx, &s3.GetObjectInput{ - Bucket: aws.String(r.bucket), - Key: aws.String(file), +func (r *S3) AllDirectories(path string) ([]string, error) { + var directories []string + validPath := validPath(path) + listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(r.bucket), + Delimiter: aws.String("/"), + Prefix: aws.String(validPath), }) if err != nil { - return "", err + return nil, err } - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - - return string(data), nil -} + wg := sync.WaitGroup{} + for _, commonPrefix := range listObjsResponse.CommonPrefixes { + prefix := *commonPrefix.Prefix + directories = append(directories, strings.ReplaceAll(prefix, validPath, "")) -func (r *S3) Size(file string) (int64, error) { - resp, err := r.instance.HeadObject(r.ctx, &s3.HeadObjectInput{ - Bucket: aws.String(r.bucket), - Key: aws.String(file), - }) - if err != nil { - return 0, err + wg.Add(1) + subDirectories, err := r.AllDirectories(*commonPrefix.Prefix) + if err != nil { + return nil, err + } + for _, subDirectory := range subDirectories { + if strings.HasSuffix(subDirectory, "/") { + directories = append(directories, strings.ReplaceAll(prefix+subDirectory, validPath, "")) + } + } + wg.Done() } + wg.Wait() - return resp.ContentLength, nil -} - -func (r *S3) Path(file string) string { - return file + return directories, nil } -func (r *S3) Exists(file string) bool { - _, err := r.instance.HeadObject(r.ctx, &s3.HeadObjectInput{ +func (r *S3) AllFiles(path string) ([]string, error) { + var files []string + validPath := validPath(path) + listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(r.bucket), - Key: aws.String(file), + Prefix: aws.String(validPath), }) if err != nil { - return false - } - - return true -} - -func (r *S3) Missing(file string) bool { - return !r.Exists(file) -} - -func (r *S3) Url(file string) string { - return strings.TrimSuffix(r.url, "/") + "/" + strings.TrimPrefix(file, "/") -} - -func (r *S3) TemporaryUrl(file string, time time.Time) (string, error) { - presignClient := s3.NewPresignClient(r.instance) - presignParams := &s3.GetObjectInput{ - Bucket: aws.String(r.bucket), - Key: 
aws.String(file), - } - presignDuration := func(po *s3.PresignOptions) { - po.Expires = time.Sub(supporttime.Now()) + return nil, err } - presignResult, err := presignClient.PresignGetObject(r.ctx, presignParams, presignDuration) - if err != nil { - return "", err + for _, object := range listObjsResponse.Contents { + file := *object.Key + if !strings.HasSuffix(file, "/") { + files = append(files, strings.ReplaceAll(file, validPath, "")) + } } - return presignResult.URL, nil + return files, nil } func (r *S3) Copy(originFile, targetFile string) error { @@ -174,14 +119,6 @@ func (r *S3) Copy(originFile, targetFile string) error { return err } -func (r *S3) Move(oldFile, newFile string) error { - if err := r.Copy(oldFile, newFile); err != nil { - return err - } - - return r.Delete(oldFile) -} - func (r *S3) Delete(files ...string) error { var objectIdentifiers []types.ObjectIdentifier for _, file := range files { @@ -201,14 +138,6 @@ func (r *S3) Delete(files ...string) error { return err } -func (r *S3) MakeDirectory(directory string) error { - if !strings.HasSuffix(directory, "/") { - directory += "/" - } - - return r.Put(directory, "") -} - func (r *S3) DeleteDirectory(directory string) error { if !strings.HasSuffix(directory, "/") { directory += "/" @@ -252,8 +181,8 @@ func (r *S3) DeleteDirectory(directory string) error { return nil } -func (r *S3) Files(path string) ([]string, error) { - var files []string +func (r *S3) Directories(path string) ([]string, error) { + var directories []string validPath := validPath(path) listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(r.bucket), @@ -263,35 +192,27 @@ func (r *S3) Files(path string) ([]string, error) { if err != nil { return nil, err } - for _, object := range listObjsResponse.Contents { - files = append(files, strings.ReplaceAll(*object.Key, validPath, "")) + for _, commonPrefix := range listObjsResponse.CommonPrefixes { + directories = append(directories, strings.ReplaceAll(*commonPrefix.Prefix, validPath, "")) } - return files, nil + return directories, nil } -func (r *S3) AllFiles(path string) ([]string, error) { - var files []string - validPath := validPath(path) - listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ +func (r *S3) Exists(file string) bool { + _, err := r.instance.HeadObject(r.ctx, &s3.HeadObjectInput{ Bucket: aws.String(r.bucket), - Prefix: aws.String(validPath), + Key: aws.String(file), }) if err != nil { - return nil, err - } - for _, object := range listObjsResponse.Contents { - file := *object.Key - if !strings.HasSuffix(file, "/") { - files = append(files, strings.ReplaceAll(file, validPath, "")) - } + return false } - return files, nil + return true } -func (r *S3) Directories(path string) ([]string, error) { - var directories []string +func (r *S3) Files(path string) ([]string, error) { + var files []string validPath := validPath(path) listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(r.bucket), @@ -301,45 +222,124 @@ func (r *S3) Directories(path string) ([]string, error) { if err != nil { return nil, err } - for _, commonPrefix := range listObjsResponse.CommonPrefixes { - directories = append(directories, strings.ReplaceAll(*commonPrefix.Prefix, validPath, "")) + for _, object := range listObjsResponse.Contents { + files = append(files, strings.ReplaceAll(*object.Key, validPath, "")) } - return directories, nil + return files, nil } -func (r *S3) AllDirectories(path string) 
([]string, error) { - var directories []string - validPath := validPath(path) - listObjsResponse, err := r.instance.ListObjectsV2(r.ctx, &s3.ListObjectsV2Input{ - Bucket: aws.String(r.bucket), - Delimiter: aws.String("/"), - Prefix: aws.String(validPath), +func (r *S3) Get(file string) (string, error) { + resp, err := r.instance.GetObject(r.ctx, &s3.GetObjectInput{ + Bucket: aws.String(r.bucket), + Key: aws.String(file), }) if err != nil { - return nil, err + return "", err } - wg := sync.WaitGroup{} - for _, commonPrefix := range listObjsResponse.CommonPrefixes { - prefix := *commonPrefix.Prefix - directories = append(directories, strings.ReplaceAll(prefix, validPath, "")) + data, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() - wg.Add(1) - subDirectories, err := r.AllDirectories(*commonPrefix.Prefix) - if err != nil { - return nil, err - } - for _, subDirectory := range subDirectories { - if strings.HasSuffix(subDirectory, "/") { - directories = append(directories, strings.ReplaceAll(prefix+subDirectory, validPath, "")) - } - } - wg.Done() + return string(data), nil +} + +func (r *S3) MakeDirectory(directory string) error { + if !strings.HasSuffix(directory, "/") { + directory += "/" } - wg.Wait() - return directories, nil + return r.Put(directory, "") +} + +func (r *S3) Missing(file string) bool { + return !r.Exists(file) +} + +func (r *S3) Move(oldFile, newFile string) error { + if err := r.Copy(oldFile, newFile); err != nil { + return err + } + + return r.Delete(oldFile) +} + +func (r *S3) Path(file string) string { + return file +} + +func (r *S3) Put(file string, content string) error { + _, err := r.instance.PutObject(r.ctx, &s3.PutObjectInput{ + Bucket: aws.String(r.bucket), + Key: aws.String(file), + Body: strings.NewReader(content), + }) + + return err +} + +func (r *S3) PutFile(filePath string, source filesystem.File) (string, error) { + return r.PutFileAs(filePath, source, str.Random(40)) +} + +func (r *S3) PutFileAs(filePath string, source filesystem.File, name string) (string, error) { + fullPath, err := fullPathOfFile(filePath, source, name) + if err != nil { + return "", err + } + + data, err := ioutil.ReadFile(source.File()) + if err != nil { + return "", err + } + + if err := r.Put(fullPath, string(data)); err != nil { + return "", err + } + + return fullPath, nil +} + +func (r *S3) Size(file string) (int64, error) { + resp, err := r.instance.HeadObject(r.ctx, &s3.HeadObjectInput{ + Bucket: aws.String(r.bucket), + Key: aws.String(file), + }) + if err != nil { + return 0, err + } + + return resp.ContentLength, nil +} + +func (r *S3) TemporaryUrl(file string, time time.Time) (string, error) { + presignClient := s3.NewPresignClient(r.instance) + presignParams := &s3.GetObjectInput{ + Bucket: aws.String(r.bucket), + Key: aws.String(file), + } + presignDuration := func(po *s3.PresignOptions) { + po.Expires = time.Sub(supporttime.Now()) + } + presignResult, err := presignClient.PresignGetObject(r.ctx, presignParams, presignDuration) + if err != nil { + return "", err + } + + return presignResult.URL, nil +} + +func (r *S3) WithContext(ctx context.Context) filesystem.Driver { + driver, err := NewS3(ctx, r.disk) + if err != nil { + facades.Log.Errorf("init %s disk fail: %+v", r.disk, err) + } + + return driver +} + +func (r *S3) Url(file string) string { + return strings.TrimSuffix(r.url, "/") + "/" + strings.TrimPrefix(file, "/") } func (r *S3) tempFile(content string) (*os.File, error) { diff --git a/foundation/application.go b/foundation/application.go index 
66f15c3a0..bb35eaac9 100644
--- a/foundation/application.go
+++ b/foundation/application.go
@@ -2,6 +2,7 @@ package foundation
 
 import (
 	"os"
+	"strings"
 
 	"github.com/goravel/framework/config"
 	"github.com/goravel/framework/contracts"
@@ -30,7 +31,15 @@ func (app *Application) Boot() {
 }
 
 func (app *Application) setRootPath() {
-	support.RootPath = getCurrentAbPath()
+	rootPath := getCurrentAbPath()
+
+	// When running under the air hot-reload tool the binary executes from <root>/storage/temp, so strip that suffix to recover the project root.
+	airPath := "/storage/temp"
+	if strings.HasSuffix(rootPath, airPath) {
+		rootPath = strings.ReplaceAll(rootPath, airPath, "")
+	}
+
+	support.RootPath = rootPath
 }
 
 //bootArtisan Boot artisan command.
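
Configuration note: the configurePool hook added in database/gorm/gorm.go reads the pool settings from the database.pool.* config keys, falling back to 10 idle connections, 100 open connections, and 3600-second idle/lifetime limits when a key is absent. Below is a rough sketch of how an application could expose those keys; the config/database.go file name and the facades.Config.Add call follow the usual Goravel config-file convention and are assumptions for illustration, not part of this diff:

    package config

    import (
        "github.com/goravel/framework/facades"
    )

    func init() {
        config := facades.Config
        config.Add("database", map[string]interface{}{
            // Existing "default" and "connections" entries omitted for brevity.
            "pool": map[string]interface{}{
                "max_idle_conns":    10,   // used by sqlDB.SetMaxIdleConns
                "max_open_conns":    100,  // used by sqlDB.SetMaxOpenConns
                "conn_max_idletime": 3600, // seconds, used by sqlDB.SetConnMaxIdleTime
                "conn_max_lifetime": 3600, // seconds, used by sqlDB.SetConnMaxLifetime
            },
        })
    }

Because configurePool passes a default to every GetInt call, applications that omit the pool block keep their current behavior without any config change.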