Browse Source

add new ping (metrics)

rr-01-add-metrics
loveckiy.ivan 1 year ago
parent
commit
2705b65bab
  1. 3
      .gitignore
  2. 29
      cmd/logbox/main.go
  3. 6
      go.mod
  4. 8
      go.sum
  5. 17
      pkg/service/ping.go
  6. 3
      vendor/cloud.google.com/go/storage/.release-please-manifest.json
  7. 19
      vendor/cloud.google.com/go/storage/CHANGES.md
  8. 2
      vendor/cloud.google.com/go/storage/README.md
  9. 136
      vendor/cloud.google.com/go/storage/bucket.go
  10. 9
      vendor/cloud.google.com/go/storage/client.go
  11. 15
      vendor/cloud.google.com/go/storage/copy.go
  12. 103
      vendor/cloud.google.com/go/storage/grpc_client.go
  13. 4
      vendor/cloud.google.com/go/storage/hmac.go
  14. 20
      vendor/cloud.google.com/go/storage/http_client.go
  15. 104
      vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
  16. 2956
      vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
  17. 2
      vendor/cloud.google.com/go/storage/internal/version.go
  18. 2
      vendor/cloud.google.com/go/storage/post_policy_v4.go
  19. 12
      vendor/cloud.google.com/go/storage/release-please-config.json
  20. 132
      vendor/cloud.google.com/go/storage/storage.go
  21. 5
      vendor/cloud.google.com/go/storage/writer.go
  22. 31
      vendor/git.lowcodeplatform.net/fabric/lib/async.go
  23. 51
      vendor/git.lowcodeplatform.net/fabric/lib/logger.go
  24. 1
      vendor/git.lowcodeplatform.net/fabric/lib/logger_logbox.go
  25. 2
      vendor/git.lowcodeplatform.net/fabric/lib/logger_vfs.go
  26. 114
      vendor/git.lowcodeplatform.net/fabric/lib/metrics.go
  27. 2
      vendor/git.lowcodeplatform.net/fabric/models/README.md
  28. 93
      vendor/git.lowcodeplatform.net/fabric/models/data.go
  29. 35
      vendor/git.lowcodeplatform.net/fabric/models/ping.go
  30. 107
      vendor/git.lowcodeplatform.net/fabric/models/profile.go
  31. 8
      vendor/modules.txt

3
.gitignore

@ -5,4 +5,5 @@
*~merged*
*~merged
/public
.env
.env
prometeus_*

29
cmd/logbox/main.go

@ -67,6 +67,10 @@ func Start(configfile, dir, port, mode, services, param1, param2, param3, source
cfg.UidService = cfg.DataUid
cfg.Workingdir, _ = filepath.Abs(dir)
cfg.HashRun = lib.UUID()
cfg.Name = "controller"
sf := strings.Split(configfile, "/")
cfg.ConfigName = strings.Replace(sf[len(sf)-1], ".cfg", "", 1)
///////////////// ЛОГИРОВАНИЕ //////////////////
// формирование пути к лог-файлам и метрикам
@ -135,18 +139,23 @@ func Start(configfile, dir, port, mode, services, param1, param2, param3, source
}
}()
// получаем порт, если он не передан явно
if port == "" {
port, err = lib.AddressProxy(cfg.ProxyPointsrc, cfg.PortInterval)
if port != "" {
portInt, err := strconv.Atoi(port)
if err != nil {
logger.Panic(fmt.Errorf("%s Port is empty. err:%s\n", fail, err))
return
fmt.Printf("%s Error get port. err: %s\n", fail, err)
os.Exit(0)
}
cfg.HTTPPort = portInt
}
cfg.HTTPPort, err = strconv.Atoi(port)
if err != nil {
fmt.Printf("%s Port is failed. err:%s\n", fail, err)
return
port, err = lib.AddressProxy(cfg.ProxyPointsrc, cfg.PortInterval)
if err == nil {
portInt, err := strconv.Atoi(port)
if err != nil {
fmt.Printf("%s Error get port. err: %s\n", fail, err)
os.Exit(0)
}
cfg.HTTPPort = portInt
}
// подключаемся к хранилищу логов (пока только postgres)
@ -218,6 +227,8 @@ func Start(configfile, dir, port, mode, services, param1, param2, param3, source
logger,
"http,metric,grpc",
"logbox",
serviceVersion,
hashCommit,
)
err = transport.Run()

6
go.mod

@ -3,8 +3,8 @@ module git.lowcodeplatform.net/fabric/logbox
go 1.18
require (
git.lowcodeplatform.net/fabric/lib v0.1.11
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237
git.lowcodeplatform.net/fabric/lib v0.1.35
git.lowcodeplatform.net/fabric/models v0.1.12
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
github.com/go-kit/kit v0.12.0
github.com/google/uuid v1.3.0
@ -29,7 +29,7 @@ require (
cloud.google.com/go/compute v1.12.1 // indirect
cloud.google.com/go/compute/metadata v0.2.1 // indirect
cloud.google.com/go/iam v0.7.0 // indirect
cloud.google.com/go/storage v1.27.0 // indirect
cloud.google.com/go/storage v1.28.0 // indirect
git.lowcodeplatform.net/fabric/logbox-client v0.0.0-20230129180131-bf48bfbee92f // indirect
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856 // indirect
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible // indirect

8
go.sum

@ -43,6 +43,8 @@ cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.lowcodeplatform.net/fabric/lib v0.0.0-20221023073958-5f6c90acd183 h1:42S73FfOEDBEzvCl18JBYLAJv6D7nF9s5BHf8R6QV2g=
git.lowcodeplatform.net/fabric/lib v0.0.0-20221023073958-5f6c90acd183/go.mod h1:63EYUgLgTQd3SmTrHdZuqnjL+oHWDk7Bn1x8Ko+tFmk=
@ -50,10 +52,16 @@ git.lowcodeplatform.net/fabric/lib v0.1.10 h1:TSMOvwB5+9bfAJ9wCZEQQ+TsZOCjaDTLyB
git.lowcodeplatform.net/fabric/lib v0.1.10/go.mod h1:33uPBsE/hSIRZJ5PHLz0FrLEMg8kTes+GsTV7YIzw90=
git.lowcodeplatform.net/fabric/lib v0.1.11 h1:zDHOcFhfGAkdlfQdc0R8mkQOBojhAWpslVuyT5s0fiM=
git.lowcodeplatform.net/fabric/lib v0.1.11/go.mod h1:33uPBsE/hSIRZJ5PHLz0FrLEMg8kTes+GsTV7YIzw90=
git.lowcodeplatform.net/fabric/lib v0.1.35 h1:M0DyDUYJs9xWtKxSyeCcjtsrFRWtB9kOVvBfYQumzjo=
git.lowcodeplatform.net/fabric/lib v0.1.35/go.mod h1:jacsP7euNkUSb6d2MBDZaFnv6h4f5VgMZ9hCWFpHS70=
git.lowcodeplatform.net/fabric/logbox-client v0.0.0-20230129180131-bf48bfbee92f h1:cIy3ydYa2OjXR1k/jnm/hHrIlA8dX9IwenRQ39aX8kY=
git.lowcodeplatform.net/fabric/logbox-client v0.0.0-20230129180131-bf48bfbee92f/go.mod h1:2VJOF5aTMh6dJEzXvOsfarKQdeQ2Q+Ve1I9OK+exaHg=
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237 h1:TiGs+dG9tueKWvMDos7XMIej9kekY0777bCI3+QXsh4=
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237/go.mod h1:kTVmb3xTTlMOV0PJ+IFHC3kS6pkOszNigaRsZeZp0M0=
git.lowcodeplatform.net/fabric/models v0.1.10 h1:jYTHWkoNaA2hADUE3UMefgzh2sr++6s4IuciX9PODWY=
git.lowcodeplatform.net/fabric/models v0.1.10/go.mod h1:kTVmb3xTTlMOV0PJ+IFHC3kS6pkOszNigaRsZeZp0M0=
git.lowcodeplatform.net/fabric/models v0.1.12 h1:khvXlUg99yUigyYvt1ZeQvJijtggpY7oOAPD5Pz2Pu4=
git.lowcodeplatform.net/fabric/models v0.1.12/go.mod h1:RSR+ysusHS7bhYOCDuWbkuGQkFL0Mum4r/FXPzStUQQ=
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856 h1:jZP6kGB6bKcXUEtW/FAtPVTPWPdBvPwYvbo0u25wWVU=
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856/go.mod h1:fBsBvUTmm+1rM5Es6RbCQeEE/QFDIPb1iy6/dmUgch8=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible h1:Hn/DsObfmw0M7dMGS/c0MlVrJuGFzHzOpBWL89acR68=

17
pkg/service/ping.go

@ -2,9 +2,11 @@ package service
import (
"context"
"fmt"
"os"
"strconv"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/models"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/expfmt"
@ -13,8 +15,18 @@ import (
// Ping ...
func (s *service) Ping(ctx context.Context) (result []models.Pong, err error) {
// преобразование метрик
pathTemp := "./tmp/prometeus.prom"
// если файла нет - создаем
pathTemp := "prometeus_" + s.cfg.ServiceType + ".prom"
if !lib.IsExist(pathTemp) {
err = lib.CreateFile(pathTemp)
if err != nil {
err = fmt.Errorf("error create prometeus temp-file. path: %s, err: %s", pathTemp, err)
s.logger.Error(err)
return nil, err
}
}
// пишем метрики в файл
err = prometheus.WriteToTextfile(pathTemp, prometheus.DefaultGatherer)
reader, err := os.Open(pathTemp)
if err != nil {
@ -31,6 +43,7 @@ func (s *service) Ping(ctx context.Context) (result []models.Pong, err error) {
pong.Uid = s.cfg.HashRun
pong.Name = s.cfg.Name
pong.Version = s.cfg.ServiceType
pong.Config = s.cfg.ConfigName
pong.Status = "run"
pong.PortHTTP = s.cfg.HTTPPort
pong.Pid = strconv.Itoa(os.Getpid())

3
vendor/cloud.google.com/go/storage/.release-please-manifest.json

@ -1,3 +0,0 @@
{
"storage": "1.27.0"
}

19
vendor/cloud.google.com/go/storage/CHANGES.md

@ -1,6 +1,25 @@
# Changes
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03)
### Features
* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1))
* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8))
### Bug Fixes
* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb))
* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857)
### Documentation
* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3))
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)

2
vendor/cloud.google.com/go/storage/README.md

@ -25,7 +25,7 @@ if err != nil {
log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
body, err := io.ReadAll(rc)
if err != nil {
log.Fatal(err)
}

136
vendor/cloud.google.com/go/storage/bucket.go

@ -170,7 +170,7 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
// for this method.
//
// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4]
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
return SignedURL(b.name, object, opts)
@ -212,7 +212,7 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
// in some cases. Read more on the [automatic detection of credentials] for this method.
//
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4]
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
return GenerateSignedPostPolicyV4(b.name, object, opts)
@ -298,18 +298,18 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte,
// circumventing the cost of recreating the auth/transport layer
svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
if err != nil {
return nil, fmt.Errorf("unable to create iamcredentials client: %v", err)
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
}
resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
Payload: base64.StdEncoding.EncodeToString(in),
}).Do()
if err != nil {
return nil, fmt.Errorf("unable to sign bytes: %v", err)
return nil, fmt.Errorf("unable to sign bytes: %w", err)
}
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
if err != nil {
return nil, fmt.Errorf("unable to base64 decode response: %v", err)
return nil, fmt.Errorf("unable to base64 decode response: %w", err)
}
return out, nil
}
@ -444,6 +444,11 @@ type BucketAttrs struct {
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
// more information.
RPO RPO
// Autoclass holds the bucket's autoclass configuration. If enabled,
// allows for the automatic selection of the best storage class
// based on object access patterns.
Autoclass *Autoclass
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@ -710,6 +715,20 @@ type CustomPlacementConfig struct {
DataLocations []string
}
// Autoclass holds the bucket's autoclass configuration. If enabled,
// allows for the automatic selection of the best storage class
// based on object access patterns. See
// https://cloud.google.com/storage/docs/using-autoclass for more information.
type Autoclass struct {
// Enabled specifies whether the autoclass feature is enabled
// on the bucket.
Enabled bool
// ToggleTime is the time from which Autoclass was last toggled.
// If Autoclass is enabled when the bucket is created, the ToggleTime
// is set to the bucket creation time. This field is read-only.
ToggleTime time.Time
}
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@ -744,6 +763,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
ProjectNumber: b.ProjectNumber,
RPO: toRPO(b),
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
Autoclass: toAutoclassFromRaw(b.Autoclass),
}, nil
}
@ -776,6 +796,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
RPO: toRPOFromProto(b),
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
}
}
@ -830,6 +851,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
IamConfiguration: bktIAM,
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
Autoclass: b.Autoclass.toRawAutoclass(),
}
}
@ -889,6 +911,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
IamConfig: bktIAM,
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
Autoclass: b.Autoclass.toProtoAutoclass(),
}
}
@ -907,23 +930,30 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
if ua.RequesterPays != nil {
bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
}
var bktIAM *storagepb.Bucket_IamConfig
var ublaEnabled bool
var bktPolicyOnlyEnabled bool
if ua.UniformBucketLevelAccess != nil {
ublaEnabled = optional.ToBool(ua.UniformBucketLevelAccess.Enabled)
}
if ua.BucketPolicyOnly != nil {
bktPolicyOnlyEnabled = optional.ToBool(ua.BucketPolicyOnly.Enabled)
}
if ublaEnabled || bktPolicyOnlyEnabled {
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
Enabled: true,
if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM = &storagepb.Bucket_IamConfig{}
if ua.BucketPolicyOnly != nil {
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled),
}
}
if ua.UniformBucketLevelAccess != nil {
// UniformBucketLevelAccess takes precedence over BucketPolicyOnly,
// so Enabled will be overridden here if both are set
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled),
}
}
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
}
}
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
}
var defaultHold bool
if ua.DefaultEventBasedHold != nil {
defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
@ -964,6 +994,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
Website: ua.Website.toProtoBucketWebsite(),
IamConfig: bktIAM,
Rpo: ua.RPO.String(),
Autoclass: ua.Autoclass.toProtoAutoclass(),
}
}
@ -1079,6 +1110,10 @@ type BucketAttrsToUpdate struct {
// more information.
RPO RPO
// If set, updates the autoclass configuration of the bucket.
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
Autoclass *Autoclass
// acl is the list of access control rules on the bucket.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
@ -1192,6 +1227,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb.Website = ua.Website.toRawBucketWebsite()
}
}
if ua.Autoclass != nil {
rb.Autoclass = &raw.BucketAutoclass{
Enabled: ua.Autoclass.Enabled,
ForceSendFields: []string{"Enabled"},
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
@ -1346,8 +1387,14 @@ func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionP
if rp == nil {
return nil
}
// RetentionPeriod must be greater than 0, so if it is 0, the user left it
// unset, and so we should not send it in the request i.e. nil is sent.
var period *int64
if rp.RetentionPeriod != 0 {
period = proto.Int64(int64(rp.RetentionPeriod / time.Second))
}
return &storagepb.Bucket_RetentionPolicy{
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
RetentionPeriod: period,
}
}
@ -1367,7 +1414,7 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
}
func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
if rp == nil {
if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 {
return nil
}
return &RetentionPolicy{
@ -1886,6 +1933,53 @@ func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *Custom
return &CustomPlacementConfig{DataLocations: c.GetDataLocations()}
}
func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass {
if a == nil {
return nil
}
// Excluding read only field ToggleTime.
return &raw.BucketAutoclass{
Enabled: a.Enabled,
}
}
func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass {
if a == nil {
return nil
}
// Excluding read only field ToggleTime.
return &storagepb.Bucket_Autoclass{
Enabled: a.Enabled,
}
}
func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass {
if a == nil || a.ToggleTime == "" {
return nil
}
// Return Autoclass.ToggleTime only if parsed with a valid value.
t, err := time.Parse(time.RFC3339, a.ToggleTime)
if err != nil {
return &Autoclass{
Enabled: a.Enabled,
}
}
return &Autoclass{
Enabled: a.Enabled,
ToggleTime: t,
}
}
func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
if a == nil || a.GetToggleTime().AsTime().Unix() == 0 {
return nil
}
return &Autoclass{
Enabled: a.GetEnabled(),
ToggleTime: a.GetToggleTime().AsTime(),
}
}
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.

9
vendor/cloud.google.com/go/storage/client.go

@ -317,10 +317,11 @@ type destinationObject struct {
}
type rewriteObjectRequest struct {
srcObject sourceObject
dstObject destinationObject
predefinedACL string
token string
srcObject sourceObject
dstObject destinationObject
predefinedACL string
token string
maxBytesRewrittenPerCall int64
}
type rewriteObjectResponse struct {

15
vendor/cloud.google.com/go/storage/copy.go

@ -69,6 +69,15 @@ type Copier struct {
DestinationKMSKeyName string
dst, src *ObjectHandle
// The maximum number of bytes that will be rewritten per rewrite request.
// Most callers shouldn't need to specify this parameter - it is primarily
// in place to support testing. If specified the value must be an integral
// multiple of 1 MiB (1048576). Also, this only applies to requests where
// the source and destination span locations and/or storage classes. Finally,
// this value must not change across rewrite calls else you'll get an error
// that the `rewriteToken` is invalid.
maxBytesRewrittenPerCall int64
}
// Run performs the copy.
@ -108,8 +117,9 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
encryptionKey: c.dst.encryptionKey,
keyName: c.DestinationKMSKeyName,
},
predefinedACL: c.PredefinedACL,
token: c.RewriteToken,
predefinedACL: c.PredefinedACL,
token: c.RewriteToken,
maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall,
}
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
@ -127,6 +137,7 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
return nil, err
}
c.RewriteToken = res.token
req.token = res.token
if c.ProgressFunc != nil {
c.ProgressFunc(uint64(res.written), uint64(res.size))
}

103
vendor/cloud.google.com/go/storage/grpc_client.go

@ -25,6 +25,7 @@ import (
"cloud.google.com/go/internal/trace"
gapic "cloud.google.com/go/storage/internal/apiv2"
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
"github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@ -246,7 +247,8 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetBucketRequest{
Name: bucketResourceName(globalProjectAlias, bucket),
Name: bucketResourceName(globalProjectAlias, bucket),
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
}
if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil {
return nil, err
@ -344,6 +346,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
if uattrs.RPO != RPOUnknown {
fieldMask.Paths = append(fieldMask.Paths, "rpo")
}
if uattrs.Autoclass != nil {
fieldMask.Paths = append(fieldMask.Paths, "autoclass")
}
// TODO(cathyo): Handle labels. Pending b/230510191.
req.UpdateMask = fieldMask
@ -380,14 +385,14 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
it.query = *q
}
req := &storagepb.ListObjectsRequest{
Parent: bucketResourceName(globalProjectAlias, bucket),
Prefix: it.query.Prefix,
Delimiter: it.query.Delimiter,
Versions: it.query.Versions,
LexicographicStart: it.query.StartOffset,
LexicographicEnd: it.query.EndOffset,
// TODO(noahietz): Convert a projection to a FieldMask.
// ReadMask: q.Projection,
Parent: bucketResourceName(globalProjectAlias, bucket),
Prefix: it.query.Prefix,
Delimiter: it.query.Delimiter,
Versions: it.query.Versions,
LexicographicStart: it.query.StartOffset,
LexicographicEnd: it.query.EndOffset,
IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
@ -411,6 +416,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
it.items = append(it.items, b)
}
// Response is always non-nil after a successful request.
res := gitr.Response.(*storagepb.ListObjectsResponse)
for _, prefix := range res.GetPrefixes() {
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
}
return token, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
@ -449,6 +460,8 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
req := &storagepb.GetObjectRequest{
Bucket: bucketResourceName(globalProjectAlias, bucket),
Object: object,
// ProjectionFull by default.
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
}
if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
return nil, err
@ -492,10 +505,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
}
var paths []string
fieldMask := &fieldmaskpb.FieldMask{
Paths: paths,
}
fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
if uattrs.EventBasedHold != nil {
fieldMask.Paths = append(fieldMask.Paths, "event_based_hold")
}
@ -522,7 +532,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
}
// Note: This API currently does not support entites using project ID.
// Use project numbers in ACL entities. Pending b/233617896.
if uattrs.ACL != nil {
if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 {
fieldMask.Paths = append(fieldMask.Paths, "acl")
}
// TODO(cathyo): Handle metadata. Pending b/230510191.
@ -812,6 +822,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes()
call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes()
}
call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall
var res *storagepb.RewriteResponse
var err error
@ -943,6 +956,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
// Store the content from the first Recv in the
// client buffer for reading later.
leftovers: msg.GetChecksummedData().GetContent(),
settings: s,
},
}
@ -964,6 +978,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
s := callSettings(c.settings, opts...)
var offset int64
errorf := params.setError
progress := params.progress
@ -971,6 +987,10 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
pr, pw := io.Pipe()
gw := newGRPCWriter(c, params, pr)
gw.settings = s
if s.userProject != "" {
gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject)
}
// This function reads the data sent to the pipe and sends sets of messages
// on the gRPC client-stream as the buffer is filled.
@ -1315,6 +1335,7 @@ type gRPCReader struct {
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
cancel context.CancelFunc
settings *settings
}
// Read reads bytes into the user's buffer from an open gRPC stream.
@ -1390,7 +1411,11 @@ func (r *gRPCReader) Close() error {
// an attempt to reopen the stream.
func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
msg, err := r.stream.Recv()
if err != nil && ShouldRetry(err) {
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
shouldRetry = r.settings.retry.shouldRetry
}
if err != nil && shouldRetry(err) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
@ -1454,6 +1479,7 @@ type gRPCWriter struct {
attrs *ObjectAttrs
conds *Conditions
encryptionKey []byte
settings *settings
sendCRC32C bool
@ -1471,21 +1497,27 @@ func (w *gRPCWriter) startResumableUpload() error {
if err != nil {
return err
}
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
WriteObjectSpec: spec,
})
w.upid = upres.GetUploadId()
return err
return run(w.ctx, func() error {
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
WriteObjectSpec: spec,
})
w.upid = upres.GetUploadId()
return err
}, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx))
}
// queryProgress is a helper that queries the status of the resumable upload
// associated with the given upload ID.
func (w *gRPCWriter) queryProgress() (int64, error) {
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
var persistedSize int64
err := run(w.ctx, func() error {
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
persistedSize = q.GetPersistedSize()
return err
}, w.settings.retry, true, setRetryHeaderGRPC(w.ctx))
// q.GetPersistedSize() will return 0 if q is nil.
return q.GetPersistedSize(), err
return persistedSize, err
}
// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if
@ -1500,6 +1532,10 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
var err error
var finishWrite bool
var sent, limit int = 0, maxPerMessageWriteSize
var shouldRetry = ShouldRetry
if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
shouldRetry = w.settings.retry.shouldRetry
}
offset := start
toWrite := w.buf[:recvd]
for {
@ -1553,8 +1589,16 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// on the *last* message of the stream (instead of the first).
if w.sendCRC32C {
req.ObjectChecksums = &storagepb.ObjectChecksums{
Crc32C: proto.Uint32(w.attrs.CRC32C),
Md5Hash: w.attrs.MD5,
Crc32C: proto.Uint32(w.attrs.CRC32C),
}
}
if len(w.attrs.MD5) != 0 {
if cs := req.GetObjectChecksums(); cs == nil {
req.ObjectChecksums = &storagepb.ObjectChecksums{
Md5Hash: w.attrs.MD5,
}
} else {
cs.Md5Hash = w.attrs.MD5
}
}
}
@ -1570,7 +1614,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// resend the entire buffer via a new stream.
// If not retriable, falling through will return the error received
// from closing the stream.
if ShouldRetry(err) {
if shouldRetry(err) {
sent = 0
finishWrite = false
// TODO: Add test case for failure modes of querying progress.
@ -1601,7 +1645,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// resend the entire buffer via a new stream.
// If not retriable, falling through will return the error received
// from closing the stream.
if ShouldRetry(err) {
if shouldRetry(err) {
sent = 0
finishWrite = false
offset, err = w.determineOffset(start)
@ -1673,7 +1717,12 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
// read copies the data in the reader to the given buffer and reports how much
// data was read into the buffer and if there is no more data to read (EOF).
// Furthermore, if the attrs.ContentType is unset, the first bytes of content
// will be sniffed for a matching content type.
func (w *gRPCWriter) read() (int, bool, error) {
if w.attrs.ContentType == "" {
w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
}
// Set n to -1 to start the Read loop.
var n, recvd int = -1, 0
var err error

4
vendor/cloud.google.com/go/storage/hmac.go

@ -147,11 +147,11 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
}
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
if err != nil {
return nil, fmt.Errorf("field CreatedTime: %v", err)
return nil, fmt.Errorf("field CreatedTime: %w", err)
}
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
if err != nil && !updatedTimeCanBeNil {
return nil, fmt.Errorf("field UpdatedTime: %v", err)
return nil, fmt.Errorf("field UpdatedTime: %w", err)
}
hmKey := &HMACKey{

20
vendor/cloud.google.com/go/storage/http_client.go

@ -114,17 +114,17 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
return nil, fmt.Errorf("dialing: %w", err)
}
// RawService should be created with the chosen endpoint to take account of user override.
rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
if err != nil {
return nil, fmt.Errorf("storage client: %v", err)
return nil, fmt.Errorf("storage client: %w", err)
}
// Update readHost and scheme with the chosen endpoint.
u, err := url.Parse(ep)
if err != nil {
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
}
return &httpStorageClient{
@ -344,8 +344,8 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
req.EndOffset(it.query.EndOffset)
req.Versions(it.query.Versions)
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
if len(it.query.fieldSelection) > 0 {
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
if selection := it.query.toFieldSelection(); selection != "" {
req.Fields("nextPageToken", googleapi.Field(selection))
}
req.PageToken(pageToken)
if s.userProject != "" {
@ -747,6 +747,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil {
return nil, err
}
if req.maxBytesRewrittenPerCall != 0 {
call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall)
}
var res *raw.RewriteResponse
var err error
setClientHeader(call.Header())
@ -905,7 +910,7 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
if dashIndex >= 0 {
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
}
}
} else {
@ -1033,9 +1038,8 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
// there is no need to add retries here.
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
isIdempotent := params.conds != nil && (params.conds.GenerationMatch >= 0 || params.conds.DoesNotExist == true)
var useRetry bool
if (s.retry == nil || s.retry.policy == RetryIdempotent) && isIdempotent {
if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent {
useRetry = true
} else if s.retry != nil && s.retry.policy == RetryAlways {
useRetry = true

104
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go

@ -589,7 +589,18 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ
}
func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...)
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -604,7 +615,18 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck
}
func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...)
it := &BucketIterator{}
req = proto.Clone(req).(*storagepb.ListBucketsRequest)
@ -1215,7 +1237,18 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
}
func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
var resp *storagepb.ServiceAccount
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1230,7 +1263,18 @@ func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetSe
}
func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
var resp *storagepb.CreateHmacKeyResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1245,7 +1289,18 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma
}
func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -1256,7 +1311,18 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma
}
func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...)
var resp *storagepb.HmacKeyMetadata
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1271,7 +1337,18 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe
}
func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...)
it := &HmacKeyMetadataIterator{}
req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
@ -1314,7 +1391,18 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe
}
func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
var resp *storagepb.HmacKeyMetadata
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

2956
vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go

File diff suppressed because it is too large

2
vendor/cloud.google.com/go/storage/internal/version.go

@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.27.0"
const Version = "1.28.0"

2
vendor/cloud.google.com/go/storage/post_policy_v4.go

@ -340,7 +340,7 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
"expiration": opts.Expires.Format(time.RFC3339),
})
if err != nil {
return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err)
return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err)
}
b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)

12
vendor/cloud.google.com/go/storage/release-please-config.json

@ -1,12 +0,0 @@
{
"release-type": "go-yoshi",
"separate-pull-requests": true,
"include-component-in-tag": true,
"tag-separator": "/",
"packages": {
"storage": {
"component": "storage"
}
},
"plugins": ["sentence-case"]
}

132
vendor/cloud.google.com/go/storage/storage.go

@ -52,6 +52,7 @@ import (
htransport "google.golang.org/api/transport/http"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/fieldmaskpb"
"google.golang.org/protobuf/types/known/timestamppb"
)
@ -187,22 +188,22 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
return nil, fmt.Errorf("dialing: %w", err)
}
// RawService should be created with the chosen endpoint to take account of user override.
rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
if err != nil {
return nil, fmt.Errorf("storage client: %v", err)
return nil, fmt.Errorf("storage client: %w", err)
}
// Update readHost and scheme with the chosen endpoint.
u, err := url.Parse(ep)
if err != nil {
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
}
tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...))
if err != nil {
return nil, fmt.Errorf("storage: %v", err)
return nil, fmt.Errorf("storage: %w", err)
}
return &Client{
@ -1089,11 +1090,6 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
// toProtoObject copies the editable attributes from o to the proto library's Object type.
func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
checksums := &storagepb.ObjectChecksums{Md5Hash: o.MD5}
if o.CRC32C > 0 {
checksums.Crc32C = proto.Uint32(o.CRC32C)
}
// For now, there are only globally unique buckets, and "_" is the alias
// project ID for such buckets. If the bucket is not provided, like in the
// destination ObjectAttrs of a Copy, do not attempt to format it.
@ -1122,7 +1118,6 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
KmsKey: o.KMSKeyName,
Generation: o.Generation,
Size: o.Size,
Checksums: checksums,
}
}
@ -1489,10 +1484,11 @@ type Query struct {
// object will be included in the results.
Versions bool
// fieldSelection is used to select only specific fields to be returned by
// the query. It's used internally and is populated for the user by
// calling Query.SetAttrSelection
fieldSelection string
// attrSelection is used to select only specific fields to be returned by
// the query. It is set by the user calling SetAttrSelection. These
// are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON
// clients respectively.
attrSelection []string
// StartOffset is used to filter results to objects whose names are
// lexicographically equal to or after startOffset. If endOffset is also set,
@ -1552,6 +1548,39 @@ var attrToFieldMap = map[string]string{
"CustomTime": "customTime",
}
// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
// names in the protobuf Object message.
var attrToProtoFieldMap = map[string]string{
"Name": "name",
"Bucket": "bucket",
"Etag": "etag",
"Generation": "generation",
"Metageneration": "metageneration",
"StorageClass": "storage_class",
"Size": "size",
"ContentEncoding": "content_encoding",
"ContentDisposition": "content_disposition",
"CacheControl": "cache_control",
"ACL": "acl",
"ContentLanguage": "content_language",
"Deleted": "delete_time",
"ContentType": "content_type",
"Created": "create_time",
"CRC32C": "checksums.crc32c",
"MD5": "checksums.md5_hash",
"Updated": "update_time",
"KMSKeyName": "kms_key",
"TemporaryHold": "temporary_hold",
"RetentionExpirationTime": "retention_expire_time",
"Metadata": "metadata",
"EventBasedHold": "event_based_hold",
"Owner": "owner",
"CustomerKeySHA256": "customer_encryption",
"CustomTime": "custom_time",
// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
// "MediaLink": "mediaLink",
}
// SetAttrSelection makes the query populate only specific attributes of
// objects. When iterating over objects, if you only need each object's name
// and size, pass []string{"Name", "Size"} to this method. Only these fields
@ -1560,16 +1589,42 @@ var attrToFieldMap = map[string]string{
// optimization; for more information, see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
func (q *Query) SetAttrSelection(attrs []string) error {
// Validate selections.
for _, attr := range attrs {
// If the attr is acceptable for one of the two sets, then it is OK.
// If it is not acceptable for either, then return an error.
// The respective masking implementations ignore unknown attrs which
// makes switching between transports a little easier.
_, okJSON := attrToFieldMap[attr]
_, okGRPC := attrToProtoFieldMap[attr]
if !okJSON && !okGRPC {
return fmt.Errorf("storage: attr %v is not valid", attr)
}
}
q.attrSelection = attrs
return nil
}
func (q *Query) toFieldSelection() string {
if q == nil || len(q.attrSelection) == 0 {
return ""
}
fieldSet := make(map[string]bool)
for _, attr := range attrs {
for _, attr := range q.attrSelection {
field, ok := attrToFieldMap[attr]
if !ok {
return fmt.Errorf("storage: attr %v is not valid", attr)
// Future proofing, skip unknown fields, let SetAttrSelection handle
// error modes.
continue
}
fieldSet[field] = true
}
var s string
if len(fieldSet) > 0 {
var b bytes.Buffer
b.WriteString("prefixes,items(")
@ -1582,9 +1637,50 @@ func (q *Query) SetAttrSelection(attrs []string) error {
b.WriteString(field)
}
b.WriteString(")")
q.fieldSelection = b.String()
s = b.String()
}
return nil
return s
}
func (q *Query) toFieldMask() *fieldmaskpb.FieldMask {
// The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull).
if q == nil {
return &fieldmaskpb.FieldMask{Paths: []string{"*"}}
}
// User selected attributes via q.SetAttrSelection. This takes precedence
// over the Projection.
if numSelected := len(q.attrSelection); numSelected > 0 {
protoFieldPaths := make([]string, 0, numSelected)
for _, attr := range q.attrSelection {
pf, ok := attrToProtoFieldMap[attr]
if !ok {
// Future proofing, skip unknown fields, let SetAttrSelection
// handle error modes.
continue
}
protoFieldPaths = append(protoFieldPaths, pf)
}
return &fieldmaskpb.FieldMask{Paths: protoFieldPaths}
}
// ProjectionDefault == ProjectionFull which means all fields.
fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}}
if q.Projection == ProjectionNoACL {
paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields
for _, f := range attrToProtoFieldMap {
// Skip the acl and owner fields for "NoACL".
if f == "acl" || f == "owner" {
continue
}
paths = append(paths, f)
}
fm.Paths = paths
}
return fm
}
// Conditions constrain methods to act on specific generations of

5
vendor/cloud.google.com/go/storage/writer.go

@ -176,7 +176,6 @@ func (w *Writer) openWriter() (err error) {
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
go w.monitorCancel()
params := &openWriterParams{
ctx: w.ctx,
chunkSize: w.ChunkSize,
@ -191,11 +190,15 @@ func (w *Writer) openWriter() (err error) {
progress: w.progress,
setObj: func(o *ObjectAttrs) { w.obj = o },
}
if err := w.ctx.Err(); err != nil {
return err // short-circuit
}
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
if err != nil {
return err
}
w.opened = true
go w.monitorCancel()
return nil
}

31
vendor/git.lowcodeplatform.net/fabric/lib/async.go

@ -0,0 +1,31 @@
package lib
import (
"context"
"fmt"
"runtime"
"runtime/debug"
)
func RunAsync(ctx context.Context, fn func()) {
go func() {
defer Recover(ctx)
fn()
}()
}
func Recover(ctx context.Context) bool {
recoverErr := recover()
if recoverErr == nil {
return false
}
stack := debug.Stack()
pc, file, line, _ := runtime.Caller(2)
msg := fmt.Sprintf("Recovered panic. file: %s, line: %d, function: %s, error: %s, stack: %s", file, line, runtime.FuncForPC(pc).Name(), recoverErr, stack)
// TODO заменить на логгер
fmt.Println(msg)
return true
}

51
vendor/git.lowcodeplatform.net/fabric/lib/logger.go

@ -126,6 +126,9 @@ func (l *log) Trace(args ...interface{}) {
"srv": l.Service,
"config": l.Config,
}).Trace(args...)
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Trace: %+v\n", args)
}
}
}
@ -143,6 +146,9 @@ func (l *log) Debug(args ...interface{}) {
"srv": l.Service,
"config": l.Config,
}).Debug(args...)
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Debug: %+v\n", args)
}
}
}
@ -159,6 +165,9 @@ func (l *log) Info(args ...interface{}) {
"srv": l.Service,
"config": l.Config,
}).Info(args...)
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Info: %+v\n", args)
}
}
}
@ -174,10 +183,20 @@ func (l *log) Warning(args ...interface{}) {
"srv": l.Service,
"config": l.Config,
}).Warn(args...)
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Warn: %+v\n", args)
}
}
}
func (l *log) Error(err error, args ...interface{}) {
if err != nil {
if args != nil {
args = append(args, "; error:", err)
} else {
args = append(args, "error:", err)
}
}
if strings.Contains(l.Levels, "Error") {
logrusB.SetOutput(l.Output)
logrusB.SetFormatter(&logrus.JSONFormatter{})
@ -188,40 +207,60 @@ func (l *log) Error(err error, args ...interface{}) {
"uid": l.UID,
"srv": l.Service,
"config": l.Config,
"error": fmt.Sprint(err),
}).Error(args...)
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Error: %+v\n", args)
}
}
}
func (l *log) Panic(err error, args ...interface{}) {
if err != nil {
if args != nil {
args = append(args, "; error:", err)
} else {
args = append(args, "error:", err)
}
}
if strings.Contains(l.Levels, "Panic") {
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Panic: %+v\n", args)
}
logrusB.SetOutput(l.Output)
logrusB.SetFormatter(&logrus.JSONFormatter{})
logrusB.SetLevel(logrus.PanicLevel)
logrusB.WithFields(logrus.Fields{
"name": l.Name,
"uid": l.UID,
"srv": l.Service,
"config": l.Config,
"error": fmt.Sprint(err),
}).Panic(args...)
}
}
// Exit внутренняя ф-ция логирования и прекращения работы программы
func (l *log) Exit(err error, args ...interface{}) {
if err != nil {
if args != nil {
args = append(args, "; error:", err)
} else {
args = append(args, "error:", err)
}
}
if strings.Contains(l.Levels, "Fatal") {
if strings.Contains(l.Levels, "Stdout") {
fmt.Printf("Exit: %+v\n", args)
}
logrusB.SetOutput(l.Output)
logrusB.SetFormatter(&logrus.JSONFormatter{})
logrusB.SetLevel(logrus.FatalLevel)
logrusB.WithFields(logrus.Fields{
"name": l.Name,
"uid": l.UID,
"srv": l.Service,
"config": l.Config,
"error": fmt.Sprint(err),
}).Fatal(args...)
}
}
@ -261,7 +300,7 @@ func NewLogger(ctx context.Context, cfg ConfigLogger) (logger Log, initType stri
if len(vs) != 0 {
cfg.Vfs.Dir = "logs"
}
// инициализировать лог и его ротацию
logger, errI = NewVfsLogger(ctx, cfg)
fmt.Println(logger, errI)

1
vendor/git.lowcodeplatform.net/fabric/lib/logger_logbox.go

@ -32,6 +32,7 @@ func NewLogboxLogger(ctx context.Context, cfg ConfigLogger) (logger Log, err err
l := &log{
Output: output,
Config: cfg.Config,
Levels: cfg.Level,
UID: cfg.Uid,
Name: cfg.Name,

2
vendor/git.lowcodeplatform.net/fabric/lib/logger_vfs.go

@ -39,7 +39,7 @@ func NewVfsLogger(ctx context.Context, cfg ConfigLogger) (logger Log, err error)
IntervalReload: cfg.Vfs.IntervalReload,
mux: &m,
}
return l, nil
}

114
vendor/git.lowcodeplatform.net/fabric/lib/metrics.go

@ -0,0 +1,114 @@
package lib
import (
"strings"
"github.com/go-kit/kit/metrics"
kitprometheus "github.com/go-kit/kit/metrics/prometheus"
"github.com/prometheus/client_golang/prometheus"
)
var (
// uid
service_uid metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_uid",
}, []string{"value"})
service_name metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_name",
}, []string{"value"})
service_version metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_version",
}, []string{"value"})
service_status metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_status",
}, []string{"value"})
service_port_http metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_port_http",
}, []string{"value"})
service_pid metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_pid",
}, []string{"value"})
service_replicas metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_replicas",
}, []string{"value"})
service_port_https metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_port_https",
}, []string{"value"})
service_dead_time metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_dead_time",
}, []string{"value"})
service_follower metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_follower",
}, []string{"value"})
service_port_grpc metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_port_grpc",
}, []string{"value"})
service_port_metrics metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{
Name: "service_port_metrics",
}, []string{"value"})
)
func SendServiceParamsToMetric(uid, name, version, status, pid, replicas, portHTTP, portGRPC, portMetrics, portHTTPS, dead_time, follower string) {
var count float64
service_uid.With("value", uid).Set(count)
service_name.With("value", name).Set(count)
service_version.With("value", version).Set(count)
service_status.With("value", status).Set(count)
service_port_http.With("value", portHTTP).Set(count)
service_pid.With("value", pid).Set(count)
service_replicas.With("value", replicas).Set(count)
service_port_https.With("value", portHTTPS).Set(count)
service_dead_time.With("value", dead_time).Set(count)
service_follower.With("value", follower).Set(count)
service_port_grpc.With("value", portGRPC).Set(count)
service_port_metrics.With("value", portMetrics).Set(count)
}
// ValidateNameVersion - формирует правильные имя проекта и версию сервиса исходя из того, что пришло из настроек
func ValidateNameVersion(project, version, domain string) (resName, resVersion string) {
name := "unknown"
if project != "" {
if len(strings.Split(project, "-")) > 3 { // признак того, что получили UID (для совместимости)
if domain != "" {
project = strings.Split(domain, "/")[0]
}
}
name = project // название проекта
}
if name == "unknown" && domain != "" {
name = strings.Split(domain, "/")[0]
}
// TODO deprecated - удалить когда все сервисы переедут на адресацию по короткому имени проекта
if version == "" || name == "" {
pp := strings.Split(domain, "/")
if len(pp) == 1 {
if pp[0] != "" {
name = pp[0]
}
}
if len(pp) == 2 {
if pp[0] != "" {
name = pp[0]
}
if pp[1] != "" {
version = pp[1]
}
}
}
return name, version
}

2
vendor/git.lowcodeplatform.net/fabric/models/README.md

@ -1,3 +1,3 @@
# models
Модели общих сущностей проекта Buildbox Fabric
Модели общих сущностей проекта Lowcodeplatform Fabric

93
vendor/git.lowcodeplatform.net/fabric/models/data.go

@ -1,15 +1,17 @@
package models
import "strings"
type Data struct {
Uid string `json:"uid"`
Id string `json:"id"`
Source string `json:"source"`
Parent string `json:"parent"`
Type string `json:"type"`
Title string `json:"title"`
Rev string `json:"rev"`
Сopies string `json:"copies"`
Attributes map[string]Attribute `json:"attributes"`
Uid string `json:"uid"`
Id string `json:"id"`
Source string `json:"source"`
Parent string `json:"parent"`
Type string `json:"type"`
Title string `json:"title"`
Rev string `json:"rev"`
Сopies string `json:"copies"`
Attributes map[string]Attribute `json:"attributes"`
}
type Attribute struct {
@ -22,39 +24,38 @@ type Attribute struct {
}
type Response struct {
Data interface{} `json:"data"`
Status RestStatus `json:"status"`
Metrics Metrics `json:"metrics"`
Data interface{} `json:"data"`
Status RestStatus `json:"status"`
Metrics Metrics `json:"metrics"`
}
type ResponseData struct {
Data []Data `json:"data"`
Res interface{} `json:"res"`
Status RestStatus `json:"status"`
Metrics Metrics `json:"metrics"`
Data []Data `json:"data"`
Res interface{} `json:"res"`
Status RestStatus `json:"status"`
Metrics Metrics `json:"metrics"`
}
type Metrics struct {
ResultSize int `json:"result_size"`
ResultCount int `json:"result_count"`
ResultOffset int `json:"result_offset"`
ResultLimit int `json:"result_limit"`
ResultPage int `json:"result_page"`
TimeExecution string `json:"time_execution"`
TimeQuery string `json:"time_query"`
PageLast int `json:"page_last"`
PageCurrent int `json:"page_current"`
PageList []int `json:"page_list"`
PageFrom int `json:"page_from"`
PageTo int `json:"page_to"`
ResultSize int `json:"result_size"`
ResultCount int `json:"result_count"`
ResultOffset int `json:"result_offset"`
ResultLimit int `json:"result_limit"`
ResultPage int `json:"result_page"`
TimeExecution string `json:"time_execution"`
TimeQuery string `json:"time_query"`
PageLast int `json:"page_last"`
PageCurrent int `json:"page_current"`
PageList []int `json:"page_list"`
PageFrom int `json:"page_from"`
PageTo int `json:"page_to"`
}
// возвращаем необходимое значение атрибута для объекта, если он есть, иначе пусто
// а также из заголовка объекта
func (p *Data) Attr(name, element string) (result string, found bool) {
if _, found := p.Attributes[name]; found {
// фикс для тех объектов, на которых добавлено скрытое поле Uid
@ -102,7 +103,7 @@ func (p *Data) Attr(name, element string) (result string, found bool) {
}
// заменяем значение аттрибутов в объекте профиля
func (p *Data) AttrSet(name, element, value string) bool {
func (p *Data) AttrSet(name, element, value string) bool {
g := Attribute{}
for k, v := range p.Attributes {
@ -133,14 +134,13 @@ func (p *Data) AttrSet(name, element, value string) bool {
}
}
return false
}
// удаляем элемент из слайса
func (p *ResponseData) RemoveData(i int) bool {
if (i < len(p.Data)){
if i < len(p.Data) {
p.Data = append(p.Data[:i], p.Data[i+1:]...)
} else {
//log.Warning("Error! Position invalid (", i, ")")
@ -148,4 +148,29 @@ func (p *ResponseData) RemoveData(i int) bool {
}
return true
}
}
// FilterRole применяем ограничения доступа для объектов типа ResponseData
// фильтруем массив данных
// если непустое поле access_read, значит назначены права, а следовательно проверяем право просмотра для роли пользователя
// также возвращаем
func (p *ResponseData) FilterRole(role string) {
sliceData := p.Data
for i := len(sliceData) - 1; i >= 0; i-- {
v := sliceData[i]
attr_read, _ := v.Attr("access_read", "src")
attr_write, _ := v.Attr("attr_write", "src")
attr_delete, _ := v.Attr("attr_delete", "src")
attr_admin, _ := v.Attr("attr_admin", "src")
if (!strings.Contains(attr_read, role) || attr_read == "") &&
(!strings.Contains(attr_write, role) || attr_write == "") &&
(!strings.Contains(attr_delete, role) || attr_delete == "") &&
(!strings.Contains(attr_admin, role) || attr_admin == "") {
p.RemoveData(i)
}
}
return
}

35
vendor/git.lowcodeplatform.net/fabric/models/ping.go

@ -1,18 +1,27 @@
package models
// тип ответа, который сервис отдает прокси при периодическом опросе (ping-е)
import (
dto "github.com/prometheus/client_model/go"
)
// Pong тип ответа, который сервис отдает прокси при периодическом опросе (ping-е)
type Pong struct {
Uid string `json:"uid"`
Name string `json:"name"`
Version string `json:"version"`
Status string `json:"status"`
Port int `json:"port"`
Pid string `json:"pid"`
State string `json:"state"`
Replicas int `json:"replicas"`
Https bool `json:"https"`
DeadTime int64 `json:"dead_time"`
Follower string `json:"follower"`
Uid string `json:"uid"`
Config string `json:"config"`
Name string `json:"name"`
Version string `json:"version"`
Status string `json:"status"`
Host string `json:"host"`
Pid string `json:"pid"`
Replicas int `json:"replicas"`
PortHTTP int `json:"portHTTP"`
PortGrpc int `json:"portGrpc"`
PortMetric int `json:"portMetric"`
PortHTTPS int `json:"portHTTPS"`
EnableHttps bool `json:"enableHttps"`
DeadTime int64 `json:"dead_time"`
Follower string `json:"follower"`
Metrics map[string]*dto.MetricFamily `json:"metrics"`
}
type Hosts struct {
@ -20,4 +29,4 @@ type Hosts struct {
PortFrom int `json:"portfrom"`
PortTo int `json:"portto"`
Protocol string `json:"protocol"`
}
}

107
vendor/git.lowcodeplatform.net/fabric/models/profile.go

@ -1,46 +1,77 @@
package models
type ProfileData struct {
Revision string `json:"revision"` // ревизия текущей сессии (если сессия обновляется (меняется профиль) - ID-сессии остается, но ревизия меняется
Hash string `json:"hash"`
Email string `json:"email"`
Uid string `json:"uid"`
ObjUid string `json:"obj_uid"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Photo string `json:"photo"`
Age string `json:"age"`
City string `json:"city"`
Country string `json:"country"`
Oauth_identity string `json:"oauth_identity"`
Status string `json:"status"` // - src поля Status в профиле (иногда необходимо для доп.фильтрации)
Raw []Data `json:"raw"` // объект пользователя (нужен при сборки проекта для данного юзера при добавлении прав на базу)
Tables []Data `json:"tables"`
Roles map[string]string `json:"roles"`
Homepage string `json:"homepage"`
Maket string `json:"maket"`
UpdateFlag bool `json:"update_flag"`
UpdateData []Data `json:"update_data"`
CurrentRole Data `json:"current_role"`
CurrentProfile Data `json:"current_profile"`
Navigator []*Items `json:"navigator"`
}
Revision string `json:"revision"` // ревизия текущей сессии (если сессия обновляется (меняется профиль) - ID-сессии остается, но ревизия меняется
Hash string `json:"hash"`
Email string `json:"email"`
Uid string `json:"uid"`
ObjUid string `json:"obj_uid"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Photo string `json:"photo"`
Age string `json:"age"`
City string `json:"city"`
Country string `json:"country"`
Oauth_identity string `json:"oauth_identity"`
Status string `json:"status"` // - src поля Status в профиле (иногда необходимо для доп.фильтрации)
Raw []Data `json:"raw"` // объект пользователя (нужен при сборки проекта для данного юзера при добавлении прав на базу)
Tables []Data `json:"tables"`
Roles []Data
Homepage string `json:"homepage"`
Maket string `json:"maket"`
UpdateFlag bool `json:"update_flag"`
UpdateData []Data `json:"update_data"`
CurrentRole Data `json:"current_role"`
Profiles []Data `json:"profiles"`
CurrentProfile Data `json:"current_profile"`
Navigator []*Items `json:"navigator"`
Groups string
GroupsValue string
GroupsDefaultSrc string
GroupsDefaultValue string
ButtonsNavTop []Data
CountLicense int
BaseMode map[string]string
// TODO проверить где используется и выпилить
RolesOld map[string]string `json:"roles"` //deplicated
First_name string //deplicated
Last_name string //deplicated
type Items struct {
Title string `json:"title"`
ExtentedLink string `json:"extentedLink"`
Uid string `json:"uid"`
Source string `json:"source"`
Icon string `json:"icon"`
Leader string `json:"leader"`
Order string `json:"order"`
Type string `json:"type"`
Preview string `json:"preview"`
Url string `json:"url"`
Sub []string `json:"sub"`
Incl []*Items `json:"incl"`
Class string `json:"class"`
}
// Items is one node of the navigation tree. Sub lists the uids of child
// nodes; Incl holds the resolved child nodes themselves (populated by
// ScanSub, which moves entries from a flat lookup map into Incl).
type Items struct {
	Title        string `json:"title"`
	ExtentedLink string `json:"extentedLink"` // NOTE(review): "Extented" looks like a typo of "Extended" — kept as-is for JSON compatibility
	Uid          string `json:"uid"`
	Source       string `json:"source"`
	Icon         string `json:"icon"`
	Leader       string `json:"leader"`
	Order        string `json:"order"`
	Type         string `json:"type"`
	Preview      string `json:"preview"`
	Url          string `json:"url"`
	Sub          []string `json:"sub"`  // uids of nested items, resolved by ScanSub
	Incl         []*Items `json:"incl"` // resolved nested items
	Class        string `json:"class"`
	FinderMode   string `json:"finder_mode"`
}
// ScanSub rebuilds the nesting hierarchy for this node: for every child uid
// listed in p.Sub it moves the matching entry out of the flat lookup map
// into p.Incl (as a copy), then recurses into the moved copy. Consumed
// entries are deleted from the map, so each item is attached at most once —
// which also prevents infinite recursion on cyclic Sub references.
// TODO: duplicated elsewhere — consolidate the two copies of this function.
func (p *Items) ScanSub(maps *map[string]*Items) {
	if len(p.Sub) == 0 {
		return
	}
	index := *maps
	for _, uid := range p.Sub {
		src := index[uid]
		if src == nil {
			// uid not (or no longer) present in the map — already consumed or unknown.
			continue
		}
		child := *src
		p.Incl = append(p.Incl, &child)
		delete(index, uid)
		child.ScanSub(maps)
	}
}

8
vendor/modules.txt

@ -13,19 +13,19 @@ cloud.google.com/go/compute/metadata
# cloud.google.com/go/iam v0.7.0
## explicit; go 1.19
cloud.google.com/go/iam
# cloud.google.com/go/storage v1.27.0
## explicit; go 1.17
# cloud.google.com/go/storage v1.28.0
## explicit; go 1.19
cloud.google.com/go/storage
cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal/apiv2
cloud.google.com/go/storage/internal/apiv2/stubs
# git.lowcodeplatform.net/fabric/lib v0.1.11
# git.lowcodeplatform.net/fabric/lib v0.1.35
## explicit; go 1.17
git.lowcodeplatform.net/fabric/lib
# git.lowcodeplatform.net/fabric/logbox-client v0.0.0-20230129180131-bf48bfbee92f
## explicit; go 1.18
git.lowcodeplatform.net/fabric/logbox-client
# git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237
# git.lowcodeplatform.net/fabric/models v0.1.12
## explicit; go 1.17
git.lowcodeplatform.net/fabric/models
# git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856

Loading…
Cancel
Save