Browse Source

[init]

rr-01-add-metrics
loveckiy.ivan 2 years ago
commit
514190e21a
  1. 54
      .drone.yml
  2. 2
      .gitconfig
  3. 8
      .gitignore
  4. 68
      README.md
  5. 164
      cmd/logbox/main.go
  6. 66
      go.mod
  7. 249
      go.sum
  8. 28
      pkg/logbox/interface.go
  9. 150
      pkg/logbox/logbox.go
  10. 93
      pkg/model/config.go
  11. 74
      pkg/model/configutils.go
  12. 186
      pkg/model/models.go
  13. 20
      pkg/model/service.go
  14. 1073
      pkg/servers/docs/docs.go
  15. 1010
      pkg/servers/docs/swagger.json
  16. 682
      pkg/servers/docs/swagger.yaml
  17. 64
      pkg/servers/grpcserver/grpcserver.go
  18. 14
      pkg/servers/grpcserver/methods/server.go
  19. 1
      pkg/servers/grpcserver/methods/write.go
  20. 22
      pkg/servers/httpserver/handlers/alive.go
  21. 58
      pkg/servers/httpserver/handlers/handlers.go
  22. 55
      pkg/servers/httpserver/handlers/ping.go
  23. 53
      pkg/servers/httpserver/handlers/stat.go
  24. 70
      pkg/servers/httpserver/httpserver.go
  25. 83
      pkg/servers/httpserver/middleware.go
  26. 70
      pkg/servers/httpserver/routers.go
  27. 53
      pkg/servers/servers.go
  28. 38
      pkg/service/interface.go
  29. 37
      pkg/service/ping.go
  30. 13
      pkg/service/stat.go
  31. 13
      pkg/service/write.go
  32. 202
      vendor/cloud.google.com/go/LICENSE
  33. 513
      vendor/cloud.google.com/go/compute/metadata/metadata.go
  34. 315
      vendor/cloud.google.com/go/iam/iam.go
  35. 54
      vendor/cloud.google.com/go/internal/annotate.go
  36. 108
      vendor/cloud.google.com/go/internal/optional/optional.go
  37. 54
      vendor/cloud.google.com/go/internal/retry.go
  38. 109
      vendor/cloud.google.com/go/internal/trace/trace.go
  39. 6
      vendor/cloud.google.com/go/internal/version/update_version.sh
  40. 71
      vendor/cloud.google.com/go/internal/version/version.go
  41. 335
      vendor/cloud.google.com/go/storage/acl.go
  42. 1186
      vendor/cloud.google.com/go/storage/bucket.go
  43. 228
      vendor/cloud.google.com/go/storage/copy.go
  44. 176
      vendor/cloud.google.com/go/storage/doc.go
  45. 32
      vendor/cloud.google.com/go/storage/go110.go
  46. 130
      vendor/cloud.google.com/go/storage/iam.go
  47. 37
      vendor/cloud.google.com/go/storage/invoke.go
  48. 42
      vendor/cloud.google.com/go/storage/not_go110.go
  49. 188
      vendor/cloud.google.com/go/storage/notifications.go
  50. 385
      vendor/cloud.google.com/go/storage/reader.go
  51. 1116
      vendor/cloud.google.com/go/storage/storage.go
  52. 23841
      vendor/cloud.google.com/go/storage/storage.replay
  53. 261
      vendor/cloud.google.com/go/storage/writer.go
  54. 2
      vendor/git.lowcodeplatform.net/fabric/lib/.gitconfig
  55. 9
      vendor/git.lowcodeplatform.net/fabric/lib/.gitignore
  56. 3
      vendor/git.lowcodeplatform.net/fabric/lib/README.md
  57. 305
      vendor/git.lowcodeplatform.net/fabric/lib/cli.go
  58. 107
      vendor/git.lowcodeplatform.net/fabric/lib/config.go
  59. 99
      vendor/git.lowcodeplatform.net/fabric/lib/crypto.go
  60. 305
      vendor/git.lowcodeplatform.net/fabric/lib/files.go
  61. 204
      vendor/git.lowcodeplatform.net/fabric/lib/function.go
  62. 139
      vendor/git.lowcodeplatform.net/fabric/lib/http.go
  63. 386
      vendor/git.lowcodeplatform.net/fabric/lib/logger.go
  64. 343
      vendor/git.lowcodeplatform.net/fabric/lib/metric.go
  65. 41
      vendor/git.lowcodeplatform.net/fabric/lib/state.go
  66. 217
      vendor/git.lowcodeplatform.net/fabric/lib/vfs.go
  67. 205
      vendor/git.lowcodeplatform.net/fabric/lib/vfs_ext.go
  68. 2
      vendor/git.lowcodeplatform.net/fabric/models/.gitconfig
  69. 9
      vendor/git.lowcodeplatform.net/fabric/models/.gitignore
  70. 3
      vendor/git.lowcodeplatform.net/fabric/models/README.md
  71. 98
      vendor/git.lowcodeplatform.net/fabric/models/code.go
  72. 151
      vendor/git.lowcodeplatform.net/fabric/models/data.go
  73. 23
      vendor/git.lowcodeplatform.net/fabric/models/ping.go
  74. 46
      vendor/git.lowcodeplatform.net/fabric/models/profile.go
  75. 20
      vendor/git.lowcodeplatform.net/fabric/models/token.go
  76. 50
      vendor/git.lowcodeplatform.net/fabric/models/tree.go
  77. 202
      vendor/github.com/Azure/azure-sdk-for-go/LICENSE
  78. 5
      vendor/github.com/Azure/azure-sdk-for-go/NOTICE
  79. 22
      vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
  80. 91
      vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
  81. 246
      vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
  82. 632
      vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
  83. 179
      vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
  84. 186
      vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
  85. 270
      vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
  86. 991
      vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
  87. 38
      vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
  88. 640
      vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
  89. 237
      vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
  90. 238
      vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
  91. 466
      vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
  92. 484
      vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
  93. 338
      vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
  94. 201
      vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
  95. 171
      vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
  96. 48
      vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
  97. 203
      vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
  98. 436
      vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
  99. 146
      vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
  100. 42
      vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go

54
.drone.yml

@ -0,0 +1,54 @@
kind: pipeline
type: docker
name: app
steps:
- name: linux amd64
failure: ignore
image: tetafro/golang-gcc:1.17-alpine
commands:
- cd /drone/src
- GOOS=linux GOARCH=amd64 go build -ldflags "-extldflags '-static'" -o /opt/releases/logbox_linux_amd64_${DRONE_TAG} cmd/logbox/main.go
- tar -cf /opt/releases/app_linux_amd64_${DRONE_TAG}.tar /opt/releases/logbox_linux_amd64_${DRONE_TAG}
- rm /opt/releases/logbox_linux_amd64_${DRONE_TAG}
volumes:
- name: releases
path: /opt/releases
when:
event:
- tag
# - name: linux 386
# failure: ignore
# image: tetafro/golang-gcc:1.15-alpine
# commands:
# - cd /drone/src
# - GOOS=linux GOARCH=386 go build -ldflags "-extldflags '-static'" -o /opt/releases/app_linux_386_${DRONE_TAG} cmd/projectlogbox/main.go
# - tar -cf /opt/releases/app_linux_386_${DRONE_TAG}.tar /opt/releases/app_linux_386_${DRONE_TAG}
# - rm /opt/releases/app_linux_386_${DRONE_TAG}
# volumes:
# - name: releases
# path: /opt/releases
# when:
# event:
# - tag
- name: darwin amd64
failure: ignore
image: tetafro/golang-gcc:1.17-alpine
commands:
- cd /drone/src
- GOOS=darwin GOARCH=amd64 go build -ldflags "-extldflags '-static'" -o /opt/releases/app_darwin_amd64_${DRONE_TAG} cmd/logbox/main.go
- tar -cf /opt/releases/app_darwin_amd64_${DRONE_TAG}.tar /opt/releases/app_darwin_amd64_${DRONE_TAG}
- rm /opt/releases/app_darwin_amd64_${DRONE_TAG}
volumes:
- name: releases
path: /opt/releases
when:
event:
- tag
volumes:
- name: releases
host:
path: /opt/releases

2
.gitconfig

@ -0,0 +1,2 @@
[url "ssh://git@git.lowcodeplatform.net/"]
insteadOf = https://git.lowcodeplatform.net/

8
.gitignore

@ -0,0 +1,8 @@
.history
.idea
.vscode
.DS_Store
*~merged*
*~merged
/public
.env

68
README.md

@ -0,0 +1,68 @@
# LogBox
## Описание
Предназначен для получения логов из сервиса через клиентов, работающих в каждом сервисе (если в сервисе не настроен клиент LogBox или сервис недоступен - логирование происходит в локальный файл)
## Принцип работы
Сервис принимает по протоколу gRPC сообщения на добавление логов в хранилище.
Формат логов
- "level" - уровень логирования
- "name" - имя сервиса
- "uid": - идентификатор сервиса
- "srv": - тип сервиса
- "config": - идентификатор конфигурации, с которой был запущен сервис
- "error": - описание ошибки (строка)
## Реализация
Имеет очередь входящих запросов, а также воркеры добавления данных из очереди в хранилище.
Если хранилище недоступно, запись будет производиться в локальный файл (во избежание потери данных)
В качестве хранилища могут выступать:
- локальный файл (в этом случае файлы ротируются)
- файлы в хранилище S3 (при наличии распределенного хранилища)
- база данных SQLite
- база данных Reindexer
- база данных ElasticSearch (стек ELK)
## Настройка
Настраивается через файл конфигурации в момент запуска сервиса
Параметры:
- type [local/s3/sqlite/reindexer/elastic]
- connect - настройки подключения к каждому из получателей
## Доп.информация
### Типы логирования
//Debug:
// сообщения отладки, профилирования.
// В production системе обычно сообщения этого уровня включаются при первоначальном
// запуске системы или для поиска узких мест (bottleneck-ов).
//Info: - логировать процесс выполнения
// обычные сообщения, информирующие о действиях системы.
// Реагировать на такие сообщения вообще не надо, но они могут помочь, например,
// при поиске багов, расследовании интересных ситуаций итд.
//Warning: - логировать странные операции
// записывая такое сообщение, система пытается привлечь внимание обслуживающего персонала.
// Произошло что-то странное. Возможно, это новый тип ситуации, ещё не известный системе.
// Следует разобраться в том, что произошло, что это означает, и отнести ситуацию либо к
// инфо-сообщению, либо к ошибке. Соответственно, придётся доработать код обработки таких ситуаций.
//Error: - логировать ошибки
// ошибка в работе системы, требующая вмешательства. Что-то не сохранилось, что-то отвалилось.
// Необходимо принимать меры довольно быстро! Ошибки этого уровня и выше требуют немедленной записи в лог,
// чтобы ускорить реакцию на них. Нужно понимать, что ошибка пользователя – это не ошибка системы.
// Если пользователь ввёл в поле -1, где это не предполагалось – не надо писать об этом в лог ошибок.
//Panic: - логировать критические ошибки
// это особый класс ошибок. Такие ошибки приводят к неработоспособности системы в целом, или
// неработоспособности одной из подсистем. Чаще всего случаются фатальные ошибки из-за неверной конфигурации
// или отказов оборудования. Требуют срочной, немедленной реакции. Возможно, следует предусмотреть уведомление о таких ошибках по SMS.
// указываем уровни логирования Error/Warning/Debug/Info/Panic
//Trace: - логировать обработки запросов

164
cmd/logbox/main.go

@ -0,0 +1,164 @@
package main
import (
	"context"
	"fmt"
	"io"
	"os"
	"os/signal"
	"path/filepath"
	"runtime/debug"
	"syscall"

	"git.lowcodeplatform.net/fabric/lib"
	"git.lowcodeplatform.net/fabric/logbox/pkg/model"
	"git.lowcodeplatform.net/fabric/logbox/pkg/servers"
	"git.lowcodeplatform.net/fabric/logbox/pkg/servers/grpcserver"
	"git.lowcodeplatform.net/fabric/logbox/pkg/servers/httpserver"
	"git.lowcodeplatform.net/fabric/logbox/pkg/service"
	"github.com/labstack/gommon/color"
)
// sep is the OS-specific path separator, used to build log-directory paths.
const sep = string(os.PathSeparator)
// fileLog is the log file handle that main defers closing on shutdown.
// NOTE(review): it is never assigned anywhere in this file — confirm it is
// set elsewhere, otherwise the deferred Close operates on a nil handle.
var fileLog *os.File
// outpurLog appears unused in this file and the name looks like a typo for
// "outputLog" — TODO confirm before renaming or removing.
var outpurLog io.Writer
// main is the CLI entry point. It delegates to the service runner and
// releases the log file handle once the runner returns.
func main() {
	lib.RunServiceFuncCLI(Start)

	// Close the log file only if one was actually opened; fileLog stays nil
	// unless assigned, and Close on a nil *os.File just returns ErrInvalid.
	if fileLog != nil {
		fileLog.Close()
	}
}
// Start boots the logbox service: it loads the configuration, prepares the
// working and log directories, initializes the logger and metrics, assembles
// the application service, and runs the HTTP and gRPC servers until a
// shutdown signal is received.
//
// The signature is dictated by lib.RunServiceFuncCLI; mode, services,
// param1..param3, sourcedb, action and version are unused here.
func Start(configfile, dir, port, mode, services, param1, param2, param3, sourcedb, action, version string) {
	var cfg model.Config
	done := color.Green("[OK]")
	fail := color.Red("[NO]")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Load the service configuration from file.
	err := lib.ConfigLoad(configfile, &cfg)
	if err != nil {
		fmt.Printf("%s (%s)", "Error. Load config is failed.", err)
		return
	}

	// Create the working directory if it does not exist yet.
	pathDir, _ := filepath.Abs(cfg.Dir)
	err = lib.CreateDir(pathDir, 0711)
	if err != nil {
		fmt.Printf("%s (%s)", "Error. Create working dir is failed.", err)
		return
	}

	cfg.UidService = cfg.DataUid
	cfg.Workingdir, _ = filepath.Abs(dir)

	///////////////// LOGGING //////////////////
	// Build the path for log files and metrics.
	if cfg.LogsDir == "" {
		cfg.LogsDir = "logs"
	}
	// A leading separator means an absolute path was configured; otherwise
	// the logs directory is resolved under the project upload directory.
	if cfg.LogsDir[:1] != sep {
		rootDir, _ := lib.RootDir()
		cfg.LogsDir = rootDir + sep + "upload" + sep + cfg.Domain + sep + cfg.LogsDir
	}

	cfg.UrlProxy = cfg.AddressProxyPointsrc

	// Initialize the logger and its rotation.
	var logger = lib.NewLogger(
		cfg.LogsDir,
		cfg.ServiceLevelLogsPointsrc,
		lib.UUID(),
		cfg.Domain,
		"logbox",
		cfg.UidService,
		cfg.LogIntervalReload.Value,
		cfg.LogIntervalClearFiles.Value,
		cfg.LogPeriodSaveFiles,
	)
	if logger == nil {
		fmt.Printf("%s Error init logger. LogsDir: %s\n", fail, cfg.LogsDir)
		// Startup failure: exit with a non-zero status so supervisors see an
		// error (the original exited with 0 here, signaling success).
		os.Exit(1)
	}
	logger.RotateInit(ctx)
	fmt.Printf("%s Enabled logs. Level:%s, Dir:%s\n", done, cfg.ServiceLevelLogsPointsrc, cfg.LogsDir)
	logger.Info("Запускаем logbox-сервис: ", cfg.Domain)

	// Create the metrics collector.
	metrics := lib.NewMetric(
		ctx,
		logger,
		cfg.LogIntervalMetric.Value,
	)

	// Log panics escaping the remainder of Start before terminating. Note
	// that panics raised before this point are not covered, since the defer
	// is only registered here.
	defer func() {
		rec := recover()
		if rec != nil {
			b := string(debug.Stack())
			logger.Panic(fmt.Errorf("%s", b), "Recover panic from main function.")
			cancel()
			os.Exit(1)
		}
	}()

	// Resolve the port when it was not passed explicitly.
	if port == "" {
		port, err = lib.AddressProxy(cfg.ProxyPointsrc, cfg.PortInterval)
		if err != nil {
			fmt.Printf("%s Port is empty. err:%s\n", fail, err)
			return
		}
	}
	cfg.Port = port

	// Assemble the application service and both servers.
	src := service.New(
		logger,
		cfg,
		metrics,
		nil,
	)
	httpServer := httpserver.New(
		ctx,
		cfg,
		src,
		metrics,
		logger,
	)
	grpcServer := grpcserver.New(
		ctx,
		cfg,
		src,
		metrics,
		logger,
	)

	// Wait for a termination signal to stop the service.
	// BUG FIX: the original subscribed to os.Kill (SIGKILL), which the Go
	// runtime can never deliver to a handler, so shutdown never triggered.
	// Subscribe to catchable signals instead, and buffer the channel as
	// signal.Notify requires so the signal is not dropped.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go ListenForShutdown(ch)

	srv := servers.New(
		"http,grpc",
		cfg,
		metrics,
		httpServer,
		grpcServer,
		src,
	)
	srv.Run()
}
// ListenForShutdown blocks until the first signal is delivered on ch and
// then terminates the process with exit status 0.
func ListenForShutdown(ch <-chan os.Signal) {
	<-ch // block until a shutdown signal (or channel close) arrives
	os.Exit(0)
}

66
go.mod

@ -0,0 +1,66 @@
module git.lowcodeplatform.net/fabric/logbox
go 1.18
require (
git.lowcodeplatform.net/fabric/lib v0.0.0-20221023073958-5f6c90acd183
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
github.com/gorilla/mux v1.8.0
github.com/labstack/gommon v0.4.0
github.com/pkg/errors v0.8.1
github.com/swaggo/http-swagger v1.3.3
github.com/swaggo/swag v1.8.7
google.golang.org/grpc v1.20.1
)
require (
cloud.google.com/go v0.38.0 // indirect
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.9.0 // indirect
github.com/Azure/go-autorest/autorest/adal v0.5.0 // indirect
github.com/Azure/go-autorest/autorest/date v0.1.0 // indirect
github.com/Azure/go-autorest/logger v0.1.0 // indirect
github.com/Azure/go-autorest/tracing v0.5.0 // indirect
github.com/BurntSushi/toml v1.2.0 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/aws/aws-sdk-go v1.23.4 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/gabriel-vasile/mimetype v1.4.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/spec v0.20.6 // indirect
github.com/go-openapi/swag v0.19.15 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/protobuf v1.3.1 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/graymeta/stow v0.2.8 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.11 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/ncw/swift v1.0.49 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe // indirect
github.com/urfave/cli v1.22.10 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.21.0 // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/api v0.8.0 // indirect
google.golang.org/appengine v1.5.0 // indirect
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

249
go.sum

@ -0,0 +1,249 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
git.lowcodeplatform.net/fabric/lib v0.0.0-20221023073958-5f6c90acd183 h1:42S73FfOEDBEzvCl18JBYLAJv6D7nF9s5BHf8R6QV2g=
git.lowcodeplatform.net/fabric/lib v0.0.0-20221023073958-5f6c90acd183/go.mod h1:63EYUgLgTQd3SmTrHdZuqnjL+oHWDk7Bn1x8Ko+tFmk=
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237 h1:TiGs+dG9tueKWvMDos7XMIej9kekY0777bCI3+QXsh4=
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237/go.mod h1:kTVmb3xTTlMOV0PJ+IFHC3kS6pkOszNigaRsZeZp0M0=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible h1:Hn/DsObfmw0M7dMGS/c0MlVrJuGFzHzOpBWL89acR68=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/aws/aws-sdk-go v1.23.4 h1:F6f/iQRhuSfrpUdy80q29898H0NYN27pX+95tkJ+BIY=
github.com/aws/aws-sdk-go v1.23.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/gabriel-vasile/mimetype v1.4.1 h1:TRWk7se+TOjCYgRth7+1/OYLNiRNIotknkFtf/dnN7Q=
github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/spec v0.20.6 h1:ich1RQ3WDbfoeTqTAb+5EIxNmpKVJZWBNah9RAT0jIQ=
github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/readahead v0.0.0-20161222183148-eaceba169032/go.mod h1:qYysrqQXuV4tzsizt4oOQ6mrBZQ0xnQXP3ylXX8Jk5Y=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/graymeta/stow v0.2.8 h1:fxN42iKy/bUg5nMR/2iWSc5+57hctCBbnFQ31PrYIOU=
github.com/graymeta/stow v0.2.8/go.mod h1:JAs139Zr29qfsecy7b+h9DRsWXbFbsd7LCrbCDYI84k=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe h1:K8pHPVoTgxFJt1lXuIzzOX7zZhZFldJQK/CgKx9BFIc=
github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w=
github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc=
github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo=
github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU=
github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/kothar/go-backblaze.v0 v0.0.0-20190520213052-702d4e7eb465/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

28
pkg/logbox/interface.go

@ -0,0 +1,28 @@
package logbox
type serviceInfo struct {
Path string `json:"path"`
Hash string `json:"hash"`
}
type logbox struct {
dir string
card map[string]map[string]map[string]map[string]serviceInfo
}
type Logbox interface {
Set(service, platform, os, version, hash, value string) (err error)
Get() (result map[string]map[string]map[string]map[string]serviceInfo, err error)
Delete(name string) (err error)
Lookup(name, version string) (result serviceInfo, err error)
Scan(rootDir string) (err error)
}
// New constructs a Logbox rooted at dir with an empty registry card.
func New(
	dir string,
) Logbox {
	lb := logbox{
		dir:  dir,
		card: make(map[string]map[string]map[string]map[string]serviceInfo),
	}
	return &lb
}

150
pkg/logbox/logbox.go

@ -0,0 +1,150 @@
// Package logbox defines the Logbox interface which can be used with goproxy.
package logbox
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"git.lowcodeplatform.net/fabric/lib"
)
// sep is the OS-specific path separator as a string, used when normalizing
// artifact file names into forward-slash (URL) form in Scan.
const sep = string(os.PathSeparator)

// Global lock for the default logbox.
// NOTE(review): this single package-level lock guards every logbox instance
// created via New, not just a "default" one — confirm that is intended.
var lock sync.RWMutex

// Common errors.
var (
	// ErrServiceNotFound is returned when a service name/version pair is
	// not present in the registry card.
	ErrServiceNotFound = errors.New("service name/version not found")
)
// Lookup returns the registry entry for the given service name/version.
//
// NOTE(review): the implementation is currently disabled — the body is
// commented out and the method always returns the zero serviceInfo and a
// nil error, so callers cannot distinguish "found" from "not found".
// The commented-out logic returned ErrServiceNotFound on a miss; restoring
// it needs rework anyway, because the card is nested four levels deep
// (service/os/platform/version) while only two keys are supplied here.
// The first parameter is named `os`, shadowing the os package inside this
// method, and differs from the interface declaration (`name`).
func (r *logbox) Lookup(os, version string) (result serviceInfo, err error) {
	//lock.RLock()
	//targets, ok := r.card[os][version]
	//lock.RUnlock()
	//if !ok {
	//	return result, ErrServiceNotFound
	//}
	return result, nil
}
// Set registers (or refreshes) the artifact entry for the given
// service/platform/os/version. The stored path is exposed under the
// "/download/" prefix and hash identifies the artifact content.
//
// Bug fixed: the original only touched the version map inside the branch
// that created the platform map for the first time, so calling Set against
// an existing service/os/platform silently did nothing — new versions were
// dropped and existing entries were never refreshed. (Its "update" path was
// also dead code and used a different Path format, without the /download/
// prefix; the prefix form is kept here as the one the live path produced.)
func (r *logbox) Set(service, platform, os, version, hash, value string) (err error) {
	lock.Lock()
	defer lock.Unlock()

	// Materialize each level of the nested card lazily.
	if _, ok := r.card[service]; !ok {
		r.card[service] = map[string]map[string]map[string]serviceInfo{}
	}
	if _, ok := r.card[service][os]; !ok {
		r.card[service][os] = map[string]map[string]serviceInfo{}
	}
	if _, ok := r.card[service][os][platform]; !ok {
		r.card[service][os][platform] = map[string]serviceInfo{}
	}

	// Insert or refresh the version entry unconditionally; the zero value
	// is a valid starting point when the version is new.
	si := r.card[service][os][platform][version]
	si.Path = "/download/" + value
	si.Hash = hash
	r.card[service][os][platform][version] = si
	return nil
}
// Get returns the whole registry card.
// NOTE(review): the map is handed out without copying and without taking
// the read lock, so callers share mutable state with the registry.
func (r *logbox) Get() (result map[string]map[string]map[string]map[string]serviceInfo, err error) {
	result = r.card
	return result, nil
}
// Delete removes the entry stored under the given top-level key.
func (r *logbox) Delete(os string) (err error) {
	lock.Lock()
	defer lock.Unlock()

	// delete is a no-op when the key is absent, so no existence check is needed.
	delete(r.card, os)
	return nil
}
// Scan walks the repository directory for built artifacts and rebuilds the
// registry card. File names are expected to carry at least four
// underscore-separated parts: service_os_platform_version[...]. Each match
// is registered via Set with a hash derived from the file name and size.
//
// Fixes vs. the original: the unconditional `defer fmt.Println(err)` that
// printed "<nil>" on every successful scan is removed; the error from
// filepath.Abs is no longer ignored; the receiver is renamed to r for
// consistency with the other methods; the error message follows Go style.
func (r *logbox) Scan(rootDir string) (err error) {
	logboxDir, err := filepath.Abs(rootDir)
	if err != nil {
		return err
	}

	directory, err := os.Open(logboxDir)
	if err != nil {
		return err
	}
	defer directory.Close()

	objects, err := directory.Readdir(-1)
	if err != nil {
		return err
	}

	// Walk the directory entries and register every build file found.
	for _, obj := range objects {
		if obj.IsDir() {
			continue
		}

		// Split the file name into the map keys of the registry.
		parts := strings.Split(obj.Name(), "_")
		if len(parts) < 4 {
			return fmt.Errorf("invalid logbox folder structure: unexpected file name %q", obj.Name())
		}
		service, osName, platform, version := parts[0], parts[1], parts[2], parts[3]

		// The hash identifies the artifact by name+size; the stored value is
		// normalized to forward slashes for URL use.
		hash := lib.Hash(obj.Name() + fmt.Sprint(obj.Size()))
		value := strings.ReplaceAll(obj.Name(), sep, "/")

		if err = r.Set(service, platform, osName, version, hash, value); err != nil {
			return err
		}
	}
	return nil
}

93
pkg/model/config.go

@ -0,0 +1,93 @@
package model
type Config struct {
// новые параметры для совместимости с новый деплоем
Dir string `envconfig:"DIR" default:""`
Port string `envconfig:"PORT" default:""`
GrpcPort string `envconfig:"GRPC_PORT" default:""`
ProxyPointsrc string `envconfig:"PROXY_POINTSRC" default:""`
ProxyPointvalue string `envconfig:"PROXY_POINTVALUE" default:""`
PortInterval string `envconfig:"PORT_INTERVAL" default:""`
ProjectKey string `envconfig:"PROJECT_KEY" default:"LKHlhb899Y09olUi"`
ClientPath string `envconfig:"CLIENT_PATH" default:""`
UrlProxy string `envconfig:"URL_PROXY" default:""`
UrlGui string `envconfig:"URL_GUI" default:""`
UrlApi string `envconfig:"URL_API" default:""`
UidService string `envconfig:"UID_SERVICE" default:""`
// Config
ConfigName string `envconfig:"CONFIG_NAME" default:""`
RootDir string `envconfig:"ROOT_DIR" default:""`
// Logger
LogsDir string `envconfig:"LOGS_DIR" default:"logs"`
LogsLevel string `envconfig:"LOGS_LEVEL" default:""`
LogIntervalReload Duration `envconfig:"LOG_INTERVAL_RELOAD" default:"10m" description:"интервал проверки необходимости пересозданния нового файла"`
LogIntervalClearFiles Duration `envconfig:"LOG_INTERVAL_CLEAR_FILES" default:"30m" description:"интервал проверка на необходимость очистки старых логов"`
LogPeriodSaveFiles string `envconfig:"LOG_PERION_SAVE_FILES" default:"0-1-0" description:"период хранения логов"`
LogIntervalMetric Duration `envconfig:"LOG_INTERVAL_METRIC" default:"10s" description:"период сохранения метрик в файл логирования"`
// Http
MaxRequestBodySize Int `envconfig:"MAX_REQUEST_BODY_SIZE" default:"10485760"`
ReadTimeout Duration `envconnfig:"READ_TIMEOUT" default:"10s"`
WriteTimeout Duration `envconnfig:"WRITE_TIMEOUT" default:"10s"`
ReadBufferSize Int `envconfig:"READ_BUFFER_SIZE" default:"16384"`
// Params from .cfg
SlashDatecreate string `envconfig:"SLASH_DATECREATE" default:""`
SlashOwnerPointsrc string `envconfig:"SLASH_OWNER_POINTSRC" default:""`
SlashOwnerPointvalue string `envconfig:"SLASH_OWNER_POINTVALUE" default:""`
SlashTitle string `envconfig:"SLASH_TITLE" default:""`
AccessAdminPointsrc string `envconfig:"ACCESS_ADMIN_POINTSRC" default:""`
AccessAdminPointvalue string `envconfig:"ACCESS_ADMIN_POINTVALUE" default:""`
AccessDeletePointsrc string `envconfig:"ACCESS_DELETE_POINTSRC" default:""`
AccessDeletePointvalue string `envconfig:"ACCESS_DELETE_POINTVALUE" default:""`
AccessReadPointsrc string `envconfig:"ACCESS_READ_POINTSRC" default:""`
AccessReadPointvalue string `envconfig:"ACCESS_READ_POINTVALUE" default:""`
AccessWritePointsrc string `envconfig:"ACCESS_WRITE_POINTSRC" default:""`
AccessWritePointvalue string `envconfig:"ACCESS_WRITE_POINTVALUE" default:""`
AddressProxyPointsrc string `envconfig:"ADDRESS_PROXY_POINTSRC" default:""`
AddressProxyPointvalue string `envconfig:"ADDRESS_PROXY_POINTVALUE" default:""`
Checkrun string `envconfig:"CHECKRUN" default:""`
CheckServiceext string `envconfig:"CHECK_SERVICEEXT" default:""`
Configuration string `envconfig:"CONFIGURATION" default:""`
DataUid string `envconfig:"DATA_UID" default:""`
Domain string `envconfig:"DOMAIN" default:""`
Driver string `envconfig:"DRIVER" default:""`
Pathrun string `envconfig:"PATHRUN" default:""`
PortAutoInterval string `envconfig:"PORT_AUTO_INTERVAL" default:""`
Projectuid string `envconfig:"PROJECTUID" default:""`
ProjectPointsrc string `envconfig:"PROJECT_POINTSRC" default:""`
ProjectPointvalue string `envconfig:"PROJECT_POINTVALUE" default:""`
ReplicasService Int `envconfig:"REPLICAS_SERVICE" default:"1"`
// возможные варианты полей с путями к файлам запуска
ServiceExec string `envconfig:"SERVICE_EXEC" default:""`
VersionPointsrc string `envconfig:"VERSION_POINTSRC" default:""`
ServiceVersionPointsrc string `envconfig:"SERVICE_VERSION_POINTSRC" default:""`
AppVersionPointsrc string `envconfig:"APP_VERSION_POINTSRC" default:""`
ApiVersionPointsrc string `envconfig:"API_VERSION_POINTSRC" default:""`
GuiVersionPointsrc string `envconfig:"GUI_VERSION_POINTSRC" default:""`
ProxyVersionPointsrc string `envconfig:"PROXY_VERSION_POINTSRC" default:""`
ServiceLevelLogsPointsrc string `envconfig:"SERVICE_LEVEL_LOGS_POINTSRC" default:""`
ServiceLevelLogsPointvalue string `envconfig:"SERVICE_LEVEL_LOGS_POINTVALUE" default:""`
ServiceLogs string `envconfig:"SERVICE_LOGS" default:""`
ServiceMetricInterval string `envconfig:"SERVICE_METRIC_INTERVAL" default:""`
ServiveLevelLogsPointsrc string `envconfig:"SERVIVE_LEVEL_LOGS_POINTSRC" default:""`
ServiveLevelLogsPointvalue string `envconfig:"SERVIVE_LEVEL_LOGS_POINTVALUE" default:""`
Title string `envconfig:"TITLE" default:""`
ToBuild string `envconfig:"TO_BUILD" default:""`
ToUpdate string `envconfig:"TO_UPDATE" default:""`
Workingdir string `envconfig:"WORKINGDIR" default:""`
}

74
pkg/model/configutils.go

@ -0,0 +1,74 @@
package model
import (
"path/filepath"
"strconv"
"time"
)
const sep = string(filepath.Separator)
// Float wraps a float64 value parsed from textual (toml/env) config input.
// (The original comment called it a "custom duration" — copy/paste artifact.)
type Float struct {
	float64
	Value float64
}

// UnmarshalText implements encoding.TextUnmarshaler.
// Fix: the original passed bitSize 10 to strconv.ParseFloat; the documented
// values are 32 or 64, and 64 matches the float64 destination.
func (d *Float) UnmarshalText(text []byte) error {
	f, err := strconv.ParseFloat(string(text), 64)
	d.Value = f
	return err
}
// Bool wraps a boolean value parsed from textual (toml/env) config input.
type Bool struct {
	bool
	Value bool
}

// UnmarshalText implements encoding.TextUnmarshaler.
// Exactly the literal "true" enables the flag; any other text — including
// "1", "TRUE" or garbage — yields false without an error.
func (d *Bool) UnmarshalText(text []byte) error {
	d.Value = string(text) == "true"
	return nil
}
// Duration wraps a time.Duration parsed from textual (toml/env) config input.
type Duration struct {
	time.Duration
	Value time.Duration
}

// UnmarshalText implements encoding.TextUnmarshaler.
// A bare number (no trailing h/m/s unit) is interpreted as minutes by
// default; anything else is handed to time.ParseDuration unchanged.
func (d *Duration) UnmarshalText(text []byte) (err error) {
	s := string(text)
	if s != "" {
		switch s[len(s)-1:] {
		case "h", "m", "s":
			// already carries a unit suffix
		default:
			s += "m"
		}
	}
	d.Value, err = time.ParseDuration(s)
	return err
}
// Int wraps an int value parsed from textual (toml/env) config input.
type Int struct {
	int
	Value int
}

// UnmarshalText implements encoding.TextUnmarshaler.
// On a parse failure Value is left at strconv.Atoi's zero result and the
// error is returned to the caller.
func (d *Int) UnmarshalText(text []byte) error {
	n, err := strconv.Atoi(string(text))
	d.Value = n
	return err
}

186
pkg/model/models.go

@ -0,0 +1,186 @@
package model
import (
"encoding/json"
"fmt"
)
// Pong is the response the service returns to the proxy during the
// periodic health poll (ping).
type Pong struct {
	Name     string `json:"name"`     // service name
	Version  string `json:"version"`  // service version
	Status   string `json:"status"`   // textual status
	Port     int    `json:"port"`     // port the service listens on
	Pid      string `json:"pid"`      // process id, kept as a string
	State    string `json:"state"`    // current lifecycle state
	Replicas int    `json:"replicas"` // number of replicas
}
// Request is a generic inbound payload: a list of arbitrary items.
type Request struct {
	Data []interface{} `json:"data"`
}

// Response is a generic service reply: payload, status and paging metrics.
type Response struct {
	Data    interface{} `json:"data"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// Metrics carries result-set sizing and paging information for a response.
type Metrics struct {
	ResultSize    int    `json:"result_size"`
	ResultCount   int    `json:"result_count"`
	ResultOffset  int    `json:"result_offset"`
	ResultLimit   int    `json:"result_limit"`
	ResultPage    int    `json:"result_page"`
	TimeExecution string `json:"time_execution"`
	TimeQuery     string `json:"time_query"`
	PageLast      int    `json:"page_last"`
	PageCurrent   int    `json:"page_current"`
	PageList      []int  `json:"page_list"`
	PageFrom      int    `json:"page_from"`
	PageTo        int    `json:"page_to"`
}
// RestStatus is the status portion of a service response, carrying the
// underlying Go error alongside a code/description for clients.
type RestStatus struct {
	Description string `json:"description"`
	Status      int    `json:"status"`
	Code        string `json:"code"`
	Error       error  `json:"error"`
}

// MarshalJSON serializes RestStatus with the error rendered as a plain
// string, since error values are not JSON-encodable directly.
//
// Fix: the original used fmt.Sprint(r.Error), which turned a nil error into
// the literal string "<nil>" in every successful API response; a nil error
// now encodes as the empty string. The local variable shadowing the json
// package is also renamed.
func (r RestStatus) MarshalJSON() ([]byte, error) {
	type restStatusJSON struct {
		Description string `json:"description"`
		Status      int    `json:"status"`
		Code        string `json:"code"`
		Error       string `json:"error"`
	}
	out := restStatusJSON{
		Description: r.Description,
		Status:      r.Status,
		Code:        r.Code,
	}
	if r.Error != nil {
		out.Error = r.Error.Error()
	}
	return json.Marshal(out)
}
// ResponseData is a reply variant whose payload is a list of Data objects
// plus an auxiliary free-form result.
type ResponseData struct {
	Data    []Data      `json:"data"`
	Res     interface{} `json:"res"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}
// ------------------------------------------

// Attribute is a single named attribute of a Data object.
type Attribute struct {
	Value  string `json:"value"`
	Src    string `json:"src"`
	Tpls   string `json:"tpls"`
	Status string `json:"status"`
	Rev    string `json:"rev"`
	Editor string `json:"editor"`
}

// Data is a generic platform object: a header (uid/id/source/...) plus a
// map of named attributes.
// NOTE(review): the field Сopies begins with a CYRILLIC capital Es, not a
// Latin C — renaming would change the exported interface, so it is kept,
// but confirm whether this is intentional.
type Data struct {
	Uid        string               `json:"uid"`
	Id         string               `json:"id"`
	Source     string               `json:"source"`
	Parent     string               `json:"parent"`
	Type       string               `json:"type"`
	Title      string               `json:"title"`
	Rev        string               `json:"rev"`
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
}

// Attr returns the requested element of the named attribute, or the
// matching header field when the attribute does not exist; found reports
// whether anything was resolved. (Behavior preserved from the original;
// the repeated map lookups are collapsed into one.)
func (p *Data) Attr(name, element string) (result string, found bool) {
	if attr, ok := p.Attributes[name]; ok {
		// Fix-up for objects carrying a hidden "uid" attribute: the header
		// value wins regardless of the requested element.
		if name == "uid" {
			return p.Uid, true
		}
		switch element {
		case "src":
			return attr.Src, true
		case "value":
			return attr.Value, true
		case "tpls":
			return attr.Tpls, true
		case "rev":
			return attr.Rev, true
		case "status":
			return attr.Status, true
		case "uid":
			return p.Uid, true
		case "source":
			return p.Source, true
		case "id":
			return p.Id, true
		case "title":
			return p.Title, true
		case "type":
			return p.Type, true
		}
		return "", false
	}
	// Attribute absent: fall back to header fields addressed by name.
	switch name {
	case "uid":
		return p.Uid, true
	case "source":
		return p.Source, true
	case "id":
		return p.Id, true
	case "title":
		return p.Title, true
	case "type":
		return p.Type, true
	}
	return "", false
}

// AttrSet replaces one element of the named attribute in place and reports
// whether the attribute existed. An unknown element name still counts as
// success but changes nothing (preserved from the original).
// (The original scanned the map linearly twice; a direct lookup is
// equivalent and O(1).)
func (p *Data) AttrSet(name, element, value string) bool {
	g, ok := p.Attributes[name]
	if !ok {
		return false
	}
	switch element {
	case "src":
		g.Src = value
	case "value":
		g.Value = value
	case "tpls":
		g.Tpls = value
	case "rev":
		g.Rev = value
	case "status":
		g.Status = value
	}
	p.Attributes[name] = g
	return true
}

20
pkg/model/service.go

@ -0,0 +1,20 @@
package model
import "git.lowcodeplatform.net/fabric/logbox/pkg/logbox"
type StatIn struct {
Action string `json:"action"`
ServiceLogbox logbox.Logbox
}
type StatOut struct {
Result interface{} `json:"result"`
}
type WriteIn struct {
Payload []byte
}
type WriteOut struct {
Result interface{} `json:"result"`
}

1073
pkg/servers/docs/docs.go

File diff suppressed because it is too large

1010
pkg/servers/docs/swagger.json

File diff suppressed because it is too large

682
pkg/servers/docs/swagger.yaml

@ -0,0 +1,682 @@
definitions:
models.AttemptsOutput:
properties:
bad_attempts:
example: 1
type: integer
period_minutes:
example: 10
type: integer
type: object
models.ChangePassInput:
properties:
current_password:
example: Qwerty123
type: string
new_password:
example: Qwerty112233
type: string
type: object
models.ChangePassUnsafeInput:
properties:
new_password:
example: Qwerty112233
type: string
type: object
models.CheckSumInput:
properties:
checksum:
example: mWzulhSEIsdRfw8rYaPDiAS9dTFx96VuMd7pAKzo4MM=
type: string
login:
example: Tolkacheva.Elena@wildberries.ru
type: string
type: object
models.CheckedOutput:
properties:
checked:
example: true
type: boolean
type: object
models.LoginPassInput:
properties:
login:
description: login OR email
example: Tolkacheva.Elena@wildberries.ru
type: string
password:
example: qwerty
type: string
period_minutes:
default: 10
description: bad attempts count during this period in minutes
example: 15
type: integer
type: object
models.PassForgotInput:
properties:
email:
type: string
template_id:
type: string
url:
type: string
type: object
models.PassResetInput:
properties:
password:
type: string
template_id:
type: string
token:
type: string
type: object
models.ShortUser:
properties:
checksum:
example: 2L7xhnV9qw/witp9iPdXmKEG14Ea5qbjvgsXruGGO5I=
type: string
created:
example: "2019-05-21T12:41:04+03:00"
type: string
email:
example: Tolkacheva.Elena@wildberries.ru
type: string
full_name:
type: string
lang:
type: string
login:
example: supp374836
type: string
supplier_id:
example: 8759
type: integer
user_id:
example: 374836
type: integer
type: object
models.UserForCreate:
properties:
email:
example: Tolkacheva.Elena@wildberries.ru
type: string
full_name:
example: Иванов Иван Иванович
type: string
lang:
example: en
type: string
login:
example: mylogin
type: string
password:
example: Qwerty112233
type: string
supplier_id:
example: 8759
type: integer
type: object
models.Users:
items:
properties:
checksum:
example: 2L7xhnV9qw/witp9iPdXmKEG14Ea5qbjvgsXruGGO5I=
type: string
created:
example: "2019-05-21T12:41:04+03:00"
type: string
email:
example: Tolkacheva.Elena@wildberries.ru
type: string
full_name:
type: string
lang:
type: string
login:
example: supp374836
type: string
supplier_id:
example: 8759
type: integer
user_id:
example: 374836
type: integer
type: object
type: array
responses.Param:
properties:
name:
example: login
type: string
reason:
example: login required
type: string
type: object
responses.Params:
items:
properties:
name:
example: login
type: string
reason:
example: login required
type: string
type: object
type: array
responses.Response:
properties:
detail:
example: validation error
type: string
result:
type: object
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
responses.Response[models.AttemptsOutput]:
properties:
detail:
example: validation error
type: string
result:
properties:
bad_attempts:
example: 1
type: integer
period_minutes:
example: 10
type: integer
type: object
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
responses.Response[models.CheckedOutput]:
properties:
detail:
example: validation error
type: string
result:
properties:
checked:
example: true
type: boolean
type: object
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
responses.Response[models.ShortUser]:
properties:
detail:
example: validation error
type: string
result:
properties:
checksum:
example: 2L7xhnV9qw/witp9iPdXmKEG14Ea5qbjvgsXruGGO5I=
type: string
created:
example: "2019-05-21T12:41:04+03:00"
type: string
email:
example: Tolkacheva.Elena@wildberries.ru
type: string
login:
example: supp374836
type: string
supplier_id:
example: 8759
type: integer
user_id:
example: 374836
type: integer
type: object
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
responses.Response[models.Users]:
properties:
detail:
example: validation error
type: string
result:
items:
$ref: '#/definitions/models.ShortUser'
type: array
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
responses.Response[responses.Params]:
properties:
detail:
example: validation error
type: string
result:
items:
$ref: '#/definitions/responses.Param'
type: array
title:
example: Ошибка запроса
type: string
type:
example: business/error
type: string
version:
example: "1.0"
type: string
type: object
info:
contact: {}
license: {}
title: PORTAL User Identity API
version: 0.0.1
paths:
/alive:
get:
description: check application health
produces:
- text/plain
responses:
"200":
description: OK
schema:
type: string
summary: alive
/api/v1/pass/forgot:
post:
parameters:
- description: pass forgot data
in: body
name: body
required: true
schema:
$ref: '#/definitions/models.PassForgotInput'
type: object
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: issue reset token
/api/v1/pass/reset:
post:
parameters:
- description: pass reset data
in: body
name: body
required: true
schema:
$ref: '#/definitions/models.PassResetInput'
type: object
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response'
"400":
description: Bad Request
schema:
$ref: '#/definitions/responses.Response'
"410":
description: Gone
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: issue reset token
/api/v1/user:
post:
parameters:
- description: new user data
in: body
name: user
required: true
schema:
$ref: '#/definitions/models.UserForCreate'
type: object
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.ShortUser]'
"400":
description: Bad Request
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: create new user
/api/v1/user/{id}:
delete:
parameters:
- description: user ID
in: path
name: id
required: true
type: integer
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: delete user by id
get:
parameters:
- description: user ID
in: path
name: id
required: true
type: integer
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.ShortUser]'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: get user data by id
patch:
parameters:
- description: user ID
in: path
name: id
required: true
type: integer
- description: new users data
in: body
name: user
required: true
schema:
$ref: '#/definitions/models.ShortUser'
type: object
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.ShortUser]'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: update user data by id, except password
/api/v1/user/{id}/changepass:
post:
parameters:
- description: user ID
in: path
name: id
required: true
type: integer
- description: change pass data
in: body
name: user
required: true
schema:
$ref: '#/definitions/models.ChangePassInput'
type: object
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: change password of user
/api/v1/user/{id}/changepassunsafe:
post:
parameters:
- description: user ID
in: path
name: id
required: true
type: integer
- description: change pass data
in: body
name: user
required: true
schema:
$ref: '#/definitions/models.ChangePassUnsafeInput'
type: object
- description: initiator of operation
in: header
name: X-Initiator-Id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: change password of user
/api/v1/user/check:
post:
parameters:
- description: login data
in: body
name: login_input
required: true
schema:
$ref: '#/definitions/models.LoginPassInput'
type: object
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.ShortUser]'
"400":
description: Bad Request
schema:
$ref: '#/definitions/responses.Response'
"401":
description: Unauthorized
schema:
$ref: '#/definitions/responses.Response[models.AttemptsOutput]'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: get user by login+pass pair
/api/v1/user/checksum/approve:
post:
parameters:
- description: password checksum data
in: body
name: checksum_input
required: true
schema:
$ref: '#/definitions/models.CheckSumInput'
type: object
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.CheckedOutput]'
"400":
description: Bad Request
schema:
$ref: '#/definitions/responses.Response[responses.Params]'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: approve password checksum
/api/v1/user/email/{email}:
get:
parameters:
- description: user email
in: path
name: email
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.Users]'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: get users by email
/api/v1/user/login/{login}:
get:
parameters:
- description: user login
in: path
name: login
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.ShortUser]'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: get user by login
/api/v1/user/supplier/{id}:
get:
parameters:
- description: supplier id
in: path
name: id
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/responses.Response[models.Users]'
"404":
description: Not Found
schema:
$ref: '#/definitions/responses.Response'
"500":
description: Internal Server Error
schema:
$ref: '#/definitions/responses.Response'
summary: get users by supplier id
schemes:
- https
security:
- ApiKeyAuth: []
securityDefinitions:
ApiKeyAuth:
in: header
name: X-App-Key
type: apiKey
swagger: "2.0"

64
pkg/servers/grpcserver/grpcserver.go

@ -0,0 +1,64 @@
package grpcserver
import (
"context"
"fmt"
"net"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"git.lowcodeplatform.net/fabric/logbox/pkg/service"
"google.golang.org/grpc"
)
// grpcserver carries the dependencies of the gRPC transport layer.
type grpcserver struct {
	ctx    context.Context   // base context for the server lifetime
	cfg    model.Config      // service configuration (GrpcPort is read in Run)
	src    service.Service   // business-logic layer
	metric lib.ServiceMetric // request metrics collector
	logger lib.Log           // structured logger
}
// Server is the minimal contract exposed to the servers aggregator:
// Run starts the gRPC listener.
type Server interface {
	Run() (err error)
}
// Run starts the gRPC server on cfg.GrpcPort and blocks until the
// listener fails; the Serve error is returned to the caller.
func (h *grpcserver) Run() error {
	addr := fmt.Sprintf(":%d", h.cfg.GrpcPort)
	h.logger.Info(fmt.Sprintf("Starting grpc server on %s...", addr))

	lis, err := net.Listen("tcp", addr)
	if err != nil {
		h.logger.Panic(err, "Cannot listen")
		return err
	}

	s := grpc.NewServer()

	// NOTE(review): service registration is missing. The original body
	// contained copy-pasted code referencing identifiers from another
	// project (server.New, binExpert, pb.RegisterBinExpertServer, zap)
	// that do not exist in this package and could not compile. Register
	// the logbox gRPC handlers (pkg/servers/grpcserver/methods) here
	// before serving.

	// Fixed: the original spawned Serve in a goroutine, never read its
	// error channel and returned nil immediately, silently dropping any
	// serve failure. Serve now blocks and its error is propagated.
	return s.Serve(lis)
}
// New constructs the gRPC Server implementation with its dependencies.
func New(
	ctx context.Context,
	cfg model.Config,
	src service.Service,
	metric lib.ServiceMetric,
	logger lib.Log,
) Server {
	srv := &grpcserver{
		ctx:    ctx,
		cfg:    cfg,
		src:    src,
		metric: metric,
		logger: logger,
	}
	return srv
}

14
pkg/servers/grpcserver/methods/server.go

@ -0,0 +1,14 @@
package methods
// Server implements the gRPC service handlers.
//
// NOTE(review): pb, binexpert and binstore are not imported anywhere in
// this file — this looks copy-pasted from another project and will not
// compile as-is; confirm the intended logbox types before use.
type Server struct {
	pb.UnimplementedBinExpertServer
	binexpert *binexpert.Expert
	binstore  binstore.Storage
}
// New constructs a Server with its expert and storage dependencies.
// The error return is always nil today; it is kept for future validation.
func New(binexpert *binexpert.Expert, binstore binstore.Storage) (*Server, error) {
	return &Server{
		binexpert: binexpert,
		binstore:  binstore,
	}, nil
}

1
pkg/servers/grpcserver/methods/write.go

@ -0,0 +1 @@
package methods

22
pkg/servers/httpserver/handlers/alive.go

@ -0,0 +1,22 @@
package handlers
import (
"encoding/json"
"fmt"
"net/http"
)
// Alive godoc
// @Summary alive
// @Description check application health
// @Produce plain
// @Success 200 {string} string "OK"
// @Router /alive [get]
//
// Alive reports service health, echoing the current configuration dump.
func (h *handlers) Alive(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)

	// NOTE(review): h.cfg is the whole config struct, so "v%s" prints the
	// entire config as the "version"; confirm whether a dedicated version
	// field was intended here.
	curVersion := fmt.Sprintf("<p>HTTP OK. v%s</p>", h.cfg)

	// Best-effort dump: a marshal failure just leaves this section empty.
	curConfig, _ := json.Marshal(h.cfg)

	// Fixed: the original used the literal "/n" where the newline escape
	// "\n" was clearly intended.
	result := fmt.Sprintf("<html><body>Version: %s\n\nConfig: \n%s</body></html>", curVersion, curConfig)
	_, _ = w.Write([]byte(result))
}

58
pkg/servers/httpserver/handlers/handlers.go

@ -0,0 +1,58 @@
package handlers
import (
"encoding/json"
"net/http"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"git.lowcodeplatform.net/fabric/logbox/pkg/service"
)
// handlers carries the dependencies shared by all HTTP handlers.
type handlers struct {
	service service.Service // business-logic layer
	logger  lib.Log         // structured logger
	cfg     model.Config    // service configuration
}
// Handlers is the set of HTTP endpoints wired up by the router.
type Handlers interface {
	// Alive serves the health-check page.
	Alive(w http.ResponseWriter, r *http.Request)
	// Ping serves the balancer liveness probe.
	Ping(w http.ResponseWriter, r *http.Request)
	// Stat serves request statistics.
	Stat(w http.ResponseWriter, r *http.Request)
}
// transportResponse serialises response as JSON and writes it with HTTP 200.
//
// Fixed: the original called WriteHeader(200) before marshalling, which
// made the later WriteHeader(403) on marshal failure a silent no-op (Go
// ignores a second WriteHeader). The payload is now marshalled first so
// the failure status actually reaches the client.
func (h *handlers) transportResponse(w http.ResponseWriter, response interface{}) (err error) {
	d, err := json.Marshal(response)
	if err != nil {
		w.WriteHeader(403)
		return err
	}
	w.WriteHeader(200)
	_, err = w.Write(d)
	return err
}
// transportError reports a failure to the client as a JSON model.Response
// with the given HTTP status code, and logs the underlying cause.
//
// Fixed: the original logged the json.Marshal error (usually nil) instead
// of the caller-supplied error, and named the parameter `error`, shadowing
// the builtin type.
func (h *handlers) transportError(w http.ResponseWriter, code int, cause error, message string) (err error) {
	var res = model.Response{}
	res.Status.Error = cause
	res.Status.Description = message

	// Log the actual failure the caller reported.
	h.logger.Error(cause, message)

	// NOTE(review): marshalling an `error` interface value typically yields
	// "{}" in JSON — confirm model.Response serialises the error usefully.
	d, err := json.Marshal(res)
	if err != nil {
		h.logger.Error(err, "transportError: cannot marshal response")
	}

	w.WriteHeader(code)
	_, _ = w.Write(d)
	return err
}
// New constructs the Handlers implementation used by the HTTP router.
func New(
	service service.Service,
	logger lib.Log,
	cfg model.Config,
) Handlers {
	h := &handlers{
		service: service,
		logger:  logger,
		cfg:     cfg,
	}
	return h
}

55
pkg/servers/httpserver/handlers/ping.go

@ -0,0 +1,55 @@
package handlers
import (
"context"
"net/http"
"git.lowcodeplatform.net/fabric/models"
)
// Ping godoc
// @Summary get user by login+pass pair
// @Param login_input body model.Pong true "login data"
// @Success 200 {object} model.Pong [Result:model.Pong]
// @Failure 400 {object} model.Pong
// @Failure 500 {object} model.Pong
// @Router /api/v1/ping [get]
//
// Ping answers the liveness probe with the service's Pong record.
// NOTE(review): the swagger summary above reads like a copy-paste from a
// login endpoint — confirm and regenerate the docs.
func (h *handlers) Ping(w http.ResponseWriter, r *http.Request) {
	var err error
	// Any error set below is reported to the client as HTTP 500.
	defer func() {
		if err != nil {
			h.transportError(w, 500, err, "")
		}
	}()

	// The ping request carries no payload; decoding is a formality.
	_, err = pingDecodeRequest(r.Context(), r)
	if err != nil {
		h.logger.Error(err, "[Ping] Error function execution (PLoginDecodeRequest).")
		return
	}

	serviceResult, err := h.service.Ping(r.Context())
	if err != nil {
		h.logger.Error(err, "[Ping] Error service execution (service.Ping).")
		return
	}

	// Fixed: the encode error was previously discarded with `_`, so the
	// following check tested a stale err value.
	response, err := pingEncodeResponse(r.Context(), serviceResult)
	if err != nil {
		h.logger.Error(err, "[Ping] Error function execution (PLoginEncodeResponse).")
		return
	}

	err = h.transportResponse(w, response)
	if err != nil {
		h.logger.Error(err, "[Ping] Error function execution (PLoginTransportResponse).")
		return
	}
}
// pingDecodeRequest is a placeholder: /ping carries no request payload,
// so decoding always yields a nil request and no error.
func pingDecodeRequest(_ context.Context, _ *http.Request) (request *[]models.Pong, err error) {
	return nil, nil
}
// pingEncodeResponse passes the service result through unchanged; the
// Pong slice is already in its wire shape.
func pingEncodeResponse(_ context.Context, serviceResult []models.Pong) (response []models.Pong, err error) {
	response = serviceResult
	return response, nil
}

53
pkg/servers/httpserver/handlers/stat.go

@ -0,0 +1,53 @@
package handlers
import (
"context"
"net/http"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"github.com/gorilla/mux"
)
// Stat serves request statistics for the action taken from the URL.
// NOTE(review): the original comment ("get user by login+pass pair") was a
// copy-paste from another handler.
func (h *handlers) Stat(w http.ResponseWriter, r *http.Request) {
	var err error
	// Any error set below is reported to the client as HTTP 500.
	defer func() {
		if err != nil {
			h.transportError(w, 500, err, "")
		}
	}()

	in, err := h.statDecodeRequest(r.Context(), r)
	if err != nil {
		h.logger.Error(err, "[Stat] Error function execution (statDecodeRequest).")
		return
	}

	serviceResult, err := h.service.Stat(r.Context(), in)
	if err != nil {
		h.logger.Error(err, "[Stat] Error service execution (service.Stat).")
		return
	}

	// Fixed: the encode error was previously discarded with `_`, so the
	// following check tested a stale err value.
	response, err := statEncodeResponse(r.Context(), serviceResult)
	if err != nil {
		h.logger.Error(err, "[Stat] Error function execution (statEncodeResponse).")
		return
	}

	err = h.transportResponse(w, response)
	if err != nil {
		h.logger.Error(err, "[Stat] Error function execution (transportResponse).")
		return
	}
}
// statDecodeRequest extracts the {action} path variable into a StatIn.
func (h *handlers) statDecodeRequest(ctx context.Context, r *http.Request) (request model.StatIn, err error) {
	request.Action = mux.Vars(r)["action"]
	return request, nil
}
// statEncodeResponse unwraps the service result for the transport layer.
func statEncodeResponse(_ context.Context, serviceResult model.StatOut) (response interface{}, err error) {
	response = serviceResult.Result
	return response, nil
}

70
pkg/servers/httpserver/httpserver.go

@ -0,0 +1,70 @@
package httpserver
import (
"context"
"fmt"
"net/http"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"git.lowcodeplatform.net/fabric/logbox/pkg/service"
"github.com/labstack/gommon/color"
"github.com/pkg/errors"
// should be so!
_ "git.lowcodeplatform.net/fabric/logbox/pkg/servers/docs"
)
// httpserver carries the dependencies of the HTTP transport layer.
type httpserver struct {
	ctx    context.Context   // base context for the server lifetime
	cfg    model.Config      // service configuration (Port, timeouts, Dir)
	src    service.Service   // business-logic layer
	metric lib.ServiceMetric // request metrics collector
	logger lib.Log           // structured logger
}
// Server is the minimal contract exposed to the servers aggregator:
// Run starts the HTTP listener and blocks until it stops.
type Server interface {
	Run() (err error)
}
// Run builds an http.Server from the configuration and serves until the
// listener stops; any ListenAndServe failure is returned wrapped.
func (h *httpserver) Run() error {
	okMark := color.Green("[OK]")

	// TLS support kept for reference:
	//err := httpscerts.Check(h.cfg.SSLCertPath, h.cfg.SSLPrivateKeyPath)
	//if err != nil {
	//	panic(err)
	//}

	srv := &http.Server{
		Addr:         ":" + h.cfg.Port,
		Handler:      h.NewRouter(),
		ReadTimeout:  h.cfg.ReadTimeout.Value,
		WriteTimeout: h.cfg.WriteTimeout.Value,
	}

	fmt.Printf("%s Service run (port:%s)\n", okMark, h.cfg.Port)
	h.logger.Info("Запуск https сервера. ", "port:", h.cfg.Port)

	//e := srv.ListenAndServeTLS(h.cfg.SSLCertPath, h.cfg.SSLPrivateKeyPath)
	if e := srv.ListenAndServe(); e != nil {
		return errors.Wrap(e, "SERVER run")
	}
	return nil
}
// New constructs the HTTP Server implementation with its dependencies.
func New(
	ctx context.Context,
	cfg model.Config,
	src service.Service,
	metric lib.ServiceMetric,
	logger lib.Log,
) Server {
	srv := &httpserver{
		ctx:    ctx,
		cfg:    cfg,
		src:    src,
		metric: metric,
		logger: logger,
	}
	return srv
}

83
pkg/servers/httpserver/middleware.go

@ -0,0 +1,83 @@
package httpserver
import (
"fmt"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"net/http"
"runtime/debug"
"strings"
"time"
)
// MiddleLogger wraps a handler with request logging and timing metrics.
// Ping probes are excluded from the log (too chatty) but still counted
// in the metrics.
func (h *httpserver) MiddleLogger(next http.Handler, name string, logger lib.Log, serviceMetrics lib.ServiceMetric) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		started := time.Now()
		next.ServeHTTP(w, r)
		elapsed := time.Since(started)

		if name != "ProxyPing" {
			logger.Info(fmt.Sprintf("Query: %s %s %s %s",
				r.Method,
				r.RequestURI,
				name,
				elapsed))
		}

		// Record timing for every request, ping included, because the probe
		// is part of the request count.
		serviceMetrics.SetTimeRequest(elapsed)
	})
}
// AuthProcessor rejects requests whose X-Auth-Key (header, or cookie as a
// fallback) is missing or does not match the configured service UID.
// /ping is always allowed through unauthenticated.
func (h *httpserver) AuthProcessor(next http.Handler, cfg model.Config) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Prefer the header; fall back to the cookie of the same name.
		authKey := r.Header.Get("X-Auth-Key")
		if authKey == "" {
			if c, err := r.Cookie("X-Auth-Key"); err == nil {
				authKey = c.Value
			}
		}

		key := strings.TrimSpace(authKey)
		isPing := r.URL.Path == "/ping"

		// Missing key or key/UID mismatch → unauthorized (pings excepted).
		if (key == "" || key != cfg.UidService) && !isPing {
			lib.ResponseJSON(w, nil, "Unauthorized", nil, nil)
			return
		}

		next.ServeHTTP(w, r)
	})
}
// Recover converts a panic in any downstream handler into a logged stack
// trace plus a 500 response, keeping the server process alive.
func (h *httpserver) Recover(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if rec := recover(); rec != nil {
				stack := string(debug.Stack())
				h.logger.Panic(fmt.Errorf("%s", stack), "Recover panic from path: ", r.URL.String(), "; form: ", r.Form)
				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}
// JsonHeaders forces a JSON content type on every response.
func (h *httpserver) JsonHeaders(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		const contentType = "application/json; charset=utf-8"
		w.Header().Set("Content-Type", contentType)
		next.ServeHTTP(w, r)
	})
}

70
pkg/servers/httpserver/routers.go

@ -0,0 +1,70 @@
package httpserver
import (
"net/http"
"net/http/pprof"
"path/filepath"
"git.lowcodeplatform.net/fabric/logbox/pkg/servers/httpserver/handlers"
"github.com/gorilla/mux"
httpSwagger "github.com/swaggo/http-swagger"
)
// Result is a generic response envelope.
//
// NOTE(review): Content has no json tag, so it serialises as "Content" —
// inconsistent with the lowercase "status"; confirm whether this type is
// actually used before changing the wire format.
type Result struct {
	Status  string `json:"status"`
	Content []interface{}
}
// Route describes one HTTP endpoint registered on the router.
type Route struct {
	Name        string           // route name, also used in log lines
	Method      string           // HTTP method, e.g. "GET"
	Pattern     string           // URL path pattern
	HandlerFunc http.HandlerFunc // handler invoked for the route
}

// Routes is the registration table consumed by NewRouter.
type Routes []Route
// NewRouter wires up every HTTP endpoint: health check, swagger UI, file
// downloads, configured routes and pprof — all behind the recover, metrics
// and JSON-header middleware.
func (h *httpserver) NewRouter() *mux.Router {
	router := mux.NewRouter().StrictSlash(true)
	hnd := handlers.New(h.src, h.logger, h.cfg)

	router.HandleFunc("/alive", hnd.Alive).Methods("GET")
	router.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
		httpSwagger.URL("/swagger/doc.json"),
	))

	// Static downloads are served from the configured directory.
	logboxDir, _ := filepath.Abs(h.cfg.Dir)
	router.PathPrefix("/download/").Handler(http.StripPrefix("/download/", http.FileServer(http.Dir(logboxDir))))

	router.Use(h.Recover)
	router.Use(h.metric.Middleware)
	router.Use(h.JsonHeaders)

	routes := Routes{
		// configured application routes
		{"Ping", "GET", "/ping", hnd.Ping},
		{"Info", "GET", "/stat", hnd.Stat},
		// pprof profiling endpoints
		{"pprofIndex", "GET", "/debug/pprof/", pprof.Index},
		{"pprofIndex", "GET", "/debug/pprof/cmdline", pprof.Cmdline},
		{"pprofIndex", "GET", "/debug/pprof/profile", pprof.Profile},
		{"pprofIndex", "GET", "/debug/pprof/symbol", pprof.Symbol},
		{"pprofIndex", "GET", "/debug/pprof/trace", pprof.Trace},
	}

	for _, route := range routes {
		wrapped := h.MiddleLogger(route.HandlerFunc, route.Name, h.logger, h.metric)
		router.
			Methods(route.Method).
			Path(route.Pattern).
			Name(route.Name).
			Handler(wrapped)
	}

	return router
}

53
pkg/servers/servers.go

@ -0,0 +1,53 @@
// запускаем указанные виды из поддерживаемых серверов
package servers
import (
"strings"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"git.lowcodeplatform.net/fabric/logbox/pkg/servers/grpcserver"
"git.lowcodeplatform.net/fabric/logbox/pkg/servers/httpserver"
"git.lowcodeplatform.net/fabric/logbox/pkg/service"
)
// servers aggregates the transport servers selected by the mode string.
type servers struct {
	mode       string            // comma/substring list of servers to run, e.g. "http,grpc"
	cfg        model.Config      // service configuration
	metrics    lib.ServiceMetric // request metrics collector
	httpserver httpserver.Server // HTTP transport
	grpcserver grpcserver.Server // gRPC transport
	service    service.Service   // business-logic layer
}
// Servers starts all configured transport servers.
type Servers interface {
	Run()
}
// Run starts every server whose kind appears in the mode string
// (e.g. "http", "grpc", "http,grpc").
func (s *servers) Run() {
	if strings.Contains(s.mode, "http") {
		s.httpserver.Run()
	}
	if strings.Contains(s.mode, "grpc") {
		// Fixed: the original started the HTTP server here a second time
		// instead of the gRPC server.
		s.grpcserver.Run()
	}
	// NOTE(review): both Run implementations block and their errors are
	// discarded, so with mode "http,grpc" the gRPC server only starts
	// after HTTP stops — confirm whether these should run concurrently
	// and propagate failures.
}
// New builds the Servers aggregate.
//
// Fixed: the gRPC parameter was typed httpserver.Server; it is now
// grpcserver.Server. The two interfaces are structurally identical
// (both are `Run() error`), so existing callers are unaffected.
func New(
	mode string,
	cfg model.Config,
	metrics lib.ServiceMetric,
	httpSrv httpserver.Server,
	grpcSrv grpcserver.Server,
	service service.Service,
) Servers {
	return &servers{
		mode:       mode,
		cfg:        cfg,
		metrics:    metrics,
		httpserver: httpSrv,
		grpcserver: grpcSrv,
		service:    service,
	}
}

38
pkg/service/interface.go

@ -0,0 +1,38 @@
package service
import (
"context"
"git.lowcodeplatform.net/fabric/lib"
"git.lowcodeplatform.net/fabric/logbox/pkg/logbox"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
"git.lowcodeplatform.net/fabric/models"
)
// service wires the business logic to its dependencies.
type service struct {
	logger  lib.Log           // structured logger
	cfg     model.Config      // service configuration
	metrics lib.ServiceMetric // request metrics collector
	logbox  logbox.Logbox     // log storage client
}
// Service exposes the application's business operations.
type Service interface {
	// Ping returns liveness info consumed by the platform balancer.
	Ping(ctx context.Context) (result []models.Pong, err error)
	// Stat returns request statistics.
	Stat(ctx context.Context, in model.StatIn) (out model.StatOut, err error)
	// Write stores data in the configured destination.
	Write(ctx context.Context, in model.WriteIn) (out model.WriteOut, err error)
}
// New constructs the Service implementation.
//
// NOTE(review): logbox is dereferenced without a nil check; passing nil
// will panic — confirm that callers always supply a non-nil client.
func New(
	logger lib.Log,
	cfg model.Config,
	metrics lib.ServiceMetric,
	logbox *logbox.Logbox,
) Service {
	s := &service{
		logger:  logger,
		cfg:     cfg,
		metrics: metrics,
		logbox:  *logbox,
	}
	return s
}

37
pkg/service/ping.go

@ -0,0 +1,37 @@
package service
import (
"context"
"encoding/json"
"os"
"strconv"
"strings"
"git.lowcodeplatform.net/fabric/models"
)
// Ping builds the liveness record reported to the platform balancer.
func (s *service) Ping(ctx context.Context) (result []models.Pong, err error) {
	// Domain is expected as "name" or "name/version"; anything else keeps
	// the "ru" placeholders.
	parts := strings.Split(s.cfg.Domain, "/")
	name, version := "ru", "ru"
	switch len(parts) {
	case 1:
		name = parts[0]
	case 2:
		name, version = parts[0], parts[1]
	}

	port, _ := strconv.Atoi(s.cfg.Port)
	pid := strconv.Itoa(os.Getpid()) + ":" + s.cfg.DataUid
	state, _ := json.Marshal(s.metrics.Get())

	// NOTE: positional struct literal — field order must match models.Pong.
	pong := models.Pong{
		s.cfg.DataUid, name, version, "run", port, pid, string(state),
		s.cfg.ReplicasService.Value, false, 0, "",
	}
	return []models.Pong{pong}, nil
}

13
pkg/service/stat.go

@ -0,0 +1,13 @@
package service
import (
"context"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
)
// Stat returns information about requests.
//
// NOTE(review): currently a stub — it always returns the zero
// model.StatOut and a nil error.
func (s *service) Stat(ctx context.Context, in model.StatIn) (out model.StatOut, err error) {
	return out, err
}

13
pkg/service/write.go

@ -0,0 +1,13 @@
package service
import (
"context"
"git.lowcodeplatform.net/fabric/logbox/pkg/model"
)
// Write stores the data in the specified destination.
//
// NOTE(review): currently a stub — it always returns the zero
// model.WriteOut and a nil error.
func (s *service) Write(ctx context.Context, in model.WriteIn) (out model.WriteOut, err error) {
	return out, err
}

202
vendor/cloud.google.com/go/LICENSE

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

513
vendor/cloud.google.com/go/compute/metadata/metadata.go

@ -0,0 +1,513 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This is variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
// cachedValue caches a single metadata value keyed by its endpoint suffix.
type cachedValue struct {
	k    string     // metadata endpoint suffix, e.g. "project/project-id"
	trim bool       // trim surrounding whitespace from the fetched value
	mu   sync.Mutex // guards v
	v    string     // cached value; "" means not fetched yet
}

// Package-level caches for the most commonly requested values.
var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)

var (
	// defaultClient bounds both dial time and first-response-header time so
	// lookups on non-GCE hosts fail fast.
	defaultClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			ResponseHeaderTimeout: 2 * time.Second,
		},
	}}
	// subscribeClient deliberately omits ResponseHeaderTimeout: Subscribe
	// uses long-held responses (see Subscribe below).
	subscribeClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
	}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

// Error implements the error interface.
func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

// get returns the cached value, fetching and memoizing it on first use.
func (c *cachedValue) get(cl *Client) (v string, err error) {
	// NOTE(review): the Unlock is deferred before Lock is called; this is
	// legal (deferred calls run at return, after Lock has succeeded) but
	// reads oddly — the conventional order is Lock, then defer Unlock.
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = cl.getTrimmed(c.k)
	} else {
		v, err = cl.Get(c.k)
	}
	// Only successful lookups are cached, so transient failures retry later.
	if err == nil {
		c.v = v
	}
	return
}
var (
	onGCEOnce sync.Once // guards the one-time GCE detection
	onGCE     bool      // cached detection result
)

// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}

// initOnGCE computes the cached OnGCE answer exactly once.
func initOnGCE() {
	onGCE = testOnGCE()
}

// testOnGCE probes the metadata HTTP endpoint and the metadata DNS name in
// parallel, trying harder when local system info already suggests GCE.
func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffered so neither probe goroutine blocks after we stop reading.
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := defaultClient.hc.Do(req.WithContext(ctx))
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return subscribeClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }

// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }

// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }

// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
	return defaultClient.InstanceAttributeValue(attr)
}

// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
	return defaultClient.ProjectAttributeValue(attr)
}

// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
// strsContains reports whether s is an element of ss.
func strsContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// A Client provides metadata.
type Client struct {
	hc *http.Client // HTTP client used for every metadata request
}

// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
	return &Client{hc: c}
}

// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
//
// A 404 response is reported as NotDefinedError; any other non-200 status is
// reported as an *Error carrying the status code and response body.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	// The URL is constructed above from known-good parts, so the error is ignored.
	req, _ := http.NewRequest("GET", u, nil)
	// Metadata-Flavor is required by the metadata server to guard against SSRF.
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	if res.StatusCode != 200 {
		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
	}
	return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
	val, _, err := c.getETag(suffix)
	return val, err
}

// getTrimmed returns the value for suffix with surrounding whitespace removed.
func (c *Client) getTrimmed(suffix string) (s string, err error) {
	s, err = c.Get(suffix)
	s = strings.TrimSpace(s)
	return
}

// lines returns the value for suffix split into whitespace-trimmed lines.
func (c *Client) lines(suffix string) ([]string, error) {
	j, err := c.Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	// Primary = first network interface.
	return c.getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	// Primary = first access config of the first network interface.
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
	raw, err := c.Get("instance/tags")
	if err != nil {
		return nil, err
	}
	// The metadata server returns the tags as a JSON array of strings.
	var tags []string
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&tags); err != nil {
		return nil, err
	}
	return tags, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
	// The instance name is the first dot-separated component of the hostname.
	hostname, err := c.Hostname()
	if err != nil {
		return "", err
	}
	if i := strings.IndexByte(hostname, '.'); i >= 0 {
		return hostname[:i], nil
	}
	return hostname, nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
	// The raw value has the form "projects/<projNum>/zones/<zoneName>";
	// only the final path element is returned.
	full, err := c.getTrimmed("instance/zone")
	if err != nil {
		return "", err
	}
	slash := strings.LastIndex(full, "/")
	return full[slash+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	// Switch to long-polling: the server holds each request open until the
	// value's ETag differs from last_etag.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			// The value was deleted; fall through to notify fn once more.
			ok = false
		}
		lastETag = etag
		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

// Error implements the error interface.
func (e *Error) Error() string {
	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}

315
vendor/cloud.google.com/go/iam/iam.go

@ -0,0 +1,315 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package iam supports the resource-specific operations of Google Cloud
// IAM (Identity and Access Management) for the Google Cloud Libraries.
// See https://cloud.google.com/iam for more about IAM.
//
// Users of the Google Cloud Libraries will typically not use this package
// directly. Instead they will begin with some resource that supports IAM, like
// a pubsub topic, and call its IAM method to get a Handle for that resource.
package iam
import (
"context"
"fmt"
"time"
gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	Set(ctx context.Context, resource string, p *pb.Policy) error
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient
}

// withRetry retries calls that fail with DeadlineExceeded or Unavailable,
// using exponential backoff (100ms initial, x1.3, capped at 60s).
var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})
// Get fetches the IAM policy for resource, retrying transient failures.
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	var proto *pb.Policy
	// The x-goog-request-params header routes the request by resource.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return proto, nil
}

// Set replaces the IAM policy on resource with p, retrying transient failures.
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
			Resource: resource,
			Policy:   p,
		})
		return err
	}, withRetry)
}

// Test returns the subset of perms that the caller has on resource,
// retrying transient failures.
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	var res *pb.TestIamPermissionsResponse
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
			Resource:    resource,
			Permissions: perms,
		})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}
// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client // transport implementation (typically *grpcClient)
	resource string // fully-qualified resource name the policy applies to
}

// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
	return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}

// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: c}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
	return &Handle{
		c:        c,
		resource: resource,
	}
}
// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
	proto, err := h.c.Get(ctx, h.resource)
	if err != nil {
		return nil, err
	}
	return &Policy{InternalProto: proto}, nil
}

// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}

// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)
// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.
	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}

// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
	b := p.binding(r)
	if b == nil {
		return nil
	}
	return b.Members
}

// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
	return memberIndex(member, p.binding(r)) >= 0
}

// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
	b := p.binding(r)
	if b == nil {
		// Lazily allocate the underlying proto so the zero Policy works.
		if p.InternalProto == nil {
			p.InternalProto = &pb.Policy{}
		}
		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
			Role:    string(r),
			Members: []string{member},
		})
		return
	}
	if memberIndex(member, b) < 0 {
		b.Members = append(b.Members, member)
		return
	}
}
// Remove removes member from role r if it is present.
// Removing the last member of a role removes the role's binding entirely.
func (p *Policy) Remove(member string, r RoleName) {
	bi := p.bindingIndex(r)
	if bi < 0 {
		return // no binding for the role; nothing to do
	}
	bindings := p.InternalProto.Bindings
	b := bindings[bi]
	mi := memberIndex(member, b)
	if mi < 0 {
		return // member not in binding; nothing to do
	}
	// Order doesn't matter for bindings or members, so to remove, move the last item
	// into the removed spot and shrink the slice.
	if len(b.Members) == 1 {
		// Remove binding.
		last := len(bindings) - 1
		bindings[bi] = bindings[last]
		bindings[last] = nil // allow the removed binding to be GC'd
		p.InternalProto.Bindings = bindings[:last]
		return
	}
	// Remove member.
	// TODO(jba): worry about multiple copies of m?
	last := len(b.Members) - 1
	b.Members[mi] = b.Members[last]
	b.Members[last] = ""
	b.Members = b.Members[:last]
}
// Roles returns the names of all the roles that appear in the Policy.
func (p *Policy) Roles() []RoleName {
	if p.InternalProto == nil {
		return nil
	}
	var rns []RoleName
	for _, b := range p.InternalProto.Bindings {
		rns = append(rns, RoleName(b.Role))
	}
	return rns
}

// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}

// bindingIndex returns the index of the binding for role r, or -1 if not found.
func (p *Policy) bindingIndex(r RoleName) int {
	if p.InternalProto == nil {
		return -1
	}
	for i, b := range p.InternalProto.Bindings {
		if b.Role == string(r) {
			return i
		}
	}
	return -1
}

// memberIndex returns the index of m in b's Members, or -1 if not found.
// A nil binding has no members.
func memberIndex(m string, b *pb.Binding) int {
	if b == nil {
		return -1
	}
	for i, mm := range b.Members {
		if mm == m {
			return i
		}
	}
	return -1
}
// insertMetadata inserts metadata into the given context, merging the new
// values with any outgoing metadata already present rather than replacing it.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	// The ok result is deliberately ignored: absent outgoing metadata is fine.
	out, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating; the MD attached to a context must not be modified.
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

54
vendor/cloud.google.com/go/internal/annotate.go

@ -0,0 +1,54 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
//
//	fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
	if err == nil {
		panic("Annotate called with nil")
	}
	if s, ok := status.FromError(err); ok {
		// Rebuild the gRPC status so the code and details survive.
		p := s.Proto()
		p.Message = msg + ": " + p.Message
		return status.ErrorProto(p)
	}
	if g, ok := err.(*googleapi.Error); ok {
		// NOTE: mutates err in place; callers get back the same *googleapi.Error.
		g.Message = msg + ": " + g.Message
		return g
	}
	return fmt.Errorf("%s: %v", msg, err)
}

// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
	return Annotate(err, fmt.Sprintf(format, args...))
}

108
vendor/cloud.google.com/go/internal/optional/optional.go

@ -0,0 +1,108 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional
import (
"fmt"
"strings"
"time"
)
type (
// Bool is either a bool or nil.
Bool interface{}
// String is either a string or nil.
String interface{}
// Int is either an int or nil.
Int interface{}
// Uint is either a uint or nil.
Uint interface{}
// Float64 is either a float64 or nil.
Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
)
// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
x, ok := v.(bool)
if !ok {
doPanic("Bool", v)
}
return x
}
// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
x, ok := v.(string)
if !ok {
doPanic("String", v)
}
return x
}
// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
x, ok := v.(int)
if !ok {
doPanic("Int", v)
}
return x
}
// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
x, ok := v.(uint)
if !ok {
doPanic("Uint", v)
}
return x
}
// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
x, ok := v.(float64)
if !ok {
doPanic("Float64", v)
}
return x
}
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}

54
vendor/cloud.google.com/go/internal/retry.go

@ -0,0 +1,54 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"context"
"time"
gax "github.com/googleapis/gax-go/v2"
)
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Error() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	return retry(ctx, bo, f, gax.Sleep)
}

// retry implements Retry with an injectable sleep function for testing.
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
	sleep func(context.Context, time.Duration) error) error {
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		// Remember the last "real" error from f.
		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
			lastErr = err
		}
		p := bo.Pause()
		// sleep returns a non-nil error only when ctx is done.
		if cerr := sleep(ctx, p); cerr != nil {
			if lastErr != nil {
				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
			}
			return cerr
		}
	}
}

109
vendor/cloud.google.com/go/internal/trace/trace.go

@ -0,0 +1,109 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"fmt"
"go.opencensus.io/trace"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
// StartSpan adds a span to the trace with the given name.
func StartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}

// EndSpan ends a span with the given error.
// A nil err leaves the span's status unset.
func EndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(toStatus(err))
	}
	span.End()
}

// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
	if err2, ok := err.(*googleapi.Error); ok {
		return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
	} else if s, ok := status.FromError(err); ok {
		return trace.Status{Code: int32(s.Code()), Message: s.Message()}
	} else {
		return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
	}
}

// httpStatusCodeToOCCode maps an HTTP status code to the closest
// google.rpc.Code value.
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
	switch httpStatusCode {
	case 200:
		return int32(code.Code_OK)
	case 499:
		return int32(code.Code_CANCELLED)
	case 500:
		return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
	case 400:
		return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
	case 504:
		return int32(code.Code_DEADLINE_EXCEEDED)
	case 404:
		return int32(code.Code_NOT_FOUND)
	case 409:
		return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
	case 403:
		return int32(code.Code_PERMISSION_DENIED)
	case 401:
		return int32(code.Code_UNAUTHENTICATED)
	case 429:
		return int32(code.Code_RESOURCE_EXHAUSTED)
	case 501:
		return int32(code.Code_UNIMPLEMENTED)
	case 503:
		return int32(code.Code_UNAVAILABLE)
	default:
		return int32(code.Code_UNKNOWN)
	}
}

// TracePrintf formats its arguments and annotates the span in ctx with the
// resulting message plus the attributes in attrMap.
// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
	var attrs []trace.Attribute
	for k, v := range attrMap {
		var a trace.Attribute
		switch v := v.(type) {
		case string:
			a = trace.StringAttribute(k, v)
		case bool:
			a = trace.BoolAttribute(k, v)
		case int:
			a = trace.Int64Attribute(k, int64(v))
		case int64:
			a = trace.Int64Attribute(k, v)
		default:
			// Fall back to a Go-syntax representation for other types.
			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
		}
		attrs = append(attrs, a)
	}
	trace.FromContext(ctx).Annotatef(attrs, format, args...)
}

6
vendor/cloud.google.com/go/internal/version/update_version.sh

@ -0,0 +1,6 @@
#!/bin/bash
# Rewrites the `const Repo = "YYYYMMDD"` line of the Go file named by $GOFILE
# (set automatically by `go generate`) to today's date.
today=$(date +%Y%m%d)
# Quote $today and $GOFILE: the previous unquoted expansions broke on file
# paths containing spaces or shell metacharacters.
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'"$today"'"/' "$GOFILE"

71
vendor/cloud.google.com/go/internal/version/version.go

@ -0,0 +1,71 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate ./update_version.sh
// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version
import (
"runtime"
"strings"
"unicode"
)
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20180226"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}
// goVersion caches the normalized runtime version, computed once at init.
var goVersion = goVer(runtime.Version())

// develPrefix marks development builds of the Go toolchain.
const develPrefix = "devel +"

// goVer normalizes a runtime.Version() string: "go1.9" -> "1.9.0",
// "go1.9.2" -> "1.9.2", "go1.9rc2" -> "1.9.0-rc2", and
// "devel +<hash> ..." -> "<hash>". Unrecognized inputs yield "".
func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		// Development build: report just the token after "devel +".
		v := s[len(develPrefix):]
		if sp := strings.IndexFunc(v, unicode.IsSpace); sp >= 0 {
			v = v[:sp]
		}
		return v
	}
	if !strings.HasPrefix(s, "go1") {
		return ""
	}
	v := s[2:]
	prerelease := ""
	// Split off any trailing pre-release tag (e.g. "rc2", "beta1").
	if p := strings.IndexFunc(v, notSemverRune); p >= 0 {
		v, prerelease = v[:p], v[p:]
	}
	// Pad to a full major.minor.patch triple.
	switch {
	case strings.HasSuffix(v, "."):
		v += "0"
	case strings.Count(v, ".") < 2:
		v += ".0"
	}
	if prerelease != "" {
		v += "-" + prerelease
	}
	return v
}

// notSemverRune reports whether r is neither a digit nor a period.
func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}

335
vendor/cloud.google.com/go/storage/acl.go

@ -0,0 +1,335 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"net/http"
"reflect"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
// ACLRole is the level of access to grant.
type ACLRole string

const (
	// RoleOwner is the "OWNER" ACL role.
	RoleOwner ACLRole = "OWNER"
	// RoleReader is the "READER" ACL role.
	RoleReader ACLRole = "READER"
	// RoleWriter is the "WRITER" ACL role.
	RoleWriter ACLRole = "WRITER"
)

// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

const (
	// AllUsers is the entity for all users, even unauthenticated ones.
	AllUsers ACLEntity = "allUsers"
	// AllAuthenticatedUsers is the entity for all authenticated users.
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)

// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
	Entity      ACLEntity
	EntityID    string
	Role        ACLRole
	Domain      string
	Email       string
	ProjectTeam *ProjectTeam
}

// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
	ProjectNumber string
	Team          string
}

// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
	c           *Client
	bucket      string
	object      string // empty when the handle addresses a bucket ACL
	isDefault   bool   // true when the handle addresses a bucket's default object ACL
	userProject string // for requester-pays buckets
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
}
if a.isDefault {
return a.bucketDefaultDelete(ctx, entity)
}
return a.bucketDelete(ctx, entity)
}
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
}
if a.isDefault {
return a.objectSet(ctx, entity, role, true)
}
return a.bucketSet(ctx, entity, role)
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
}
if a.isDefault {
return a.bucketDefaultList(ctx)
}
return a.bucketList(ctx)
}
// bucketDefaultList fetches the bucket's default object ACL — the ACL applied
// to newly created objects — retrying on transient errors.
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
		a.configureCall(ctx, req)
		// acls is captured from the enclosing scope so the successful
		// response survives the retry loop.
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toObjectACLRules(acls.Items), nil
}
// bucketDefaultDelete removes entity's entry from the bucket's default object
// ACL, retrying on transient errors.
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
// bucketList fetches the bucket's own ACL, retrying on transient errors.
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.BucketAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.List(a.bucket)
		a.configureCall(ctx, req)
		// acls is captured so the successful response survives the retry loop.
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toBucketACLRules(acls.Items), nil
}
// bucketSet updates the bucket-level ACL entry for entity to role via the
// BucketAccessControls.Update API, retrying on transient errors.
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
	acl := &raw.BucketAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	// Return runWithRetry's error directly; the previous
	// `if err != nil { return err }; return nil` tail was redundant.
	return runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
		a.configureCall(ctx, req)
		_, err := req.Do()
		return err
	})
}
// bucketDelete removes entity's entry from the bucket ACL, retrying on
// transient errors.
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
// objectList fetches the object's ACL, retrying on transient errors.
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
		a.configureCall(ctx, req)
		// acls is captured so the successful response survives the retry loop.
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toObjectACLRules(acls.Items), nil
}
// objectSet updates a single ACL entry, either on the object itself or — when
// isBucketDefault is true — on the bucket's default object ACL.
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
	// setRequest abstracts over the two raw Update call types, which share
	// the Do and Header methods used below.
	type setRequest interface {
		Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
		Header() http.Header
	}
	acl := &raw.ObjectAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	var req setRequest
	if isBucketDefault {
		req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
	} else {
		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
	}
	a.configureCall(ctx, req)
	return runWithRetry(ctx, func() error {
		_, err := req.Do()
		return err
	})
}
// objectDelete removes entity's entry from the object's ACL, retrying on
// transient errors.
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
// configureCall sets the context, the user project (if any) and the client
// header on call. The raw call types share no interface beyond Header, so the
// Context and UserProject setters are invoked via reflection.
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		// Bill the operation to userProject (requester-pays buckets).
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}
// toObjectACLRules converts a slice of raw object access controls into ACLRules.
// A nil or empty input yields a nil slice.
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
	var rules []ACLRule
	for _, acl := range items {
		rules = append(rules, toObjectACLRule(acl))
	}
	return rules
}
// toBucketACLRules converts a slice of raw bucket access controls into ACLRules.
// A nil or empty input yields a nil slice.
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
	var rules []ACLRule
	for _, acl := range items {
		rules = append(rules, toBucketACLRule(acl))
	}
	return rules
}
// toObjectACLRule converts one raw object access control into an ACLRule.
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
	rule := ACLRule{
		Entity:   ACLEntity(a.Entity),
		EntityID: a.EntityId,
		Role:     ACLRole(a.Role),
		Domain:   a.Domain,
		Email:    a.Email,
	}
	rule.ProjectTeam = toObjectProjectTeam(a.ProjectTeam)
	return rule
}
// toBucketACLRule converts one raw bucket access control into an ACLRule.
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
	rule := ACLRule{
		Entity:   ACLEntity(a.Entity),
		EntityID: a.EntityId,
		Role:     ACLRole(a.Role),
		Domain:   a.Domain,
		Email:    a.Email,
	}
	rule.ProjectTeam = toBucketProjectTeam(a.ProjectTeam)
	return rule
}
// toRawObjectACL converts ACLRules to their raw object form. It returns nil
// for an empty input so the field is omitted from the request.
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
	if len(rules) == 0 {
		return nil
	}
	out := make([]*raw.ObjectAccessControl, 0, len(rules))
	for _, rule := range rules {
		out = append(out, rule.toRawObjectAccessControl("")) // bucket name unnecessary
	}
	return out
}
// toRawBucketACL converts ACLRules to their raw bucket form. It returns nil
// for an empty input so the field is omitted from the request.
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
	if len(rules) == 0 {
		return nil
	}
	out := make([]*raw.BucketAccessControl, 0, len(rules))
	for _, rule := range rules {
		out = append(out, rule.toRawBucketAccessControl("")) // bucket name unnecessary
	}
	return out
}
// toRawBucketAccessControl converts r to the raw API form under the given
// bucket name. Only Bucket, Entity and Role are settable.
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
	ac := new(raw.BucketAccessControl)
	ac.Bucket = bucket
	ac.Entity = string(r.Entity)
	ac.Role = string(r.Role)
	// The other fields are not settable.
	return ac
}
// toRawObjectAccessControl converts r to the raw API form under the given
// bucket name. Only Bucket, Entity and Role are settable.
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
	ac := new(raw.ObjectAccessControl)
	ac.Bucket = bucket
	ac.Entity = string(r.Entity)
	ac.Role = string(r.Role)
	// The other fields are not settable.
	return ac
}
// toBucketProjectTeam converts a raw bucket project team, passing nil through.
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
	if p == nil {
		return nil
	}
	pt := new(ProjectTeam)
	pt.ProjectNumber = p.ProjectNumber
	pt.Team = p.Team
	return pt
}
// toObjectProjectTeam converts a raw object project team, passing nil through.
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
	if p == nil {
		return nil
	}
	pt := new(ProjectTeam)
	pt.ProjectNumber = p.ProjectNumber
	pt.Team = p.Team
	return pt
}

1186
vendor/cloud.google.com/go/storage/bucket.go

File diff suppressed because it is too large

228
vendor/cloud.google.com/go/storage/copy.go

@ -0,0 +1,228 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
//
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
// in which case the user project of src is billed.
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
	c := new(Copier)
	c.dst = dst
	c.src = src
	return c
}
// A Copier copies a source object to a destination.
type Copier struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Copier. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// RewriteToken can be set before calling Run to resume a copy
	// operation. After Run returns a non-nil error, RewriteToken will
	// have been updated to contain the value needed to resume the copy.
	RewriteToken string

	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
	// operation. If ProgressFunc is not nil and copying requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
	// ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far and the total size in bytes of the source object.
	//
	// ProgressFunc is intended to make upload progress available to the
	// application. For example, the implementation of ProgressFunc may update
	// a progress bar in the application's UI, or log the result of
	// float64(copiedBytes)/float64(totalBytes).
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(copiedBytes, totalBytes uint64)

	// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
	// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
	// any.
	//
	// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
	// (via ObjectHandle.Key) on the destination object will result in an error when
	// Run is called.
	DestinationKMSKeyName string

	// dst and src are the destination and source object handles; they are set
	// by CopierFrom and are not configurable afterwards.
	dst, src *ObjectHandle
}
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
	defer func() { trace.EndSpan(ctx, err) }()
	if err := c.src.validate(); err != nil {
		return nil, err
	}
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	// These two options are mutually exclusive; reject the combination before
	// issuing any RPCs.
	if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
		return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
	}
	// Convert destination attributes to raw form, omitting the bucket.
	// If the bucket is included but name or content-type aren't, the service
	// returns a 400 with "Required" as the only message. Omitting the bucket
	// does not cause any problems.
	rawObject := c.ObjectAttrs.toRawObject("")
	// A rewrite of a large object may take several RPCs; loop until the
	// service reports Done, reporting progress after each call. Cancellation
	// is handled via ctx inside callRewrite.
	for {
		res, err := c.callRewrite(ctx, rawObject)
		if err != nil {
			return nil, err
		}
		if c.ProgressFunc != nil {
			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
		}
		if res.Done { // Finished successfully.
			return newObject(res.Resource), nil
		}
	}
}
// callRewrite issues one Objects.Rewrite RPC with all of the Copier's and both
// handles' options applied, and records the returned RewriteToken so that an
// interrupted multi-call copy can be resumed.
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
	call.Context(ctx).Projection("full")
	if c.RewriteToken != "" {
		// Resume a copy that was started by an earlier call.
		call.RewriteToken(c.RewriteToken)
	}
	if c.DestinationKMSKeyName != "" {
		call.DestinationKmsKeyName(c.DestinationKMSKeyName)
	}
	if c.PredefinedACL != "" {
		call.DestinationPredefinedAcl(c.PredefinedACL)
	}
	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	// For Requester Pays buckets, bill the destination's user project if set,
	// otherwise fall back to the source's (see CopierFrom doc).
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	} else if c.src.userProject != "" {
		call.UserProject(c.src.userProject)
	}
	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
		return nil, err
	}
	// Destination key goes in the standard encryption headers, source key in
	// the copy-source variants (the boolean selects which set).
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
		return nil, err
	}
	var res *raw.RewriteResponse
	var err error
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	// Save the token so the next call (or a resumed Run) continues this copy.
	c.RewriteToken = res.RewriteToken
	return res, nil
}
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
//
// The encryption key for the destination object will be used to decrypt all
// source objects and encrypt the destination object. It is an error
// to specify an encryption key for any of the source objects.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
	c := new(Composer)
	c.dst = dst
	c.srcs = srcs
	return c
}
// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Composer. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// dst is the destination handle and srcs the source handles; they are set
	// by ComposerFrom and are not configurable afterwards.
	dst  *ObjectHandle
	srcs []*ObjectHandle
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
	defer func() { trace.EndSpan(ctx, err) }()
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if len(c.srcs) == 0 {
		return nil, errors.New("storage: at least one source object must be specified")
	}
	req := &raw.ComposeRequest{}
	// Compose requires a non-empty Destination, so we always set it,
	// even if the caller-provided ObjectAttrs is the zero value.
	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
	// Validate each source and build the request's source-object list.
	// Sources must live in the destination bucket and must not carry their
	// own encryption keys (see ComposerFrom doc).
	for _, src := range c.srcs {
		if err := src.validate(); err != nil {
			return nil, err
		}
		if src.bucket != c.dst.bucket {
			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
		}
		if src.encryptionKey != nil {
			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
		}
		srcObj := &raw.ComposeRequestSourceObjects{
			Name: src.object,
		}
		// composeSourceObj adapts srcObj to the interface applyConds expects.
		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
			return nil, err
		}
		req.SourceObjects = append(req.SourceObjects, srcObj)
	}
	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	}
	if c.PredefinedACL != "" {
		call.DestinationPredefinedAcl(c.PredefinedACL)
	}
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	return newObject(obj), nil
}

176
vendor/cloud.google.com/go/storage/doc.go

@ -0,0 +1,176 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package storage provides an easy way to work with Google Cloud Storage.
Google Cloud Storage stores data in named objects, which are grouped into buckets.
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
The client will use your default application credentials.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:
bkt := client.Bucket(bucketName)
A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call Create on the handle:
if err := bkt.Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
Note that although buckets are associated with projects, bucket names are
global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
// Close, just like writing a file.
if err := w.Close(); err != nil {
// TODO: Handle error.
}
// Read it back.
r, err := obj.NewReader(ctx)
if err != nil {
// TODO: Handle error.
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
// TODO: Handle error.
}
// Prints "This object contains text."
Objects also have attributes, which you can fetch with Attrs:
objAttrs, err := obj.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("object %s has size %d and can be read using %s\n",
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
ACLs
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
are suitable for fine-grained control, but you may prefer using IAM to control
access at the project level (see
https://cloud.google.com/storage/docs/access-control/iam).
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
acls, err := obj.ACL().List(ctx)
if err != nil {
// TODO: Handle error.
}
for _, rule := range acls {
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
}
You can also set and delete ACLs.
Conditions
Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
metadata changes. Conditions let you check these values before an operation;
the operation only executes if the conditions match. You can use conditions to
prevent race conditions in read-modify-write operations.
For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
// Proceed with writing as above.
Signed URLs
You can obtain a URL that lets anyone read or write an object for a limited time.
You don't need to create a client to do this. See the documentation of
SignedURL for details.
url, err := storage.SignedURL(bucketName, "shared-object", opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package storage // import "cloud.google.com/go/storage"

32
vendor/cloud.google.com/go/storage/go110.go

@ -0,0 +1,32 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
// shouldRetry reports whether err is transient and the operation should be
// retried. This is the go1.10+ variant (see the +build tag above); the
// pre-1.10 file adds url.Error handling that 1.10 covers via Temporary.
func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case interface{ Temporary() bool }:
		// Network-level errors advertise transience via Temporary.
		return e.Temporary()
	default:
		return false
	}
}

130
vendor/cloud.google.com/go/storage/iam.go

@ -0,0 +1,130 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
// IAM provides access to IAM access control for the bucket.
// The returned handle delegates to iamClient, which adapts the storage raw
// API to the generic iam package.
func (b *BucketHandle) IAM() *iam.Handle {
	return iam.InternalNewHandleClient(&iamClient{
		raw:         b.c.raw,
		userProject: b.userProject, // propagated for requester-pays buckets
	}, b.name)
}
// iamClient implements the iam.client interface on top of the storage
// Buckets IAM methods.
type iamClient struct {
	raw *raw.Service
	// userProject, when non-empty, is billed for each call (requester-pays).
	userProject string
}
// Get fetches the bucket's IAM policy and converts it from the raw storage
// form to the iampb form, retrying on transient errors.
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
	defer func() { trace.EndSpan(ctx, err) }()
	call := c.raw.Buckets.GetIamPolicy(resource)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var rp *raw.Policy
	err = runWithRetry(ctx, func() error {
		// rp is captured so the successful response survives the retry loop.
		rp, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return iamFromStoragePolicy(rp), nil
}
// Set replaces the bucket's IAM policy with p (converted to the raw storage
// form), retrying on transient errors.
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
	defer func() { trace.EndSpan(ctx, err) }()
	rp := iamToStoragePolicy(p)
	call := c.raw.Buckets.SetIamPolicy(resource, rp)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	return runWithRetry(ctx, func() error {
		_, err := call.Context(ctx).Do()
		return err
	})
}
// Test returns the subset of perms that the caller holds on the bucket,
// retrying on transient errors.
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
	defer func() { trace.EndSpan(ctx, err) }()
	call := c.raw.Buckets.TestIamPermissions(resource, perms)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var res *raw.TestIamPermissionsResponse
	err = runWithRetry(ctx, func() error {
		// res is captured so the successful response survives the retry loop.
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}
// iamToStoragePolicy converts an iampb policy to the raw storage JSON form.
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
	rp := new(raw.Policy)
	rp.Bindings = iamToStorageBindings(ip.Bindings)
	rp.Etag = string(ip.Etag)
	return rp
}
// iamToStorageBindings converts iampb bindings to the raw storage form.
// A nil or empty input yields a nil slice.
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
	var out []*raw.PolicyBindings
	for _, b := range ibs {
		rb := &raw.PolicyBindings{Role: b.Role, Members: b.Members}
		out = append(out, rb)
	}
	return out
}
// iamFromStoragePolicy converts a raw storage policy to the iampb form.
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
	p := new(iampb.Policy)
	p.Bindings = iamFromStorageBindings(rp.Bindings)
	p.Etag = []byte(rp.Etag)
	return p
}
// iamFromStorageBindings converts raw storage bindings to the iampb form.
// A nil or empty input yields a nil slice.
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
	var out []*iampb.Binding
	for _, b := range rbs {
		ib := &iampb.Binding{Role: b.Role, Members: b.Members}
		out = append(out, ib)
	}
	return out
}

37
vendor/cloud.google.com/go/storage/invoke.go

@ -0,0 +1,37 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go/v2"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	// NOTE(review): the zero-valued gax.Backoff relies on gax's documented
	// default initial/max delays and multiplier — confirm those defaults are
	// the intended retry schedule.
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil // success: stop retrying
		}
		if shouldRetry(err) {
			return false, nil // transient: back off and try again
		}
		return true, err // permanent: give up and surface the error
	})
}

42
vendor/cloud.google.com/go/storage/not_go110.go

@ -0,0 +1,42 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.10
package storage
import (
"net/url"
"strings"
"google.golang.org/api/googleapi"
)
// shouldRetry reports whether err is transient and the operation should be
// retried. This is the pre-go1.10 variant (see the +build tag above); it
// string-matches REFUSED_STREAM because the underlying http2 error type is
// unexported on those Go versions.
func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry on REFUSED_STREAM.
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		return strings.Contains(e.Error(), "REFUSED_STREAM")
	case interface{ Temporary() bool }:
		// Network-level errors advertise transience via Temporary.
		return e.Temporary()
	default:
		return false
	}
}

188
vendor/cloud.google.com/go/storage/notifications.go

@ -0,0 +1,188 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}
// Values for Notification.PayloadFormat.
const (
	// NoPayload sends no payload with notification messages.
	NoPayload = "NONE"

	// JSONPayload sends object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)

// Values for Notification.EventTypes.
const (
	// ObjectFinalizeEvent occurs when an object is successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// ObjectMetadataUpdateEvent occurs when the metadata of an existing object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// ObjectDeleteEvent occurs when an object is permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// ObjectArchiveEvent occurs when the live version of an object becomes an
	// archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
// toNotification converts a raw notification into a Notification, splitting
// the full topic resource name into its project and topic IDs.
func toNotification(rn *raw.Notification) *Notification {
	projectID, topicID := parseNotificationTopic(rn.Topic)
	return &Notification{
		ID:               rn.Id,
		TopicProjectID:   projectID,
		TopicID:          topicID,
		EventTypes:       rn.EventTypes,
		ObjectNamePrefix: rn.ObjectNamePrefix,
		CustomAttributes: rn.CustomAttributes,
		PayloadFormat:    rn.PayloadFormat,
	}
}
// topicRE matches a full Cloud PubSub topic resource name, capturing the
// project ID and the topic ID.
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	m := topicRE.FindStringSubmatch(nt)
	if len(m) < 3 {
		return "?", "?"
	}
	projectID, topicID = m[1], m[2]
	return projectID, topicID
}
// toRawNotification converts a Notification to its raw (JSON API) form,
// rebuilding the full PubSub topic resource name from the project and topic IDs.
func toRawNotification(n *Notification) *raw.Notification {
	return &raw.Notification{
		Id: n.ID,
		Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
			n.TopicProjectID, n.TopicID),
		EventTypes:       n.EventTypes,
		ObjectNamePrefix: n.ObjectNamePrefix,
		CustomAttributes: n.CustomAttributes,
		// PayloadFormat is already a string; the previous string() conversion
		// was redundant (flagged by unconvert/gocritic).
		PayloadFormat: n.PayloadFormat,
	}
}
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
	defer func() { trace.EndSpan(ctx, err) }()
	if n.ID != "" {
		return nil, errors.New("storage: AddNotification: ID must not be set")
	}
	if n.TopicProjectID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicProjectID")
	}
	if n.TopicID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicID")
	}
	call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	// NOTE(review): unlike Notifications (List), this call is not wrapped in
	// runWithRetry — presumably because Insert is not idempotent; confirm.
	rn, err := call.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return toNotification(rn), nil
}
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
	defer func() { trace.EndSpan(ctx, err) }()
	call := b.c.raw.Notifications.List(b.name)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	var res *raw.Notifications
	err = runWithRetry(ctx, func() error {
		// res is captured so the successful response survives the retry loop.
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return notificationsToMap(res.Items), nil
}
// notificationsToMap indexes raw notifications by their IDs, converting each
// to a Notification.
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
	out := make(map[string]*Notification, len(rns))
	for _, item := range rns {
		out[item.Id] = toNotification(item)
	}
	return out
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
	defer func() { trace.EndSpan(ctx, err) }()
	call := b.c.raw.Notifications.Delete(b.name, id)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	// NOTE(review): this call is not wrapped in runWithRetry, unlike the List
	// path — confirm whether retrying Delete was intentionally omitted.
	return call.Context(ctx).Do()
}

385
vendor/cloud.google.com/go/storage/reader.go

@ -0,0 +1,385 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
)
// crc32cTable is the Castagnoli polynomial table used to compute the CRC32C
// checksums that GCS reports for object content.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only; values reflect the response headers of the request
// that created the Reader.
type ReaderObjectAttrs struct {
	// Size is the length of the object's content.
	Size int64

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// CacheControl specifies whether and for how long browser and Internet
	// caches are allowed to cache your objects.
	CacheControl string

	// LastModified is the time that the object was last modified.
	LastModified time.Time

	// Generation is the generation number of the object's content.
	Generation int64

	// Metageneration is the version of the metadata for this object at
	// this generation. This field is used for preconditions and for
	// detecting changes in metadata. A metageneration number is only
	// meaningful in the context of a particular generation of a
	// particular object.
	Metageneration int64
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
	// A full read is a range read from offset 0 with unbounded (-1) length.
	return o.NewRangeReader(ctx, 0, -1)
}
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end. A length of zero performs a HEAD-style metadata-only read.
//
// ErrObjectNotExist is returned if the object is not found. The caller must
// Close the returned Reader when done.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := o.validate(); err != nil {
		return nil, err
	}
	if offset < 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}
	u := &url.URL{
		Scheme: "https",
		Host:   "storage.googleapis.com",
		Path:   fmt.Sprintf("/%s/%s", o.bucket, o.object),
	}
	// A zero length means "metadata only": use HEAD so no body is transferred.
	verb := "GET"
	if length == 0 {
		verb = "HEAD"
	}
	req, err := http.NewRequest(verb, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	if o.userProject != "" {
		req.Header.Set("X-Goog-User-Project", o.userProject)
	}
	if o.readCompressed {
		req.Header.Set("Accept-Encoding", "gzip")
	}
	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
		return nil, err
	}
	gen := o.gen

	// Define a function that initiates a Read with offset and length, assuming we
	// have already read seen bytes.
	reopen := func(seen int64) (*http.Response, error) {
		start := offset + seen
		if length < 0 && start > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
		} else if length > 0 {
			// The end character isn't affected by how many bytes we've seen.
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
		}
		// We wait to assign conditions here because the generation number can change in between reopen() runs.
		req.URL.RawQuery = conditionsQuery(gen, o.conds)
		var res *http.Response
		err = runWithRetry(ctx, func() error {
			res, err = o.c.hc.Do(req)
			if err != nil {
				return err
			}
			if res.StatusCode == http.StatusNotFound {
				res.Body.Close()
				return ErrObjectNotExist
			}
			if res.StatusCode < 200 || res.StatusCode > 299 {
				body, _ := ioutil.ReadAll(res.Body)
				res.Body.Close()
				return &googleapi.Error{
					Code:   res.StatusCode,
					Header: res.Header,
					Body:   string(body),
				}
			}
			if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
				res.Body.Close()
				return errors.New("storage: partial request not satisfied")
			}
			// If a generation hasn't been specified, and this is the first response we get, let's record the
			// generation. In future requests we'll use this generation as a precondition to avoid data races.
			if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
				gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
				if err != nil {
					return err
				}
				gen = gen64
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		return res, nil
	}

	res, err := reopen(0)
	if err != nil {
		return nil, err
	}
	var (
		size     int64 // total size of object, even if a range was requested.
		checkCRC bool
		crc      uint32
	)
	if res.StatusCode == http.StatusPartialContent {
		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
	} else {
		size = res.ContentLength
		// Check the CRC iff all of the following hold:
		// - We asked for content (length != 0).
		// - We got all the content (status != PartialContent).
		// - The server sent a CRC header.
		// - The Go http stack did not uncompress the file.
		// - We were not served compressed data that was uncompressed on download.
		// The problem with the last two cases is that the CRC will not match -- GCS
		// computes it on the compressed contents, but we compute it on the
		// uncompressed contents.
		if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
			crc, checkCRC = parseCRC32c(res)
		}
	}

	remain := res.ContentLength
	body := res.Body
	if length == 0 {
		remain = 0
		body.Close()
		body = emptyBody
	}
	var metaGen int64
	// BUG FIX: guard on the same header that is parsed below. The original
	// tested "X-Goog-Generation" here, so a response carrying a generation but
	// no metageneration header failed with a ParseInt error on "".
	if res.Header.Get("X-Goog-Metageneration") != "" {
		metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
		if err != nil {
			return nil, err
		}
	}
	var lm time.Time
	if res.Header.Get("Last-Modified") != "" {
		lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
		if err != nil {
			return nil, err
		}
	}
	attrs := ReaderObjectAttrs{
		Size:            size,
		ContentType:     res.Header.Get("Content-Type"),
		ContentEncoding: res.Header.Get("Content-Encoding"),
		CacheControl:    res.Header.Get("Cache-Control"),
		LastModified:    lm,
		Generation:      gen,
		Metageneration:  metaGen,
	}
	return &Reader{
		Attrs:    attrs,
		body:     body,
		size:     size,
		remain:   remain,
		wantCRC:  crc,
		checkCRC: checkCRC,
		reopen:   reopen,
	}, nil
}
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
// parseCRC32c extracts the CRC32C checksum from the X-Goog-Hash response
// headers. It returns (0, false) when no parseable "crc32c=" entry exists.
func parseCRC32c(res *http.Response) (uint32, bool) {
	const prefix = "crc32c="
	for _, spec := range res.Header["X-Goog-Hash"] {
		if !strings.HasPrefix(spec, prefix) {
			continue
		}
		if c, err := decodeUint32(strings.TrimPrefix(spec, prefix)); err == nil {
			return c, true
		}
	}
	return 0, false
}
// emptyBody replaces the response body for zero-length (HEAD) reads so that
// Read immediately reports io.EOF without touching the network stream.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	Attrs              ReaderObjectAttrs
	body               io.ReadCloser
	seen, remain, size int64  // bytes consumed; bytes left (-1 if unknown); total object size
	checkCRC           bool   // should we check the CRC?
	wantCRC            uint32 // the CRC32c value the server sent in the header
	gotCRC             uint32 // running crc
	// reopen reissues the HTTP request, resuming after seen bytes; used to
	// recover from transient read failures.
	reopen func(seen int64) (*http.Response, error)
}
// Close closes the Reader. It must be called when done reading.
// It closes the underlying HTTP response body.
func (r *Reader) Close() error {
	return r.body.Close()
}
// Read implements io.Reader. It updates the running CRC with every chunk and
// reports a mismatch against the server-sent CRC when the stream ends.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := r.readWithRetry(p)
	if r.remain != -1 {
		r.remain -= int64(n)
	}
	if !r.checkCRC {
		return n, err
	}
	r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
	// Check CRC here. It would be natural to check it in Close, but
	// everybody defers Close on the assumption that it doesn't return
	// anything worth looking at.
	if err == io.EOF && r.gotCRC != r.wantCRC {
		return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
			r.gotCRC, r.wantCRC)
	}
	return n, err
}
// readWithRetry fills p from the body, transparently reopening the download
// (resuming after the bytes already seen) when the error looks like a
// transient http2 INTERNAL_ERROR.
func (r *Reader) readWithRetry(p []byte) (int, error) {
	total := 0
	for total < len(p) {
		m, err := r.body.Read(p[total:])
		total += m
		r.seen += int64(m)
		if !shouldRetryRead(err) {
			return total, err
		}
		// Read failed, but we will try again. Send a ranged read request that takes
		// into account the number of bytes we've already seen.
		res, err := r.reopen(r.seen)
		if err != nil {
			// reopen already retries
			return total, err
		}
		r.body.Close()
		r.body = res.Body
	}
	return total, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
	return r.Attrs.Size
}

// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}

// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
	return r.Attrs.ContentType
}

// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
	return r.Attrs.ContentEncoding
}

// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
	return r.Attrs.CacheControl
}

// LastModified returns the value of the Last-Modified header.
// The error is always nil; the signature is kept for compatibility.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
	return r.Attrs.LastModified, nil
}

1116
vendor/cloud.google.com/go/storage/storage.go

File diff suppressed because it is too large

23841
vendor/cloud.google.com/go/storage/storage.replay

File diff suppressed because one or more lines are too long

261
vendor/cloud.google.com/go/storage/writer.go

@ -0,0 +1,261 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"unicode/utf8"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The size will be rounded up
	// to the nearest multiple of 256K. If zero, chunking will be disabled and
	// the object will be uploaded in a single request.
	//
	// ChunkSize will default to a reasonable value. If you perform many concurrent
	// writes of small objects, you may wish set ChunkSize to a value that matches
	// your objects' sizes to avoid consuming large amounts of memory.
	//
	// ChunkSize must be set before the first Write call.
	ChunkSize int

	// ProgressFunc can be used to monitor the progress of a large write.
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx    context.Context // controls the upload; canceling it aborts the write
	o      *ObjectHandle   // destination object
	opened bool            // whether open() has run and the pipe exists
	pw     *io.PipeWriter  // write end feeding the upload goroutine

	donec chan struct{} // closed after err and obj are set.
	obj   *ObjectAttrs

	mu  sync.Mutex // guards err
	err error
}
// open validates the Writer's configuration, creates the pipe that feeds the
// upload, and starts a goroutine performing the actual Objects.Insert call.
// The goroutine closes w.donec once w.err and w.obj are set.
func (w *Writer) open() error {
	attrs := w.ObjectAttrs
	// Check the developer didn't change the object Name (this is unfortunate, but
	// we don't want to store an object under the wrong name).
	if attrs.Name != w.o.object {
		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
	}
	if !utf8.ValidString(attrs.Name) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
	}
	if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
		return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
	}
	// Validate ChunkSize before creating the pipe and starting monitorCancel:
	// the original checked it afterwards, leaking the monitor goroutine and a
	// half-open pipe whenever the value was negative.
	if w.ChunkSize < 0 {
		return errors.New("storage: Writer.ChunkSize must be non-negative")
	}
	pr, pw := io.Pipe()
	w.pw = pw
	w.opened = true

	go w.monitorCancel()

	mediaOpts := []googleapi.MediaOption{
		googleapi.ChunkSize(w.ChunkSize),
	}
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}

	go func() {
		defer close(w.donec)

		rawObj := attrs.toRawObject(w.o.bucket)
		if w.SendCRC32C {
			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
		}
		if w.MD5 != nil {
			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
		}
		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
			Media(pr, mediaOpts...).
			Projection("full").
			Context(w.ctx)
		if w.ProgressFunc != nil {
			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
		}
		if attrs.KMSKeyName != "" {
			call.KmsKeyName(attrs.KMSKeyName)
		}
		if attrs.PredefinedACL != "" {
			call.PredefinedAcl(attrs.PredefinedACL)
		}
		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		var resp *raw.Object
		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
		if err == nil {
			if w.o.userProject != "" {
				call.UserProject(w.o.userProject)
			}
			setClientHeader(call.Header())
			// If the chunk size is zero, then no chunking is done on the Reader,
			// which means we cannot retry: the first call will read the data, and if
			// it fails, there is no way to re-read.
			if w.ChunkSize == 0 {
				resp, err = call.Do()
			} else {
				// We will only retry here if the initial POST, which obtains a URI for
				// the resumable upload, fails with a retryable error. The upload itself
				// has its own retry logic.
				err = runWithRetry(w.ctx, func() error {
					var err2 error
					resp, err2 = call.Do()
					return err2
				})
			}
		}
		if err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		w.obj = newObject(resp)
	}()
	return nil
}
// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	pending := w.err
	w.mu.Unlock()
	if pending != nil {
		return 0, pending
	}
	if !w.opened {
		if openErr := w.open(); openErr != nil {
			return 0, openErr
		}
	}
	n, err = w.pw.Write(p)
	if err == nil {
		return n, nil
	}
	w.mu.Lock()
	pending = w.err
	w.mu.Unlock()
	// Preserve existing functionality that when context is canceled, Write will return
	// context.Canceled instead of "io: read/write on closed pipe". This hides the
	// pipe implementation detail from users and makes Write seem as though it's an RPC.
	if pending == context.Canceled || pending == context.DeadlineExceeded {
		return n, pending
	}
	return n, err
}
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
	// A Writer that was never written to still performs the (empty) upload.
	if !w.opened {
		if err := w.open(); err != nil {
			return err
		}
	}
	// Closing either the read or write causes the entire pipe to close.
	if err := w.pw.Close(); err != nil {
		return err
	}
	// Wait for the upload goroutine to finish and publish w.err / w.obj.
	<-w.donec
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.err
}
// monitorCancel is intended to be used as a background goroutine. It monitors
// the context, and when it observes that the context has been canceled, it
// manually closes things that do not take a context. It exits when either the
// context is done or the upload goroutine signals completion via donec.
func (w *Writer) monitorCancel() {
	select {
	case <-w.ctx.Done():
		w.mu.Lock()
		werr := w.ctx.Err()
		w.err = werr
		w.mu.Unlock()
		// Closing either the read or write causes the entire pipe to close.
		w.CloseWithError(werr)
	case <-w.donec:
	}
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
	// Nothing to tear down if the pipe was never created.
	if w.opened {
		return w.pw.CloseWithError(err)
	}
	return nil
}
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil; before that the
// result is nil.
func (w *Writer) Attrs() *ObjectAttrs {
	return w.obj
}

2
vendor/git.lowcodeplatform.net/fabric/lib/.gitconfig

@ -0,0 +1,2 @@
[url "ssh://git@git.lowcodeplatform.net/"]
insteadOf = https://git.lowcodeplatform.net/

9
vendor/git.lowcodeplatform.net/fabric/lib/.gitignore

@ -0,0 +1,9 @@
.history
.idea
.vscode
.DS_Store
*~merged*
*~merged
/public
.env
local

3
vendor/git.lowcodeplatform.net/fabric/lib/README.md

@ -0,0 +1,3 @@
# lib
Библиотека общих компонентов для сервисов Buildbox Fabric

305
vendor/git.lowcodeplatform.net/fabric/lib/cli.go

@ -0,0 +1,305 @@
package lib
import (
"os"
"github.com/urfave/cli"
)
// sep is the OS-specific path separator as a string (e.g. "/" on Unix).
const sep = string(os.PathSeparator)
// RunServiceFuncCLI parses the console parameters and dispatches to the given
// callback with the resolved values. The callback receives every possible
// option; unused ones are passed as empty strings, and the "action" argument
// identifies the subcommand that was invoked (webinit/stop/start/init).
func RunServiceFuncCLI(funcCLI func(configfile, dir, port, mode, service, param1, param2, param3, sourcedb, action, version string)) error {
	var err error

	appCLI := cli.NewApp()
	appCLI.Usage = "Demon Buildbox Proxy started"
	appCLI.Commands = []cli.Command{
		{
			Name: "webinit", ShortName: "",
			Usage: "Start Web-UI from init infractractire LowCodePlatform-service",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "port, c",
					Usage: "Порт запуска UI",
					Value: "8088",
				},
			},
			Action: func(c *cli.Context) error {
				port := c.String("port")
				funcCLI("", "", port, "", "", "", "", "", "", "webinit", "")
				return nil
			},
		},
		{
			Name: "stop", ShortName: "",
			Usage: "Stop service",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Остановить сервисы (через запятую). '-s systems' - остановить системные сервисы; '-s custom' - остановить рабочие пользовательские сервисы ",
					Value: "all",
				},
			},
			Action: func(c *cli.Context) error {
				service := c.String("service")
				funcCLI("", "", "", "", service, "", "", "", "", "stop", "")
				return nil
			},
		},
		{
			Name: "start", ShortName: "",
			Usage: "Start single Buildbox-service process",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "config, c",
					Usage: "Название файла конфигурации, с которым будет запущен сервис",
					Value: "default",
				},
				cli.StringFlag{
					Name:  "dir, d",
					Usage: "Путь к шаблонам",
					Value: "default",
				},
				cli.StringFlag{
					Name:  "port, p",
					Usage: "Порт, на котором запустить процесс",
					Value: "",
				},
				cli.StringFlag{
					Name:  "mode, m",
					Usage: "Доп.режимы запуска: debug (логирования stdout в файл)",
					Value: "",
				},
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Запуск сервиса (для запуска нескольких сервисов укажите их через запятую)",
					Value: "systems",
				},
			},
			Action: func(c *cli.Context) error {
				configfile := c.String("config")
				port := c.String("port")
				dir := c.String("dir")
				mode := c.String("mode")
				service := c.String("service")
				// "default" means "resolve the project root at runtime".
				if dir == "default" {
					dir, err = RootDir()
				}
				funcCLI(configfile, dir, port, mode, service, "", "", "", "", "start", "")
				return nil
			},
		},
		{
			Name: "init", ShortName: "",
			Usage: "Init single LowCodePlatform-service process",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Инициализация сервиса",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "version, v",
					Usage: "До какой версии обновить выбранный сервис",
					Value: "latest",
				},
				cli.StringFlag{
					Name:  "param1, p1",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "param2, p2",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "param3, p3",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "dir, d",
					Usage: "Директория создания проекта (по-умолчанию - текущая директория)",
					Value: "",
				},
				cli.StringFlag{
					Name:  "sourcedb, db",
					Usage: "База данных, где будет развернута фабрика (поддерживается SQLite, MySQL, Postgres, CocckroachDB) (по-умолчанию: SQLite)",
					Value: "./default.db",
				},
			},
			Action: func(c *cli.Context) error {
				service := c.String("service")
				param1 := c.String("param1")
				param2 := c.String("param2")
				param3 := c.String("param3")
				dir := c.String("dir")
				version := c.String("version")
				sourcedb := c.String("sourcedb")
				if dir == "default" {
					dir, err = RootDir()
				}
				funcCLI("", dir, "", "", service, param1, param2, param3, sourcedb, "init", version)
				return nil
			},
		},
	}
	// BUG FIX: the Run error was previously discarded, hiding flag-parsing and
	// action failures from the caller.
	if errRun := appCLI.Run(os.Args); errRun != nil {
		return errRun
	}
	return err
}
// Stop terminates the process with the given pid by sending it os.Kill.
func Stop(pid int) (err error) {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return err
	}
	// os.Kill cannot be caught or ignored by the target process.
	return proc.Signal(os.Kill)
}
// завершение всех процессов для текущей конфигурации
// config - ид-конфигурации
//func PidsByConfig(config, portProxy string) (result []string, err error) {
// _, fullresult, _, _ := Ps("full", portProxy)
//
// // получаем pid для переданной конфигурации
// for _, v1 := range fullresult {
// for _, v := range v1 {
// configfile := v[1] // файл
// idProcess := v[0] // pid
//
// if config == configfile {
// result = append(result, idProcess)
// }
//
// if err != nil {
// fmt.Println("Error stopped process config:", config, ", err:", err)
// }
// }
// }
//
// return
//}
// получаем строки пидов подходящих под условия, в котором:
// domain - название проекта (домен)
// alias - название алиас-сервиса (gui/api/proxy и тд - то, что в мап-прокси идет второй частью адреса)
// если алиас явно не задан, то он может быть получен из домена
//func PidsByAlias(domain, alias, portProxy string) (result []string, err error) {
//
// if domain == "" {
// domain = "all"
// }
// if alias == "" {
// alias = "all"
// }
//
// // можем в домене передать полный путь с учетом алиаса типа buildbox/gui
// // в этом случае алиас если он явно не задан заполним значением алиаса полученного из домена
// splitDomain := strings.Split(domain, "/")
// if len(splitDomain) == 2 {
// domain = splitDomain[0]
// alias = splitDomain[1]
// }
// _, _, raw, _ := Ps("full", portProxy)
//
// // получаем pid для переданной конфигурации
// for _, pidRegistry := range raw {
// for d, v1 := range pidRegistry {
// // пропускаем если точное сравнение и не подоходит
// if domain != "all" && d != domain {
// continue
// }
//
// for a, v2 := range v1 {
// // пропускаем если точное сравнение и не подоходит
// if alias != "all" && a != alias {
// continue
// }
//
// for _, v3 := range v2 {
// k3 := strings.Split(v3, ":")
// idProcess := k3[0] // pid
// // дополняем результат значениями домена и алиаса (для возврата их при остановке если не переданы алиас явно)
// // бывают значения, когда мы останавлитваем процесс тошько по домену и тогда мы не можем возврашить алиас остановленного процесса
// // а алиас нужен для поиска в прокси в картах /Pid и /Мар для удаления из активных сервисов по домену и алиасу
// // если алиаса нет (не приходит в ответе от лоадера, то не находим и прибитые процессы залипают в мапах)
// result = append(result, v3+":"+ d + ":" + a)
//
// if err != nil {
// fmt.Println("Error stopped process: pid:", idProcess, ", err:", err)
// }
// }
// }
// }
// }
//
// return
//}
// уничтожить все процессы
//func Destroy(portProxy string) (err error) {
// pids, _, _, _ := Ps("pid", portProxy)
// for _, v := range pids {
// pi, err := strconv.Atoi(v)
// if err == nil {
// Stop(pi)
// }
// }
// return err
//}
// инициализация приложения
//func Install() (err error) {
//
// // 1. задание переменных окружения
// currentDir, err := CurrentDir()
// if err != nil {
// return
// }
// os.Setenv("BBPATH", currentDir)
//
// //var rootPath = os.Getenv("BBPATH")
//
// //fmt.Println(rootPath)
// //path, _ := os.LookupEnv("BBPATH")
// //fmt.Print("BBPATH: ", path)
//
// // 2. копирование файла запуска в /etc/bin
// //src := "./buildbox"
// //dst := "/usr/bin/buildbox"
// //
// //in, err := os.Open(src)
// //if err != nil {
// // return err
// //}
// //defer in.Close()
// //
// //out, err := os.Create(dst)
// //if err != nil {
// // return err
// //}
// //defer out.Close()
// //
// //_, err = io.Copy(out, in)
// //if err != nil {
// // return err
// //}
// //return out.Close()
//
// return err
//}

107
vendor/git.lowcodeplatform.net/fabric/lib/config.go

@ -0,0 +1,107 @@
package lib
import (
"encoding/base64"
"fmt"
"os"
"strings"
"github.com/BurntSushi/toml"
"github.com/kelseyhightower/envconfig"
"github.com/labstack/gommon/color"
)
// warning is the red "[Fail]" prefix used in console error output.
var warning = color.Red("[Fail]")
// ConfigLoad reads a configuration into pointToCfg.
//
// Order of operations:
//  1. defaults are taken from environment variables (envconfig);
//  2. if config is shorter than 200 characters it is treated as a file name
//     (".cfg" is appended when no extension is present) and read from disk;
//  3. otherwise config is assumed to be a base64-encoded configuration body;
//  4. the resulting TOML payload is decoded into pointToCfg.
func ConfigLoad(config string, pointToCfg interface{}) (err error) {
	var payload string
	// BUG FIX: the original shadowed err here with ":=", which made the
	// subsequent "err = fmt.Errorf(...)" assignment dead code.
	if err = envconfig.Process("", pointToCfg); err != nil {
		// typo fix in message: was "enviroment"
		fmt.Printf("%s Error load default environment: %s\n", warning, err)
		return fmt.Errorf("Error load default environment: %s", err)
	}
	// Heuristic: a value of 200+ characters is assumed to be the base64
	// configuration body itself rather than a file name.
	if len(config) < 200 {
		if len(config) == 0 {
			return fmt.Errorf("%s", "Error. Configfile is empty.")
		}
		if !strings.Contains(config, ".") {
			config = config + ".cfg"
		}
		payload, err = ReadFile(config)
		if err != nil {
			// typo fix in message: was "Error raed configfile"
			return fmt.Errorf("Error read configfile: (%s), err: %s", config, err)
		}
	} else {
		debase, errDec := base64.StdEncoding.DecodeString(config)
		if errDec != nil {
			return fmt.Errorf("Error decode to string from base64 configfile. err: %s", errDec)
		}
		payload = string(debase)
	}
	return decodeConfig(payload, pointToCfg)
}
// decodeConfig parses a TOML payload into cfg. A parse failure is both
// printed to the console (with the offending payload) and returned.
func decodeConfig(payload string, cfg interface{}) (err error) {
	_, err = toml.Decode(payload, cfg)
	if err != nil {
		fmt.Printf("%s Error: %s (configfile: %s)\n", warning, err, payload)
	}
	return err
}
// searchConfigDir recursively searches startDir for a file whose name
// contains "<configuration>.cfg" and returns its full path. Hidden
// (dot-prefixed) directories and files under dot-paths are skipped.
func searchConfigDir(startDir, configuration string) (configPath string, err error) {
	directory, err := os.Open(startDir)
	if err != nil {
		return "", err
	}
	defer directory.Close()

	objects, err := directory.Readdir(-1)
	if err != nil {
		return "", err
	}
	for _, obj := range objects {
		candidate := startDir + sep + obj.Name()
		if obj.IsDir() {
			// Do not descend into hidden directories.
			if strings.HasPrefix(obj.Name(), ".") {
				continue
			}
			configPath, err = searchConfigDir(candidate, configuration)
			if configPath != "" {
				return configPath, err // bubble the match up
			}
			continue
		}
		// Ignore files living under dot-paths.
		if strings.Contains(candidate, "/.") {
			continue
		}
		// Only configuration files are considered (e.g. .json is ignored).
		if strings.Contains(obj.Name(), configuration+".cfg") {
			return candidate, err
		}
	}
	return configPath, err
}

99
vendor/git.lowcodeplatform.net/fabric/lib/crypto.go

@ -0,0 +1,99 @@
package lib
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"errors"
"io"
"strings"
)
// Пример использования
//func main() {
// key := []byte("LKHlhb899Y09olUi")
// encryptMsg, _ := encrypt(key, "Hello World")
// msg, _ := decrypt(key, encryptMsg)
// fmt.Println(msg) // Hello World
//}
// addBase64Padding restores the '=' characters stripped by
// removeBase64Padding so the string length is a multiple of four again.
func addBase64Padding(value string) string {
	if rem := len(value) % 4; rem != 0 {
		return value + strings.Repeat("=", 4-rem)
	}
	return value
}
// removeBase64Padding strips every '=' from value (the inverse is
// addBase64Padding).
func removeBase64Padding(value string) string {
	return strings.ReplaceAll(value, "=", "")
}
// unpad strips the PKCS#7-style padding added by Pad. It returns an error
// when the padding byte is inconsistent with the data length, which usually
// means the ciphertext was decrypted with the wrong key.
func unpad(src []byte) ([]byte, error) {
	length := len(src)
	// BUG FIX: the original indexed src[length-1] without checking for empty
	// input, panicking on a zero-length slice.
	if length == 0 {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	unpadding := int(src[length-1])
	if unpadding > length {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	return src[:(length - unpadding)], nil
}
func Pad(src []byte) []byte {
padding := aes.BlockSize - len(src)%aes.BlockSize
padtext := bytes.Repeat([]byte{byte(padding)}, padding)
return append(src, padtext...)
}
// Encrypt AES-CFB encrypts text with key, prefixes a random IV, and returns
// the result base64-URL-encoded with '=' padding stripped.
func Encrypt(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	plain := Pad([]byte(text))
	buf := make([]byte, aes.BlockSize+len(plain))
	// The IV occupies the first block and is shipped with the ciphertext.
	iv := buf[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return "", err
	}
	stream := cipher.NewCFBEncrypter(block, iv)
	stream.XORKeyStream(buf[aes.BlockSize:], plain)
	return removeBase64Padding(base64.URLEncoding.EncodeToString(buf)), nil
}
// Decrypt reverses Encrypt: it restores base64 padding, decodes, splits off
// the IV, AES-CFB decrypts in place, and strips the PKCS#7 padding.
func Decrypt(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	decoded, err := base64.URLEncoding.DecodeString(addBase64Padding(text))
	if err != nil {
		return "", err
	}
	if len(decoded)%aes.BlockSize != 0 {
		return "", errors.New("blocksize must be multipe of decoded message length")
	}
	iv, msg := decoded[:aes.BlockSize], decoded[aes.BlockSize:]
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(msg, msg)
	plain, err := unpad(msg)
	if err != nil {
		return "", err
	}
	return string(plain), nil
}

305
vendor/git.lowcodeplatform.net/fabric/lib/files.go

@ -0,0 +1,305 @@
package lib
import (
"archive/zip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// CreateFile Создаем файл по указанному пути если его нет
func CreateFile(path string) (err error) {
// detect if file exists
_, err = os.Stat(path)
var file *os.File
// delete old file if exists
if !os.IsNotExist(err) {
os.RemoveAll(path)
}
// create file
file, err = os.Create(path)
if err != nil {
return err
}
defer file.Close()
return err
}
// WriteFile (re)creates the file at path and writes data to it.
func WriteFile(path string, data []byte) (err error) {
	// CreateFile removes any pre-existing entry and creates an empty file.
	if err = CreateFile(path); err != nil {
		return
	}
	// Reopen with read/write permission and write the payload.
	f, err := os.OpenFile(path, os.O_RDWR, 0644)
	if err != nil {
		return
	}
	defer f.Close()
	if _, err = f.Write(data); err != nil {
		return
	}
	// Flush the changes to stable storage.
	return f.Sync()
}
// ReadFile читаем файл. (отключил: всегда в рамках рабочей диретории)
func ReadFile(path string) (result string, err error) {
// если не от корня, то подставляем текущую директорию
//if path[:1] != "/" {
// path = CurrentDir() + "/" + path
//} else {
// path = CurrentDir() + path
//}
file, err := os.Open(path)
if err != nil {
return "", err
}
b, err := ioutil.ReadAll(file)
if err == nil {
result = string(b)
}
defer file.Close()
return result, err
}
// CopyFolder recursively copies directory source to dest, creating dest with
// the source directory's mode. Per-entry copy failures are printed and
// skipped (best effort), preserving the original behavior; the last entry's
// error, if any, is returned via the named result.
func CopyFolder(source string, dest string) (err error) {
	sourceinfo, err := os.Stat(source)
	if err != nil {
		return err
	}
	err = os.MkdirAll(dest, sourceinfo.Mode())
	if err != nil {
		return err
	}
	// BUG FIX: the original discarded the os.Open error; a failed Open left
	// directory nil and Readdir below would panic.
	directory, err := os.Open(source)
	if err != nil {
		return err
	}
	defer directory.Close()
	objects, err := directory.Readdir(-1)
	if err != nil {
		return err
	}
	for _, obj := range objects {
		sourcefilepointer := source + "/" + obj.Name()
		destinationfilepointer := dest + "/" + obj.Name()
		if obj.IsDir() {
			err = CopyFolder(sourcefilepointer, destinationfilepointer)
			if err != nil {
				fmt.Println(err)
			}
		} else {
			err = CopyFile(sourcefilepointer, destinationfilepointer)
			if err != nil {
				fmt.Println(err)
			}
		}
	}
	return
}
// CopyFile copies a single file from source to dest and carries the source's
// permission bits over to the destination.
//
// FIX: the original applied chmod only when os.Stat FAILED (inverted check)
// and lost the chmod error in a shadowed err variable.
func CopyFile(source string, dest string) (err error) {
	sourcefile, err := os.Open(source)
	if err != nil {
		return err
	}
	defer sourcefile.Close()
	destfile, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer destfile.Close()
	if _, err = io.Copy(destfile, sourcefile); err != nil {
		return err
	}
	sourceinfo, err := os.Stat(source)
	if err != nil {
		return err
	}
	// preserve the source file's mode on the copy
	return os.Chmod(dest, sourceinfo.Mode())
}
// IsExist reports whether a file or directory exists at path.
func IsExist(path string) (exist bool) {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
// CreateDir creates the directory path (with parents); a zero mode defaults
// to 0711.
func CreateDir(path string, mode os.FileMode) (err error) {
	if mode == 0 {
		mode = 0711
	}
	return os.MkdirAll(path, mode)
}
// DeleteFile removes the file at path.
func DeleteFile(path string) (err error) {
	return os.Remove(path)
}
// MoveFile moves source to dest by copying it and then deleting the original.
func MoveFile(source string, dest string) (err error) {
	if err = CopyFile(source, dest); err != nil {
		return err
	}
	return DeleteFile(source)
}
// Zip archives source (a file or a directory tree) into the zip file target.
// Usage: Zip("/tmp/documents", "/tmp/backup.zip")
//
// Fixes vs the original: an os.Stat failure now returns the error instead of
// nil (which silently produced an empty archive), and the filepath.Walk
// error is no longer discarded.
func Zip(source, target string) (err error) {
	zipfile, err := os.Create(target)
	if err != nil {
		return err
	}
	defer zipfile.Close()
	archive := zip.NewWriter(zipfile)
	defer archive.Close()
	info, err := os.Stat(source)
	if err != nil {
		return err
	}
	// for directories, entries are stored under the directory's base name
	var baseDir string
	if info.IsDir() {
		baseDir = filepath.Base(source)
	}
	err = filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		header, err := zip.FileInfoHeader(info)
		if err != nil {
			return err
		}
		if baseDir != "" {
			header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
		}
		if info.IsDir() {
			header.Name += "/"
		} else {
			header.Method = zip.Deflate
		}
		writer, err := archive.CreateHeader(header)
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		_, err = io.Copy(writer, file)
		return err
	})
	return err
}
// Unzip extracts archive into the target directory.
// Usage: Unzip("/tmp/report-2015.zip", "/tmp/reports/")
//
// Fixes vs the original: per-entry handles are closed as soon as the entry is
// written (the original deferred them inside the loop, leaking handles until
// return), the archive reader is closed, entries escaping target via ".."
// ("Zip Slip") are rejected, and parent directories of plain-file entries
// are created.
func Unzip(archive, target string) (err error) {
	reader, err := zip.OpenReader(archive)
	if err != nil {
		return err
	}
	defer reader.Close()
	if err := os.MkdirAll(target, 0755); err != nil {
		return err
	}
	cleanTarget := filepath.Clean(target)
	for _, file := range reader.File {
		path := filepath.Join(target, file.Name)
		// reject path traversal: the joined path must stay inside target
		if path != cleanTarget && !strings.HasPrefix(path, cleanTarget+string(os.PathSeparator)) {
			return fmt.Errorf("illegal file path in archive: %s", file.Name)
		}
		if file.FileInfo().IsDir() {
			os.MkdirAll(path, file.Mode())
			continue
		}
		if err = extractZipEntry(file, path); err != nil {
			return err
		}
	}
	return nil
}

// extractZipEntry writes one regular-file archive entry to disk, closing all
// handles before returning.
func extractZipEntry(file *zip.File, path string) error {
	fileReader, err := file.Open()
	if err != nil {
		return err
	}
	defer fileReader.Close()
	// some archives carry file entries without their parent directory entries
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
	if err != nil {
		return err
	}
	defer targetFile.Close()
	_, err = io.Copy(targetFile, fileReader)
	return err
}
// Chmod changes the mode of the file at path via an open file handle.
func Chmod(path string, mode os.FileMode) (err error) {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	return file.Chmod(mode)
}

204
vendor/git.lowcodeplatform.net/fabric/lib/function.go

@ -0,0 +1,204 @@
package lib
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
"time"
"git.lowcodeplatform.net/fabric/models"
uuid "github.com/satori/go.uuid"
)
// ResponseJSON writes objResponse as a JSON envelope (models.Response) to w.
// If status is not a known models.StatusCode key, the "NotStatus" entry
// (501) is used; errIn, when non-nil, is attached to the status. metrics, if
// given, is round-tripped through JSON into models.Metrics.
//
// FIX: Content-Type is now set BEFORE WriteHeader — net/http ignores headers
// set after the status line has been written, so the original never sent the
// JSON content type. The parameter formerly named "error" (shadowing the
// builtin type) is renamed.
func ResponseJSON(w http.ResponseWriter, objResponse interface{}, status string, errIn error, metrics interface{}) (err error) {
	if w == nil {
		return
	}
	errMessage, found := models.StatusCode[status]
	if !found {
		errMessage = models.StatusCode["NotStatus"]
	}
	if errIn != nil {
		errMessage.Error = errIn
	}
	objResp := &models.Response{}
	if metrics != nil {
		// round-trip through JSON to coerce arbitrary input into models.Metrics
		var metricsR models.Metrics
		b1, _ := json.Marshal(metrics)
		json.Unmarshal(b1, &metricsR)
		objResp.Metrics = metricsR
	}
	objResp.Status = errMessage
	objResp.Data = objResponse
	out, err := json.Marshal(objResp)
	if err != nil {
		// fall back to a plain error string if the envelope cannot be marshaled
		out = []byte(fmt.Sprintf("%s", err))
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(errMessage.Status)
	w.Write(out)
	return
}
// RunProcess launches a service binary with "--config config --mode mode"
// (command defaults to "start") in its own process group and returns the
// child's PID. In debug mode the child's stdout/stderr are redirected to a
// timestamped log file under ./debug/<service>/.
//
// FIX: the original assigned the (possibly nil) result of os.Create to
// cmd.Stdout unconditionally; debug-log setup is now best-effort and a
// failed create no longer risks a nil file handle.
func RunProcess(path, config, command, mode string) (pid int, err error) {
	if config == "" {
		return 0, fmt.Errorf("%s", "Configuration file is not found")
	}
	if command == "" {
		command = "start"
	}
	path = strings.Replace(path, "//", "/", -1)
	cmd := exec.Command(path, command, "--config", config, "--mode", mode)
	if mode == "debug" {
		t := time.Now().Format("2006.01.02-15-04-05")
		s := strings.Split(path, sep)
		srv := s[len(s)-1]
		// best-effort: failure to set up debug logging must not block the start
		if derr := CreateDir("debug"+sep+srv, 0777); derr == nil {
			configName := strings.Replace(config, "-", "", -1)
			if f, ferr := os.Create("debug" + sep + srv + sep + configName + "_" + fmt.Sprint(t) + ".log"); ferr == nil {
				cmd.Stdout = f
				cmd.Stderr = f
			}
		}
	}
	// own process group so the child is not tied to the parent's group signals
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err = cmd.Start(); err != nil {
		return 0, err
	}
	return cmd.Process.Pid, nil
}
// RootDir returns the directory containing the running executable, derived
// from os.Args[0].
//
// FIX: the original re-checked err after the early return, which was dead
// code; the diagnostic print now lives on the live error path.
func RootDir() (rootDir string, err error) {
	file, err := filepath.Abs(os.Args[0])
	if err != nil {
		fmt.Println("Error calculation RootDir. File: ", file, "; Error: ", err)
		return
	}
	return path.Dir(file), nil
}
// Hash returns the hex-encoded SHA-1 digest of str.
func Hash(str string) (result string) {
	sum := sha1.Sum([]byte(str))
	return hex.EncodeToString(sum[:])
}
// PanicOnErr prints err and panics when err is non-nil; a nil err is a no-op.
func PanicOnErr(err error) {
	if err == nil {
		return
	}
	fmt.Println("Error: ", err)
	panic(err)
}
// UUID returns a freshly generated random (v4) UUID string.
func UUID() (result string) {
	return uuid.NewV4().String()
}
// RemoveElementFromData removes element i from p.Data in place; returns
// false (and leaves p untouched) when i is out of range.
func RemoveElementFromData(p *models.ResponseData, i int) bool {
	if i >= len(p.Data) {
		return false
	}
	p.Data = append(p.Data[:i], p.Data[i+1:]...)
	return true
}
// JsonEscape escapes i for embedding inside a JSON string literal.
//   JsonEscape(`dog "fish" cat`) -> `dog \"fish\" cat`
func JsonEscape(i string) string {
	b, err := json.Marshal(i)
	if err != nil {
		panic(err)
	}
	// strip the surrounding quotes Marshal adds around a string value
	quoted := string(b)
	return quoted[1 : len(quoted)-1]
}
// SearchConfig walks projectDir recursively and returns the path of the
// first file whose name contains "<configuration>.cfg". Hidden
// (dot-prefixed) directories are not entered, and files under hidden path
// segments are ignored.
func SearchConfig(projectDir, configuration string) (configPath string, err error) {
	directory, err := os.Open(projectDir)
	if err != nil {
		return "", err
	}
	defer directory.Close()
	objects, err := directory.Readdir(-1)
	if err != nil {
		return "", err
	}
	for _, obj := range objects {
		candidate := projectDir + sep + obj.Name()
		if obj.IsDir() {
			// never descend into hidden directories
			if strings.HasPrefix(obj.Name(), ".") {
				continue
			}
			configPath, err = SearchConfig(candidate, configuration)
			if configPath != "" {
				return configPath, err // bubble the hit up the recursion
			}
			continue
		}
		if strings.Contains(candidate, "/.") {
			continue
		}
		// only *.cfg configuration files match (.json is ignored)
		if strings.Contains(obj.Name(), configuration+".cfg") {
			return candidate, err
		}
	}
	return configPath, err
}

139
vendor/git.lowcodeplatform.net/fabric/lib/http.go

@ -0,0 +1,139 @@
package lib
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"git.lowcodeplatform.net/fabric/models"
"github.com/labstack/gommon/color"
)
// Curl performs an HTTP request and always returns the raw response body as
// an interface{} (useful for external requests with unknown structure). When
// response is non-nil the body is additionally unmarshaled into it. method
// defaults to POST; the pseudo-methods JSONTOGET/JSONTOPOST translate a flat
// JSON body into query-string / form parameters respectively.
//
// FIX: in the JSONTOPOST branch the original set req.PostForm and a header
// BEFORE checking the http.NewRequest error, dereferencing a nil request on
// failure; req is now only touched after a successful NewRequest.
func Curl(method, urlc, bodyJSON string, response interface{}, headers map[string]string, cookies []*http.Cookie) (result interface{}, err error) {
	var mapValues map[string]string
	var req *http.Request
	client := &http.Client{}
	if method == "" {
		method = "POST"
	}
	method = strings.Trim(method, " ")
	values := url.Values{}
	// a JSON body may be folded into the query string only when the URL does
	// not already carry parameters of its own
	clearUrl := strings.Contains(urlc, "?")
	bodyJSON = strings.Replace(bodyJSON, " ", "", -1)
	err = json.Unmarshal([]byte(bodyJSON), &mapValues)
	actionType := ""
	if method == "JSONTOGET" && bodyJSON != "" && clearUrl {
		actionType = "JSONTOGET"
	}
	if method == "JSONTOPOST" && bodyJSON != "" {
		actionType = "JSONTOPOST"
	}
	switch actionType {
	case "JSONTOGET": // fold the JSON parameters into the query string
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			uri, _ := url.Parse(urlc)
			uri.RawQuery = values.Encode()
			urlc = uri.String()
			req, err = http.NewRequest("GET", urlc, strings.NewReader(bodyJSON))
		} else {
			fmt.Println("Error! Fail parsed bodyJSON from GET Curl: ", err)
		}
	case "JSONTOPOST": // fold the JSON parameters into the request body
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			req, err = http.NewRequest("POST", urlc, strings.NewReader(values.Encode()))
			if err == nil {
				req.PostForm = values
				req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			}
		} else {
			fmt.Println("Error! Fail parsed bodyJSON to POST: ", err)
		}
	default:
		// note: a bodyJSON parse error above is deliberately overwritten here,
		// matching the original's behavior for plain GET/POST requests
		req, err = http.NewRequest(method, urlc, strings.NewReader(bodyJSON))
	}
	if err != nil {
		return "", err
	}
	// caller-supplied headers
	for k, v := range headers {
		req.Header.Add(k, v)
	}
	// caller-supplied cookies
	for _, v := range cookies {
		req.AddCookie(v)
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error request: method:", method, ", url:", urlc, ", bodyJSON:", bodyJSON)
		return "", err
	}
	defer resp.Body.Close()
	responseData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	responseString := string(responseData)
	// optionally decode into the caller's object
	if response != nil {
		json.Unmarshal([]byte(responseString), &response)
	}
	return responseString, err
}
// AddressProxy asks the proxy service at addressProxy for a free port in the
// given interval. Returns an error when no port could be obtained.
func AddressProxy(addressProxy, interval string) (port string, err error) {
	fail := color.Red("[Fail]")
	// automatic port assignment via the proxy
	if addressProxy != "" && interval != "" {
		if !strings.HasSuffix(addressProxy, "/") {
			addressProxy += "/"
		}
		var portDataAPI models.Response
		urlProxy := addressProxy + "port?interval=" + interval
		Curl("GET", urlProxy, "", &portDataAPI, map[string]string{}, nil)
		port = fmt.Sprint(portDataAPI.Data)
	}
	if port == "" {
		err = fmt.Errorf("%s", "Port APP-service is null. Servive not running.")
		fmt.Print(fail, " Port APP-service is null. Servive not running.\n")
	}
	return port, err
}

386
vendor/git.lowcodeplatform.net/fabric/lib/logger.go

@ -0,0 +1,386 @@
// обертка для логирования, которая дополняем аттрибутами логируемого процесса logrus
// дополняем значениями, идентифицирующими запущенный сервис UID,Name,Service
package lib
import (
"context"
"fmt"
"io"
"os"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
)
var logrusB = logrus.New()

// LogLine describes one JSON line of a log file; used for unmarshaling.
type LogLine struct {
	Config string      `json:"config"`
	Level  string      `json:"level"`
	Msg    interface{} `json:"msg"`
	Name   string      `json:"name"`
	Srv    string      `json:"srv"`
	Time   string      `json:"time"`
	Uid    string      `json:"uid"`
}

// log holds the state behind the Log interface: the output destination, the
// identity of the logged process and the rotation/retention settings.
type log struct {
	// Output is where records go: stdout or an *os.File being written to.
	Output io.Writer `json:"output"`
	// Levels selects the enabled levels, several can be joined with "|"
	// (e.g. "Error|Warning"); "All" enables every level. Semantics:
	//   Trace   - request-processing tracing
	//   Debug   - debugging/profiling messages, usually enabled at bring-up
	//             or when hunting bottlenecks
	//   Info    - ordinary progress messages; useful context for bug hunts
	//   Warning - something strange happened and deserves operator attention;
	//             triage it into either Info or Error and handle it in code
	//   Error   - a system failure needing fairly prompt intervention
	//             (a user's bad input is NOT a system error)
	//   Panic   - critical failures that take down the system or a subsystem,
	//             often bad configuration or hardware; require immediate
	//             reaction (consider SMS alerting)
	Levels string `json:"levels"`
	// UID is the uid of the logged process/service (a random value).
	UID string `json:"uid"`
	// Name is the name of the logged process.
	Name string `json:"name"`
	// Service is the service kind (app/gui/...).
	Service string `json:"service"`
	// Dir is the directory log files are written to.
	Dir string `json:"dir"`
	// Config is the uid of the configuration the process was started with.
	Config string `json:"config"`
	// IntervalReload is the period between checks for the current day's file.
	IntervalReload time.Duration `json:"delay_reload"`
	// IntervalClearFiles is the period between checks for files to delete.
	IntervalClearFiles time.Duration `json:"interval_clear_files"`
	// PeriodSaveFiles is the retention period as "years-months-days"
	// (e.g. "0-1-0" keeps files for one month).
	PeriodSaveFiles string `json:"period_save_files"`
	File            *os.File
	mux             *sync.Mutex
}

// Log is the logging facade used by the services: leveled emitters plus
// rotation control and access to the underlying output/file.
type Log interface {
	Trace(args ...interface{})
	Debug(args ...interface{})
	Info(args ...interface{})
	Warning(args ...interface{})
	Error(err error, args ...interface{})
	Panic(err error, args ...interface{})
	Exit(err error, args ...interface{})
	RotateInit(ctx context.Context)
	GetOutput() io.Writer
	GetFile() *os.File
	Close()
}
// Trace emits a trace-level record when "Trace" is enabled in l.Levels.
func (l *log) Trace(args ...interface{}) {
	if !strings.Contains(l.Levels, "Trace") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.TraceLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
	})
	entry.Trace(args...)
}
// Debug emits a debug-level record when "Debug" is enabled in l.Levels.
func (l *log) Debug(args ...interface{}) {
	if !strings.Contains(l.Levels, "Debug") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.DebugLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
	})
	entry.Debug(args...)
}
// Info emits an info-level record when "Info" is enabled in l.Levels.
func (l *log) Info(args ...interface{}) {
	if !strings.Contains(l.Levels, "Info") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.InfoLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
	})
	entry.Info(args...)
}
// Warning emits a warn-level record when "Warning" is enabled in l.Levels.
func (l *log) Warning(args ...interface{}) {
	if !strings.Contains(l.Levels, "Warning") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.WarnLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
	})
	entry.Warn(args...)
}
// Error emits an error-level record (with the error attached as a field)
// when "Error" is enabled in l.Levels.
func (l *log) Error(err error, args ...interface{}) {
	if !strings.Contains(l.Levels, "Error") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.ErrorLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
		"error":  fmt.Sprint(err),
	})
	entry.Error(args...)
}
// Panic emits a panic-level record (logrus.Panic panics after logging) when
// "Panic" is enabled in l.Levels.
func (l *log) Panic(err error, args ...interface{}) {
	if !strings.Contains(l.Levels, "Panic") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.PanicLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
		"error":  fmt.Sprint(err),
	})
	entry.Panic(args...)
}
// Exit logs at fatal level and terminates the program (via logrus.Fatal)
// when "Fatal" is enabled in l.Levels.
func (l *log) Exit(err error, args ...interface{}) {
	if !strings.Contains(l.Levels, "Fatal") {
		return
	}
	logrusB.SetOutput(l.Output)
	logrusB.SetFormatter(&logrus.JSONFormatter{})
	logrusB.SetLevel(logrus.FatalLevel)
	entry := logrusB.WithFields(logrus.Fields{
		"name":   l.Name,
		"uid":    l.UID,
		"srv":    l.Service,
		"config": l.Config,
		"error":  fmt.Sprint(err),
	})
	entry.Fatal(args...)
}
// RotateInit starts two background goroutines: one periodically re-opens the
// log file (so a new dated file is picked up), the other deletes log files
// older than PeriodSaveFiles. Both stop when ctx is cancelled.
//
// Fixes vs the original: the caller's ctx is used directly (the original
// derived a local context and deferred its cancel, which cancelled both
// goroutines as soon as RotateInit returned); tickers are no longer
// re-created on every tick (each re-creation leaked the previous ticker);
// the scanned directory handle is closed; and a nil result from NewLogger no
// longer causes a nil dereference.
func (l *log) RotateInit(ctx context.Context) {
	// NOTE(review): this overrides any configured reload interval — confirm intended.
	l.IntervalReload = 5 * time.Second
	defer func() {
		if rec := recover(); rec != nil {
			fmt.Printf("panic in logger (RotateInit). stack: %+v", string(debug.Stack()))
		}
	}()
	// periodically re-open the current log file
	go func() {
		ticker := time.NewTicker(l.IntervalReload)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				l.File.Close() // close the old file
				b := NewLogger(l.Dir, l.Levels, l.UID, l.Name, l.Service, l.Config, l.IntervalReload, l.IntervalClearFiles, l.PeriodSaveFiles)
				if b == nil {
					continue // could not re-open; retry on the next tick
				}
				l.mux.Lock()
				l.Output = b.GetOutput()
				l.File = b.GetFile() // hand the new file pointer to the log struct
				l.mux.Unlock()
			}
		}
	}()
	// periodically remove expired log files
	go func() {
		ticker := time.NewTicker(l.IntervalClearFiles)
		defer ticker.Stop()
		// retention period after which files are deleted
		period := l.PeriodSaveFiles
		if period == "" {
			l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)"))
			return
		}
		slPeriod := strings.Split(period, "-")
		if len(slPeriod) < 3 {
			l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)"))
			return
		}
		// numeric year/month/day used to compute the deletion cutoff date
		year, err := strconv.Atoi(slPeriod[0])
		if err != nil {
			l.Error(err, "Fail converted Year from period saved log files. (expected format: year-month-day; eg: 0-1-0)")
		}
		month, err := strconv.Atoi(slPeriod[1])
		if err != nil {
			l.Error(err, "Fail converted Month from period saved log files. (expected format: year-month-day; eg: 0-1-0)")
		}
		day, err := strconv.Atoi(slPeriod[2])
		if err != nil {
			l.Error(err, "Fail converted Day from period saved log files. (expected format: year-month-day; eg: 0-1-0)")
		}
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				cutoff := time.Now().AddDate(-year, -month, -day).Format("2006.01.02")
				// scan the log dir; file names start with "YYYY.MM.DD_<service>",
				// so a lexical compare against the cutoff prefix works
				directory, derr := os.Open(l.Dir)
				if derr != nil {
					l.Error(derr, "Error read directory: ", l.Dir)
					return
				}
				objects, derr := directory.Readdir(-1)
				directory.Close()
				if derr != nil {
					l.Error(derr, "Error read directory: ", directory)
					return
				}
				for _, obj := range objects {
					filename := obj.Name()
					if cutoff+"_"+l.Service > filename {
						pathFile := l.Dir + sep + filename
						if rerr := os.Remove(pathFile); rerr != nil {
							l.Error(rerr, "Error deleted file: ", pathFile)
							return
						}
					}
				}
			}
		}
	}()
}
// GetOutput returns the current log destination under the mutex, so it is
// safe to call while RotateInit swaps files in the background.
func (l *log) GetOutput() io.Writer {
	l.mux.Lock()
	defer l.mux.Unlock()
	return l.Output
}
// GetFile returns the currently open log file.
// NOTE(review): unlike GetOutput, this read is not guarded by l.mux —
// confirm callers do not race with RotateInit's file swap.
func (l *log) GetFile() *os.File {
	return l.File
}
// Close closes the underlying log file; the logger must not be used after.
func (l *log) Close() {
	l.File.Close()
}
// NewLogger creates (or opens, append mode) today's log file
// "<date>_<srv>_<uid>.log" under logsDir and returns a Log configured with
// the given identity fields and rotation intervals. Returns nil when the
// directory or file cannot be prepared.
//
// FIX: the original discarded CreateDir's return value, making the
// following error check dead code; the error is now actually captured.
func NewLogger(logsDir, level, uid, name, srv, config string, intervalReload, intervalClearFiles time.Duration, periodSaveFiles string) Log {
	m := sync.Mutex{}
	datefile := time.Now().Format("2006.01.02")
	logName := datefile + "_" + srv + "_" + uid + ".log"
	// make sure the log directory exists
	if err := CreateDir(logsDir, 0711); err != nil {
		logrus.Error(err, "Error creating directory")
		return nil
	}
	pathFile := logsDir + "/" + logName
	if !IsExist(pathFile) {
		if err := CreateFile(pathFile); err != nil {
			logrus.Error(err, "Error creating file")
			return nil
		}
	}
	file, err := os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// keeps the original behavior: logrus.Panic aborts here
		logrus.Panic(err, "error opening file")
		return nil
	}
	return &log{
		Output:             file,
		Levels:             level,
		UID:                uid,
		Name:               name,
		Service:            srv,
		Dir:                logsDir,
		Config:             config,
		IntervalReload:     intervalReload,
		IntervalClearFiles: intervalClearFiles,
		PeriodSaveFiles:    periodSaveFiles,
		mux:                &m,
		File:               file,
	}
}

343
vendor/git.lowcodeplatform.net/fabric/lib/metric.go

@ -0,0 +1,343 @@
package lib
import (
"context"
"encoding/json"
"net/http"
"sort"
"sync"
"time"
)
// Metrics is the aggregated snapshot periodically written to the log.
type Metrics struct {
	StateHost     StateHost
	Connections   int     // total connections over the whole accounting window
	Queue_AVG     float32 // average number of queued (in-flight) requests
	Queue_QTL_80  float32 // 80% quantile tail: mean queue depth past the 80% mark of the sorted series
	Queue_QTL_90  float32 // 90% quantile tail
	Queue_QTL_99  float32 // 99% quantile tail
	TPR_AVG_MS    float32 // (ms) time per request — average handling time
	TPR_QTL_MS_80 float32 // (ms) 80% quantile tail of handling times
	TPR_QTL_MS_90 float32 // (ms) 90% quantile tail
	TPR_QTL_MS_99 float32 // (ms) 99% quantile tail
	RPS           int     // requests per second
}
// serviceMetric accumulates raw samples and implements ServiceMetric.
type serviceMetric struct {
	Metrics
	Stash          Metrics         // holds the previously generated snapshot
	connectionOpen int             // currently open connections (+ on request, - on response)
	queue          []int           // queue-depth samples, one per open/close event
	tpr            []time.Duration // per-request handling times
	mux            *sync.Mutex
	ctx            context.Context
}

// ServiceMetric is the collector's public surface: sample setters, snapshot
// generation/retrieval, and an HTTP middleware that drives the counters.
type ServiceMetric interface {
	SetState()
	SetConnectionIncrement()
	SetConnectionDecrement()
	SetTimeRequest(timeRequest time.Duration)
	Generate()
	Get() (result Metrics)
	Clear()
	SaveToStash()
	Middleware(next http.Handler) http.Handler
}
// SetState samples host-level gauges into StateHost.
// Intentionally lock-free, matching how Generate invokes it under s.mux.
func (s *serviceMetric) SetState() {
	s.StateHost.Tick()
}
// SetTimeRequest records one request-handling duration. The append happens
// in its own goroutine so callers are never blocked on the mutex.
func (s *serviceMetric) SetTimeRequest(timeRequest time.Duration) {
	go func() {
		s.mux.Lock()
		s.tpr = append(s.tpr, timeRequest)
		s.mux.Unlock()
	}()
}
// SetConnectionIncrement bumps the total and open connection counters and
// records a queue-depth sample. Runs asynchronously so callers (request
// handlers) are never blocked.
func (s *serviceMetric) SetConnectionIncrement() {
	go func() {
		s.mux.Lock()
		defer s.mux.Unlock()
		s.Connections++
		s.connectionOpen++
		s.queue = append(s.queue, s.connectionOpen)
	}()
}
// SetConnectionDecrement lowers the open-connection counter (never below
// zero) and records a queue-depth sample. Runs asynchronously so callers are
// never blocked.
func (s *serviceMetric) SetConnectionDecrement() {
	go func() {
		s.mux.Lock()
		defer s.mux.Unlock()
		if s.connectionOpen > 0 {
			s.connectionOpen--
		}
		s.queue = append(s.queue, s.connectionOpen)
	}()
}
// SetP stores one request-duration sample (same data as SetTimeRequest);
// asynchronous so callers are never blocked.
func (s *serviceMetric) SetP(value time.Duration) {
	go func() {
		s.mux.Lock()
		s.tpr = append(s.tpr, value)
		s.mux.Unlock()
	}()
}
// SaveToStash snapshots the currently computed metrics into the stash.
// The embedded Metrics value carries exactly the fields the original copied
// one by one, so a single struct assignment is equivalent.
func (s *serviceMetric) SaveToStash() {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.Stash = s.Metrics
}
// Clear resets all accumulated samples and computed values so a new
// collection window can start. StateHost is preserved, as in the original.
func (s *serviceMetric) Clear() {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.Metrics = Metrics{StateHost: s.StateHost}
	s.connectionOpen = 0
	s.queue = []int{}
	s.tpr = []time.Duration{}
}
// Get returns the last stashed snapshot under the mutex.
func (s *serviceMetric) Get() (result Metrics) {
	s.mux.Lock()
	result = s.Stash
	s.mux.Unlock()
	return result
}
// Generate computes the aggregate metrics — averages and 80/90/99% quantile
// tail means — from the queue-depth and request-time samples collected since
// the last Clear, and stores the results on s. Called periodically by
// RunMetricLogger while holding s.mux.
//
// FIX: the request-time average previously divided by the number of QUEUE
// samples (lenQueue) instead of the number of timing samples — with few/no
// queue samples this skewed or zeroed TPR_AVG_MS.
func (s *serviceMetric) Generate() {
	var valQ80, valQ90, valQ99, valQ float32
	var queueAVG, queueQTL80, queueQTL90, queueQTL99 float32
	var valT80, valT90, valT99, valT float32
	var avgTPR, qtlTPR80, qtlTPR90, qtlTPR99 float32
	s.mux.Lock()
	defer s.mux.Unlock()
	s.SetState() // host metrics snapshot; Tick takes no lock of its own
	// queue-depth stats: average and tail means past the 80/90/99% marks of
	// the sorted series
	sort.Ints(s.queue)
	lenQueue := len(s.queue)
	if lenQueue != 0 {
		q80 := lenQueue * 8 / 10
		q90 := lenQueue * 9 / 10
		q99 := lenQueue * 99 / 100
		for i, v := range s.queue {
			vv := float32(v)
			if i > q80 {
				valQ80 += vv
			}
			if i > q90 {
				valQ90 += vv
			}
			if i > q99 {
				valQ99 += vv
			}
			valQ += vv
		}
		den := float32(lenQueue) - 1 // guard against dividing by zero
		if den == 0 {
			den = 1
		}
		queueAVG = valQ / den
		queueQTL80 = valQ80 / float32(lenQueue-q80)
		queueQTL90 = valQ90 / float32(lenQueue-q90)
		queueQTL99 = valQ99 / float32(lenQueue-q99)
	}
	// request-time stats. NOTE(review): samples are in microseconds although
	// the destination fields are named *_MS — confirm the intended unit.
	lenTPR := len(s.tpr)
	if lenTPR != 0 {
		timeInt := make([]float64, 0, lenTPR)
		for _, v := range s.tpr {
			timeInt = append(timeInt, float64(v.Microseconds()))
		}
		sort.Float64s(timeInt)
		t80 := lenTPR * 8 / 10
		t90 := lenTPR * 9 / 10
		t99 := lenTPR * 99 / 100
		for i, v := range timeInt {
			vv := float32(v)
			if i > t80 {
				valT80 += vv
			}
			if i > t90 {
				valT90 += vv
			}
			if i > t99 {
				valT99 += vv
			}
			valT += vv
		}
		den := float32(lenTPR) - 1 // FIX: was float32(lenQueue) - 1
		if den == 0 {
			den = 1
		}
		avgTPR = valT / den
		qtlTPR80 = valT80 / float32(lenTPR-t80)
		qtlTPR90 = valT90 / float32(lenTPR-t90)
		qtlTPR99 = valT99 / float32(lenTPR-t99)
	}
	// NOTE(review): assumes a 10-second collection window — confirm against
	// the interval passed to NewMetric.
	s.RPS = lenQueue / 10
	s.Queue_AVG = queueAVG
	s.Queue_QTL_80 = queueQTL80
	s.Queue_QTL_90 = queueQTL90
	s.Queue_QTL_99 = queueQTL99
	s.TPR_AVG_MS = avgTPR
	s.TPR_QTL_MS_80 = qtlTPR80
	s.TPR_QTL_MS_90 = qtlTPR90
	s.TPR_QTL_MS_99 = qtlTPR99
}
// Middleware counts in-flight requests: the open-connection gauge is
// incremented before the downstream handler runs and decremented after it
// finishes.
//
// Improvement: the decrement is deferred so the gauge stays correct even if
// the downstream handler panics.
func (s *serviceMetric) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		s.SetConnectionIncrement()
		defer s.SetConnectionDecrement()
		next.ServeHTTP(w, r)
	})
}
// NewMetric builds the metrics collector and starts the background logging
// goroutine; interval is how often accumulated stats are flushed to the log.
func NewMetric(ctx context.Context, logger Log, interval time.Duration) (metrics ServiceMetric) {
	mtx := sync.Mutex{}
	// zero-valued snapshot shared as the initial live and stashed state
	base := Metrics{StateHost: StateHost{}}
	sm := &serviceMetric{
		Metrics:        base,
		Stash:          base,
		connectionOpen: 0,
		queue:          []int{},
		mux:            &mtx,
		ctx:            ctx,
	}
	go RunMetricLogger(ctx, sm, logger, interval)
	return sm
}
// RunMetricLogger periodically (every interval) generates the metric
// snapshot, stashes it, clears the accumulators and writes the stashed
// snapshot to the log at trace level. Stops when ctx is cancelled.
//
// FIX: the original re-created the ticker on every tick; the deferred Stop
// only stopped the first one, leaking a ticker per interval.
func RunMetricLogger(ctx context.Context, m ServiceMetric, logger Log, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			m.Generate()    // compute the snapshot
			m.SaveToStash() // stash it
			m.Clear()       // reset accumulators for the next window
			mes, _ := json.Marshal(m.Get())
			logger.Trace(string(mes)) // log from the stash
		}
	}
}

41
vendor/git.lowcodeplatform.net/fabric/lib/state.go

@ -0,0 +1,41 @@
package lib
import (
"math"
"runtime"
"github.com/shirou/gopsutil/mem"
)
// StateHost holds host-level gauges. Only PercentageMemory and Goroutines
// are currently populated by Tick; the CPU/disk fields exist for the
// disabled sampling code.
type StateHost struct {
	PercentageCPU,
	PercentageMemory,
	PercentageDisk,
	TotalCPU,
	TotalMemory,
	TotalDisk,
	UsedCPU,
	UsedMemory,
	UsedDisk float64
	Goroutines int
}
// Tick refreshes the memory-usage percentage and the goroutine count.
// CPU and disk sampling existed in the original but were commented out.
//
// FIX: the gopsutil error is no longer ignored — on failure memoryStat is
// nil and the original dereferenced it unconditionally.
func (c *StateHost) Tick() {
	memoryStat, err := mem.VirtualMemory()
	if err == nil && memoryStat != nil {
		c.PercentageMemory = math.Round(memoryStat.UsedPercent)
	}
	c.Goroutines = runtime.NumGoroutine()
}

217
vendor/git.lowcodeplatform.net/fabric/lib/vfs.go

@ -0,0 +1,217 @@
// Package lib/vfs позволяет хранить файлы на разных источниках без необходимости учитывать особенности
// каждой реализации файлового хранилища
// поддерживаются local, s3, azure (остальные активировать по-необходимости)
package lib
import (
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/graymeta/stow"
"github.com/graymeta/stow/azure"
"github.com/graymeta/stow/local"
"github.com/graymeta/stow/s3"
// support Azure storage
_ "github.com/graymeta/stow/azure"
// support Google storage
_ "github.com/graymeta/stow/google"
// support local storage
_ "github.com/graymeta/stow/local"
// support swift storage
_ "github.com/graymeta/stow/swift"
// support s3 storage
_ "github.com/graymeta/stow/s3"
// support oracle storage
_ "github.com/graymeta/stow/oracle"
)
// vfs is the concrete storage handle; kind selects the backend
// ("s3", "azure", "local", ...).
type vfs struct {
	bucket                                         string
	kind, endpoint, accessKeyID, secretKey, region string
	location                                       stow.Location
	container                                      stow.Container
	comma                                          string // optional separator substituted for "/" on flat stores
}

// Vfs abstracts file storage so callers need not care which backend
// (local, s3, azure, ...) actually holds the files.
type Vfs interface {
	List(prefix string, pageSize int) (files []Item, err error)
	Read(file string) (data []byte, mimeType string, err error)
	ReadFromBucket(file, bucket string) (data []byte, mimeType string, err error)
	Write(file string, data []byte) (err error)
	Connect() (err error)
	Close() (err error)
}

// Item re-exports stow.Item so callers do not import stow directly.
type Item interface {
	stow.Item
}
// Connect dials the configured storage backend, makes sure the configured
// bucket exists (creating it when missing) and binds v.container to it.
//
// FIX: the Dial and Container failure messages were copy-pasted from the
// CreateContainer branch ("error create container..."), which made failures
// impossible to tell apart; each step now reports itself.
func (v *vfs) Connect() (err error) {
	var config = stow.ConfigMap{}
	var flagBucketExist bool
	if v.region == "" {
		v.region = "eu-west-1" // default region when none is configured
	}
	switch v.kind {
	case "s3":
		config = stow.ConfigMap{
			s3.ConfigEndpoint:    v.endpoint,
			s3.ConfigAccessKeyID: v.accessKeyID,
			s3.ConfigSecretKey:   v.secretKey,
			s3.ConfigRegion:      v.region,
		}
	case "azure":
		config = stow.ConfigMap{
			azure.ConfigAccount: v.accessKeyID,
			azure.ConfigKey:     v.secretKey,
		}
	case "local":
		config = stow.ConfigMap{
			local.ConfigKeyPath: v.endpoint,
			local.MetadataDir:   v.bucket,
		}
	}
	// connect to the storage location
	v.location, err = stow.Dial(v.kind, config)
	if err != nil {
		return fmt.Errorf("error dial storage location from config. err: %s", err)
	}
	// look for the configured bucket among the existing containers
	err = stow.WalkContainers(v.location, stow.NoPrefix, 10000, func(c stow.Container, err error) error {
		if err != nil {
			return err
		}
		if c.Name() == v.bucket {
			flagBucketExist = true
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("error list to containers from config. err: %s", err)
	}
	// create the bucket when it does not exist yet
	if !flagBucketExist {
		v.container, err = v.location.CreateContainer(v.bucket)
		if err != nil {
			return fmt.Errorf("error create container from config. err: %s", err)
		}
	}
	// bind the configured container
	v.container, err = v.location.Container(v.bucket)
	if err != nil {
		return fmt.Errorf("error get container from config. err: %s", err)
	}
	return nil
}
// Close releases the underlying storage location.
func (v *vfs) Close() (err error) {
	return v.location.Close()
}
// Read reads the file at the given path from the project's own bucket.
func (v *vfs) Read(file string) (data []byte, mimeType string, err error) {
	data, mimeType, err = v.ReadFromBucket(file, v.bucket)
	return
}
// ReadFromBucket reads the file at the given path from the given bucket
// and returns its content together with the detected MIME type.
func (v *vfs) ReadFromBucket(file, bucket string) (data []byte, mimeType string, err error) {
	var urlPath url.URL

	// If a custom separator is configured, map it back to the real path
	// separator (compatibility with flat storages).
	if v.comma != "" {
		file = strings.Replace(file, v.comma, sep, -1)
	}

	if v.kind == "local" {
		// Local storage addresses files as endpoint/bucket/path.
		file = v.endpoint + sep + bucket + sep + file
		// collapse accidental double separators
		file = strings.Replace(file, sep+sep, sep, -1)
	} else {
		// Legacy: strip the "upload/<bucket>" prefix used by the old local
		// storage layout; remove once every site uses vfs exclusively.
		localPrefix := "upload" + sep + bucket
		file = strings.Replace(file, localPrefix, "", -1)
		file = strings.Replace(file, sep+sep, sep, -1)
	}

	urlPath.Host = bucket
	urlPath.Path = file

	item, err := v.location.ItemByURL(&urlPath)
	if err != nil || item == nil {
		return data, mimeType, err
	}

	// The previous version shadowed err here, silently discarding Open and
	// ReadAll failures, and never closed the reader.
	r, err := item.Open()
	if err != nil {
		return data, mimeType, err
	}
	defer r.Close()

	data, err = ioutil.ReadAll(r)
	if err != nil {
		return data, mimeType, err
	}
	mimeType = detectMIME(data, file) // detect MIME type of the returned file

	return data, mimeType, err
}
// Write stores data under the given path in the project bucket.
func (v *vfs) Write(file string, data []byte) (err error) {
	// If a custom separator is configured, replace the path separator with
	// it (compatibility with flat storages).
	if v.comma != "" {
		file = strings.Replace(file, sep, v.comma, -1)
	}
	// bytes.NewReader reads the payload in place; the old code copied it
	// through a string just to build a strings.Reader.
	_, err = v.container.Put(file, bytes.NewReader(data), int64(len(data)), nil)
	return err
}
// List returns the items under the given prefix, walking pageSize entries
// per page.
//
// NOTE(review): the previous debug Printf dumped the whole vfs struct with
// %+v — including accessKeyID and secretKey — on every call; removed to
// stop leaking credentials into logs.
func (v *vfs) List(prefix string, pageSize int) (files []Item, err error) {
	err = stow.Walk(v.container, prefix, pageSize, func(item stow.Item, werr error) error {
		if werr != nil {
			return fmt.Errorf("walk prefix %q: %w", prefix, werr)
		}
		files = append(files, item)
		return nil
	})
	return files, err
}
// NewVfs builds a Vfs client for the given storage kind, connection
// settings and default bucket. Call Connect on the result before use.
func NewVfs(kind, endpoint, accessKeyID, secretKey, region, bucket, comma string) Vfs {
	client := vfs{
		kind:        kind,
		endpoint:    endpoint,
		accessKeyID: accessKeyID,
		secretKey:   secretKey,
		region:      region,
		bucket:      bucket,
		comma:       comma,
	}
	return &client
}

205
vendor/git.lowcodeplatform.net/fabric/lib/vfs_ext.go

@ -0,0 +1,205 @@
package lib
import (
"github.com/gabriel-vasile/mimetype"
"path/filepath"
"sync"
)
// mimeDetector maps a file extension (with the leading dot) to its MIME
// type. It is populated once, during package initialization, by the
// addMIME calls below; afterwards it is only read (see detectMIME).
var mimeDetector = map[string]string{}

// mu guards mimeDetector for readers; the init-time writes above happen
// before any reader can run.
var mu sync.RWMutex

// Package-level vars exist only to run addMIME during init. Within this
// block the variables have no interdependencies, so they initialize in
// declaration order — for duplicate extensions the LAST entry wins.
// Duplicates are therefore ordered so the canonical type is registered
// last (.png, .heic, .heif, .cab were previously shadowed by the wrong
// variant).
var (
	css       = addMIME("text/css", ".css")
	xz        = addMIME("application/x-xz", ".xz")
	gzip      = addMIME("application/gzip", ".gz")
	sevenZ    = addMIME("application/x-7z-compressed", ".7z")
	zipZ      = addMIME("application/zip", ".zip")
	tar       = addMIME("application/x-tar", ".tar")
	xar       = addMIME("application/x-xar", ".xar")
	bz2       = addMIME("application/x-bzip2", ".bz2")
	pdf       = addMIME("application/pdf", ".pdf")
	fdf       = addMIME("application/vnd.fdf", ".fdf")
	xlsx      = addMIME("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx")
	docx      = addMIME("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx")
	pptx      = addMIME("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx")
	epub      = addMIME("application/epub+zip", ".epub")
	jar       = addMIME("application/jar", ".jar")
	ole       = addMIME("application/x-ole-storage", "")
	msi       = addMIME("application/x-ms-installer", ".msi")
	aaf       = addMIME("application/octet-stream", ".aaf")
	doc       = addMIME("application/msword", ".doc")
	ppt       = addMIME("application/vnd.ms-powerpoint", ".ppt")
	pub       = addMIME("application/vnd.ms-publisher", ".pub")
	xls       = addMIME("application/vnd.ms-excel", ".xls")
	msg       = addMIME("application/vnd.ms-outlook", ".msg")
	ps        = addMIME("application/postscript", ".ps")
	fits      = addMIME("application/fits", ".fits")
	ogg       = addMIME("application/ogg", ".ogg")
	oggAudio  = addMIME("audio/ogg", ".oga")
	oggVideo  = addMIME("video/ogg", ".ogv")
	text      = addMIME("text/plain", ".txt")
	xml       = addMIME("text/xml", ".xml")
	jsonZ     = addMIME("application/json", ".json")
	har       = addMIME("application/json", ".har")
	csv       = addMIME("text/csv", ".csv")
	tsv       = addMIME("text/tab-separated-values", ".tsv")
	geoJSON   = addMIME("application/geo+json", ".geojson")
	ndJSON    = addMIME("application/x-ndjson", ".ndjson")
	html      = addMIME("text/html", ".html")
	php       = addMIME("text/x-php", ".php")
	rtf       = addMIME("text/rtf", ".rtf")
	js        = addMIME("application/javascript", ".js")
	srt       = addMIME("application/x-subrip", ".srt")
	vtt       = addMIME("text/vtt", ".vtt")
	lua       = addMIME("text/x-lua", ".lua")
	perl      = addMIME("text/x-perl", ".pl")
	python    = addMIME("text/x-python", ".py")
	tcl       = addMIME("text/x-tcl", ".tcl")
	vCard     = addMIME("text/vcard", ".vcf")
	iCalendar = addMIME("text/calendar", ".ics")
	svg       = addMIME("image/svg+xml", ".svg")
	rss       = addMIME("application/rss+xml", ".rss")
	owl2      = addMIME("application/owl+xml", ".owl")
	atom      = addMIME("application/atom+xml", ".atom")
	x3d       = addMIME("model/x3d+xml", ".x3d")
	kml       = addMIME("application/vnd.google-earth.kml+xml", ".kml")
	xliff     = addMIME("application/x-xliff+xml", ".xlf")
	collada   = addMIME("model/vnd.collada+xml", ".dae")
	gml       = addMIME("application/gml+xml", ".gml")
	gpx       = addMIME("application/gpx+xml", ".gpx")
	tcx       = addMIME("application/vnd.garmin.tcx+xml", ".tcx")
	amf       = addMIME("application/x-amf", ".amf")
	threemf   = addMIME("application/vnd.ms-package.3dmanufacturing-3dmodel+xml", ".3mf")
	// apng before png: ".png" must resolve to the canonical image/png.
	apng      = addMIME("image/vnd.mozilla.apng", ".png")
	png       = addMIME("image/png", ".png")
	jpg       = addMIME("image/jpeg", ".jpg")
	jxl       = addMIME("image/jxl", ".jxl")
	jp2       = addMIME("image/jp2", ".jp2")
	jpx       = addMIME("image/jpx", ".jpf")
	jpm       = addMIME("image/jpm", ".jpm")
	xpm       = addMIME("image/x-xpixmap", ".xpm")
	bpg       = addMIME("image/bpg", ".bpg")
	gif       = addMIME("image/gif", ".gif")
	webp      = addMIME("image/webp", ".webp")
	tiff      = addMIME("image/tiff", ".tiff")
	bmp       = addMIME("image/bmp", ".bmp")
	ico       = addMIME("image/x-icon", ".ico")
	icns      = addMIME("image/x-icns", ".icns")
	psd       = addMIME("image/vnd.adobe.photoshop", ".psd")
	// sequence variants before the base types so ".heic"/".heif" resolve
	// to image/heic and image/heif.
	heicSeq   = addMIME("image/heic-sequence", ".heic")
	heic      = addMIME("image/heic", ".heic")
	heifSeq   = addMIME("image/heif-sequence", ".heif")
	heif      = addMIME("image/heif", ".heif")
	hdr       = addMIME("image/vnd.radiance", ".hdr")
	avif      = addMIME("image/avif", ".avif")
	mp3       = addMIME("audio/mpeg", ".mp3")
	flac      = addMIME("audio/flac", ".flac")
	midi      = addMIME("audio/midi", ".midi")
	ape       = addMIME("audio/ape", ".ape")
	musePack  = addMIME("audio/musepack", ".mpc")
	wav       = addMIME("audio/wav", ".wav")
	aiff      = addMIME("audio/aiff", ".aiff")
	au        = addMIME("audio/basic", ".au")
	amr       = addMIME("audio/amr", ".amr")
	aac       = addMIME("audio/aac", ".aac")
	voc       = addMIME("audio/x-unknown", ".voc")
	aMp4      = addMIME("audio/mp4", ".mp4")
	m4a       = addMIME("audio/x-m4a", ".m4a")
	m3u       = addMIME("application/vnd.apple.mpegurl", ".m3u")
	m4v       = addMIME("video/x-m4v", ".m4v")
	mp4       = addMIME("video/mp4", ".mp4")
	webM      = addMIME("video/webm", ".webm")
	mpeg      = addMIME("video/mpeg", ".mpeg")
	quickTime = addMIME("video/quicktime", ".mov")
	mqv       = addMIME("video/quicktime", ".mqv")
	threeGP   = addMIME("video/3gpp", ".3gp")
	threeG2   = addMIME("video/3gpp2", ".3g2")
	avi       = addMIME("video/x-msvideo", ".avi")
	flv       = addMIME("video/x-flv", ".flv")
	mkv       = addMIME("video/x-matroska", ".mkv")
	asf       = addMIME("video/x-ms-asf", ".asf")
	rmvb      = addMIME("application/vnd.rn-realmedia-vbr", ".rmvb")
	class     = addMIME("application/x-java-applet", ".class")
	swf       = addMIME("application/x-shockwave-flash", ".swf")
	crx       = addMIME("application/x-chrome-extension", ".crx")
	ttf       = addMIME("font/ttf", ".ttf")
	woff      = addMIME("font/woff", ".woff")
	woff2     = addMIME("font/woff2", ".woff2")
	otf       = addMIME("font/otf", ".otf")
	ttc       = addMIME("font/collection", ".ttc")
	eot       = addMIME("application/vnd.ms-fontobject", ".eot")
	wasm      = addMIME("application/wasm", ".wasm")
	shp       = addMIME("application/vnd.shp", ".shp")
	shx       = addMIME("application/vnd.shx", ".shx")
	dbf       = addMIME("application/x-dbf", ".dbf")
	exe       = addMIME("application/vnd.microsoft.portable-executable", ".exe")
	elf       = addMIME("application/x-elf", "")
	elfObj    = addMIME("application/x-object", "")
	elfExe    = addMIME("application/x-executable", "")
	elfLib    = addMIME("application/x-sharedlib", ".so")
	elfDump   = addMIME("application/x-coredump", "")
	ar        = addMIME("application/x-archive", ".a")
	deb       = addMIME("application/vnd.debian.binary-package", ".deb")
	rpm       = addMIME("application/x-rpm", ".rpm")
	dcm       = addMIME("application/dicom", ".dcm")
	odt       = addMIME("application/vnd.oasis.opendocument.text", ".odt")
	ott       = addMIME("application/vnd.oasis.opendocument.text-template", ".ott")
	ods       = addMIME("application/vnd.oasis.opendocument.spreadsheet", ".ods")
	ots       = addMIME("application/vnd.oasis.opendocument.spreadsheet-template", ".ots")
	odp       = addMIME("application/vnd.oasis.opendocument.presentation", ".odp")
	otp       = addMIME("application/vnd.oasis.opendocument.presentation-template", ".otp")
	odg       = addMIME("application/vnd.oasis.opendocument.graphics", ".odg")
	otg       = addMIME("application/vnd.oasis.opendocument.graphics-template", ".otg")
	odf       = addMIME("application/vnd.oasis.opendocument.formula", ".odf")
	odc       = addMIME("application/vnd.oasis.opendocument.chart", ".odc")
	sxc       = addMIME("application/vnd.sun.xml.calc", ".sxc")
	rar       = addMIME("application/x-rar-compressed", ".rar")
	djvu      = addMIME("image/vnd.djvu", ".djvu")
	mobi      = addMIME("application/x-mobipocket-ebook", ".mobi")
	lit       = addMIME("application/x-ms-reader", ".lit")
	sqlite3   = addMIME("application/vnd.sqlite3", ".sqlite")
	dwg       = addMIME("image/vnd.dwg", ".dwg")
	warc      = addMIME("application/warc", ".warc")
	nes       = addMIME("application/vnd.nintendo.snes.rom", ".nes")
	lnk       = addMIME("application/x-ms-shortcut", ".lnk")
	macho     = addMIME("application/x-mach-binary", ".macho")
	qcp       = addMIME("audio/qcelp", ".qcp")
	mrc       = addMIME("application/marc", ".mrc")
	mdb       = addMIME("application/x-msaccess", ".mdb")
	accdb     = addMIME("application/x-msaccess", ".accdb")
	zstd      = addMIME("application/zstd", ".zst")
	// installshield variant before the standard one so ".cab" resolves to
	// application/vnd.ms-cab-compressed.
	cabIS     = addMIME("application/x-installshield", ".cab")
	cab       = addMIME("application/vnd.ms-cab-compressed", ".cab")
	lzip      = addMIME("application/lzip", ".lz")
	torrent   = addMIME("application/x-bittorrent", ".torrent")
	cpio      = addMIME("application/x-cpio", ".cpio")
	tzif      = addMIME("application/tzif", "")
	p7s       = addMIME("application/pkcs7-signature", ".p7s")
	xcf       = addMIME("image/x-xcf", ".xcf")
	pat       = addMIME("image/x-gimp-pat", ".pat")
	gbr       = addMIME("image/x-gimp-gbr", ".gbr")
	xfdf      = addMIME("application/vnd.adobe.xfdf", ".xfdf")
	glb       = addMIME("model/gltf-binary", ".glb")
)
// addMIME registers ext → mimeType in the package lookup table. It always
// returns nil; the error result exists only so the table can be populated
// from package-level var initializers.
func addMIME(mimeType, ext string) error {
	// Skip extensionless entries: they would all share the "" key, and the
	// last one registered used to win — detectMIME then returned that type
	// for every file without an extension instead of sniffing its content.
	if ext == "" {
		return nil
	}
	mimeDetector[ext] = mimeType
	return nil
}
// detectMIME resolves the MIME type of file: first by extension via the
// local lookup table, and only when that misses by sniffing the content
// with the mimetype package (slower per its docs, but a useful fallback).
func detectMIME(data []byte, file string) string {
	mu.RLock()
	byExt, found := mimeDetector[filepath.Ext(file)]
	mu.RUnlock()
	if found {
		return byExt
	}
	return mimetype.Detect(data).String()
}

2
vendor/git.lowcodeplatform.net/fabric/models/.gitconfig

@ -0,0 +1,2 @@
[url "ssh://git@git.lowcodeplatform.net/"]
insteadOf = https://git.lowcodeplatform.net/

9
vendor/git.lowcodeplatform.net/fabric/models/.gitignore

@ -0,0 +1,9 @@
.history
.idea
.vscode
.DS_Store
*~merged*
*~merged
/public
.env
local

3
vendor/git.lowcodeplatform.net/fabric/models/README.md

@ -0,0 +1,3 @@
# models
Модели общих сущностей проекта Buildbox Fabric

98
vendor/git.lowcodeplatform.net/fabric/models/code.go

@ -0,0 +1,98 @@
package models
import (
"encoding/json"
"fmt"
)
// StatusCode is the registry of canned REST statuses, keyed by mnemonic
// name; each value is {description, HTTP status, machine code, error}.
// Descriptions are user-facing (Russian) and must not be edited casually —
// clients may display or match on them.
var StatusCode = RStatus{
	"OK":                       {"Запрос выполнен", 200, "", nil},
	"OKLicenseActivation":      {"Лицензия была активирована", 200, "", nil},
	"Unauthorized":             {"Ошибка авторизации", 401, "", nil},
	"NotCache":                 {"Доступно только в Турбо-режиме", 200, "", nil},
	"NotStatus":                {"Ответ сервера не содержит статус выполнения запроса", 501, "", nil},
	"NotExtended":              {"На сервере отсутствует расширение, которое желает использовать клиент", 501, "", nil},
	"ErrorFormatJson":          {"Ошибка формата JSON-запроса", 500, "ErrorFormatJson", nil},
	"ErrorTransactionFalse":    {"Ошибка выполнения тразакции SQL", 500, "ErrorTransactionFalse", nil},
	"ErrorBeginDB":             {"Ошибка подключения к БД", 500, "ErrorBeginDB", nil},
	"ErrorPrepareSQL":          {"Ошибка подготовки запроса SQL", 500, "ErrorPrepareSQL", nil},
	"ErrorNullParameter":       {"Ошибка! Не передан параметр", 503, "ErrorNullParameter", nil},
	"ErrorQuery":               {"Ошибка запроса на выборку данных", 500, "ErrorQuery", nil},
	"ErrorScanRows":            {"Ошибка переноса данных из запроса в объект", 500, "ErrorScanRows", nil},
	// NOTE(review): code "ErrorScanRows" below looks copy-pasted from the
	// entry above (expected "ErrorNullFields"); left unchanged because
	// clients may already match on it — confirm before fixing.
	"ErrorNullFields":          {"Не все поля заполнены", 500, "ErrorScanRows", nil},
	"ErrorAccessType":          {"Ошибка доступа к элементу типа", 500, "ErrorAccessType", nil},
	"ErrorGetData":             {"Ошибка доступа данным объекта", 500, "ErrorGetData", nil},
	"ErrorRevElement":          {"Значение было изменено ранее.", 409, "ErrorRevElement", nil},
	"ErrorForbiddenElement":    {"Значение занято другим пользователем.", 403, "ErrorForbiddenElement", nil},
	"ErrorUnprocessableEntity": {"Необрабатываемый экземпляр", 422, "ErrorUnprocessableEntity", nil},
	"ErrorNotFound":            {"Значение не найдено", 404, "ErrorNotFound", nil},
	"ErrorReadDir":             {"Ошибка чтения директории", 403, "ErrorReadDir", nil},
	"ErrorReadConfigDir":       {"Ошибка чтения директории конфигураций", 403, "ErrorReadConfigDir", nil},
	// NOTE(review): key/code below are lower-cased unlike every sibling.
	"errorOpenConfigDir":       {"Ошибка открытия директории конфигураций", 403, "errorOpenConfigDir", nil},
	"ErrorReadConfigFile":      {"Ошибка чтения файла конфигураций", 403, "ErrorReadConfigFile", nil},
	"ErrorReadLogFile":         {"Ошибка чтения файла логирования", 403, "ErrorReadLogFile", nil},
	"ErrorScanLogFile":         {"Ошибка построчного чтения файла логирования", 403, "ErrorScanLogFile", nil},
	"ErrorPortBusy":            {"Указанный порт занят", 403, "ErrorPortBusy", nil},
	"ErrorGone":                {"Объект был удален ранее", 410, "ErrorGone", nil},
	"ErrorShema":               {"Ошибка формата заданной схемы формирования запроса", 410, "ErrorShema", nil},
	"ErrorInitBase":            {"Ошибка инициализации новой базы данных", 410, "ErrorInitBase", nil},
	"ErrorCreateCacheRecord":   {"Ошибка создания объекта в кеше", 410, "ErrorCreateCacheRecord", nil},
	"ErrorUpdateParams":        {"Не переданы параметры для обновления серверов (сервер источник, сервер получатель)", 410, "ErrorUpdateParams", nil},
	"ErrorIntervalProxy":       {"Ошибка переданного интервала (формат: 1000:2000)", 410, "ErrorIntervalProxy", nil},
	"ErrorReservPortProxy":     {"Ошибка выделения порта proxy-сервером", 410, "ErrorReservPortProxy", nil},
}
// RStatus maps a mnemonic status name to its REST status entry.
type RStatus map[string]RestStatus

// RestStatus describes the outcome of a request: a human-readable
// description, the HTTP status, a machine-readable code and an optional
// underlying error.
type RestStatus struct {
	Description string `json:"description"`
	Status      int    `json:"status"`
	Code        string `json:"code"`
	Error       error  `json:"error"`
}

// MarshalJSON serializes the status; a nil Error is rendered as the empty
// string rather than "<nil>".
func (r RestStatus) MarshalJSON() ([]byte, error) {
	type wire struct {
		Description string `json:"description"`
		Status      int    `json:"status"`
		Code        string `json:"code"`
		Error       string `json:"error"`
	}
	out := wire{
		Description: r.Description,
		Status:      r.Status,
		Code:        r.Code,
	}
	if r.Error != nil {
		out.Error = fmt.Sprint(r.Error)
	}
	return json.Marshal(out)
}
// UnmarshalJSON deserializes a status. The receiver is a pointer so the
// decoded fields actually reach the caller — the previous value receiver
// mutated a copy and silently discarded everything it decoded.
func (r *RestStatus) UnmarshalJSON(b []byte) error {
	type wire struct {
		Description string `json:"description"`
		Status      int    `json:"status"`
		Code        string `json:"code"`
		Error       string `json:"error"`
	}
	var t wire
	if err := json.Unmarshal(b, &t); err != nil {
		return err
	}
	r.Description = t.Description
	r.Code = t.Code
	r.Status = t.Status
	// An empty error string means "no error". The previous condition was
	// inverted: it set Error to nil exactly when an error string WAS
	// present, and vice versa.
	if t.Error == "" {
		r.Error = nil
	} else {
		r.Error = fmt.Errorf("%s", t.Error)
	}
	return nil
}

151
vendor/git.lowcodeplatform.net/fabric/models/data.go

@ -0,0 +1,151 @@
package models
// Data is the generic platform object: identity/header fields plus a
// bag of named attributes.
type Data struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): the first letter of Сopies is the Cyrillic "С"
	// (U+0421), not the Latin "C". Renaming would break external callers
	// that reference it as-is, so it is left untouched.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
}

// Attribute is one named attribute of a Data object.
type Attribute struct {
	Value  string `json:"value"`
	Src    string `json:"src"`
	Tpls   string `json:"tpls"`
	Status string `json:"status"`
	Rev    string `json:"rev"`
	Editor string `json:"editor"`
}

// Response is a generic API envelope: payload, status and paging metrics.
type Response struct {
	Data    interface{} `json:"data"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// ResponseData is the envelope for list-of-Data payloads.
type ResponseData struct {
	Data    []Data      `json:"data"`
	Res     interface{} `json:"res"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// Metrics carries result-set size, timing and pagination information for
// a response.
type Metrics struct {
	ResultSize    int    `json:"result_size"`
	ResultCount   int    `json:"result_count"`
	ResultOffset  int    `json:"result_offset"`
	ResultLimit   int    `json:"result_limit"`
	ResultPage    int    `json:"result_page"`
	TimeExecution string `json:"time_execution"`
	TimeQuery     string `json:"time_query"`
	PageLast      int    `json:"page_last"`
	PageCurrent   int    `json:"page_current"`
	PageList      []int  `json:"page_list"`
	PageFrom      int    `json:"page_from"`
	PageTo        int    `json:"page_to"`
}
// Attr returns the requested element (src/value/tpls/rev/status) of the
// attribute called name; header fields (uid/source/id/title/type) are also
// addressable, both as element names and — when the attribute is absent —
// as the attribute name itself. found reports whether anything matched.
func (p *Data) Attr(name, element string) (result string, found bool) {
	attr, ok := p.Attributes[name]
	if !ok {
		// No such attribute — name may still address a header field.
		switch name {
		case "uid":
			return p.Uid, true
		case "source":
			return p.Source, true
		case "id":
			return p.Id, true
		case "title":
			return p.Title, true
		case "type":
			return p.Type, true
		}
		return "", false
	}

	// Fix for objects that carry a hidden Uid attribute: always answer
	// with the header Uid.
	if name == "uid" {
		return p.Uid, true
	}

	// Single map lookup (the previous version re-indexed p.Attributes[name]
	// in every case arm).
	switch element {
	case "src":
		return attr.Src, true
	case "value":
		return attr.Value, true
	case "tpls":
		return attr.Tpls, true
	case "rev":
		return attr.Rev, true
	case "status":
		return attr.Status, true
	case "uid":
		return p.Uid, true
	case "source":
		return p.Source, true
	case "id":
		return p.Id, true
	case "title":
		return p.Title, true
	case "type":
		return p.Type, true
	}
	return "", false
}
// AttrSet replaces one element (src/value/tpls/rev/status) of the
// attribute called name. It returns false when the attribute does not
// exist; an unknown element name leaves the attribute unchanged but, as
// before, still reports true.
func (p *Data) AttrSet(name, element, value string) bool {
	// Direct lookup — the previous version ranged over the whole map twice
	// (O(n)) just to find and then re-store one key.
	attr, ok := p.Attributes[name]
	if !ok {
		return false
	}
	switch element {
	case "src":
		attr.Src = value
	case "value":
		attr.Value = value
	case "tpls":
		attr.Tpls = value
	case "rev":
		attr.Rev = value
	case "status":
		attr.Status = value
	}
	p.Attributes[name] = attr
	return true
}
// RemoveData deletes the element at index i from Data, reporting whether
// a removal happened. Out-of-range indexes — including negative ones,
// which previously caused a slice-bounds panic — return false and leave
// the slice untouched.
func (p *ResponseData) RemoveData(i int) bool {
	if i < 0 || i >= len(p.Data) {
		return false
	}
	p.Data = append(p.Data[:i], p.Data[i+1:]...)
	return true
}

23
vendor/git.lowcodeplatform.net/fabric/models/ping.go

@ -0,0 +1,23 @@
package models
// Pong is the response a service returns to the proxy's periodic health
// poll (ping).
type Pong struct {
	Uid      string `json:"uid"`
	Name     string `json:"name"`
	Version  string `json:"version"`
	Status   string `json:"status"`
	Port     int    `json:"port"`
	Pid      string `json:"pid"`
	State    string `json:"state"`
	Replicas int    `json:"replicas"`
	Https    bool   `json:"https"`
	DeadTime int64  `json:"dead_time"`
	Follower string `json:"follower"`
}

// Hosts describes a host together with its port range and protocol.
type Hosts struct {
	Host     string `json:"host"`
	PortFrom int    `json:"portfrom"`
	PortTo   int    `json:"portto"`
	Protocol string `json:"protocol"`
}

46
vendor/git.lowcodeplatform.net/fabric/models/profile.go

@ -0,0 +1,46 @@
package models
// ProfileData is the user profile attached to a session.
type ProfileData struct {
	// Revision of the current session: when the session is refreshed (the
	// profile changes) the session ID persists but the revision changes.
	Revision       string `json:"revision"`
	Hash           string `json:"hash"`
	Email          string `json:"email"`
	Uid            string `json:"uid"`
	ObjUid         string `json:"obj_uid"`
	FirstName      string `json:"first_name"`
	LastName       string `json:"last_name"`
	Photo          string `json:"photo"`
	Age            string `json:"age"`
	City           string `json:"city"`
	Country        string `json:"country"`
	Oauth_identity string `json:"oauth_identity"`
	// Status mirrors the src of the Status field in the profile (sometimes
	// needed for extra filtering).
	Status string `json:"status"`
	// Raw is the user object itself (needed when building a project for
	// this user while granting database permissions).
	Raw            []Data            `json:"raw"`
	Tables         []Data            `json:"tables"`
	Roles          map[string]string `json:"roles"`
	Homepage       string            `json:"homepage"`
	Maket          string            `json:"maket"`
	UpdateFlag     bool              `json:"update_flag"`
	UpdateData     []Data            `json:"update_data"`
	CurrentRole    Data              `json:"current_role"`
	CurrentProfile Data              `json:"current_profile"`
	Navigator      []*Items          `json:"navigator"`
}

// Items is one navigation entry; Incl holds nested entries and Sub the
// uids of children to be nested (see the tree-building code).
type Items struct {
	Title        string   `json:"title"`
	ExtentedLink string   `json:"extentedLink"`
	Uid          string   `json:"uid"`
	Source       string   `json:"source"`
	Icon         string   `json:"icon"`
	Leader       string   `json:"leader"`
	Order        string   `json:"order"`
	Type         string   `json:"type"`
	Preview      string   `json:"preview"`
	Url          string   `json:"url"`
	Sub          []string `json:"sub"`
	Incl         []*Items `json:"incl"`
	Class        string   `json:"class"`
}

20
vendor/git.lowcodeplatform.net/fabric/models/token.go

@ -0,0 +1,20 @@
package models
import "github.com/golang-jwt/jwt"
// Token is the platform's JWT claim set: the standard JWT claims plus
// platform-specific identity and session fields.
type Token struct {
	Uid     string
	Role    string
	Profile string
	Groups  string
	Local   string
	Type    string
	Session string
	// SessionRev is the revision of the current session: when the session
	// is refreshed (the profile changes) the session ID persists but the
	// revision changes.
	SessionRev string
	jwt.StandardClaims
}

// Roles is a role reference: display title plus object uid.
type Roles struct {
	Title string
	Uid   string
}

50
vendor/git.lowcodeplatform.net/fabric/models/tree.go

@ -0,0 +1,50 @@
package models
// DataTree is a Data object extended with nesting information: Sub lists
// the uids of children, Incl holds the relocated child nodes (pointers).
type DataTree struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): the first letter of Сopies is the Cyrillic "С"
	// (U+0421), not the Latin "C" — same as in models.Data; renaming would
	// break external callers.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`
	Incl       []*DataTree          `json:"incl"`
}

// DataTreeOut is the output variant of DataTree, with children embedded
// by value instead of by pointer.
type DataTreeOut struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): Cyrillic "С" here as well (see DataTree).
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`
	Incl       []DataTree           `json:"incl"`
}
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// ScanSub relocates entries of maps into this node's Incl list according
// to the nesting declared in Sub, removing each moved entry from the map
// and recursing into the relocated copy.
// (Known duplicate of a similar Items method — candidate for dedup.)
func (p *DataTree) ScanSub(maps *map[string]*DataTree) {
	if len(p.Sub) == 0 {
		return
	}
	m := *maps
	for _, key := range p.Sub {
		src := m[key]
		if src == nil {
			continue
		}
		child := *src
		p.Incl = append(p.Incl, &child)
		delete(m, key)
		child.ScanSub(maps)
	}
}

202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE

@ -0,0 +1,5 @@
Microsoft Azure-SDK-for-Go
Copyright 2014-2017 Microsoft
This product includes software developed at
the Microsoft Corporation (https://www.microsoft.com).

22
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md

@ -0,0 +1,22 @@
# Azure Storage SDK for Go (Preview)
:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the
future. Please use one of the following packages instead.
| Service | Import Path/Repo |
|---------|------------------|
| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) |
| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) |
| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) |
The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage
[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane
resources: containers, blobs, tables, and queues.
To manage storage *accounts* use Azure Resource Manager (ARM) via the packages
at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage).
This package also supports the [Azure Storage
Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
(Windows only).

91
vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go

@ -0,0 +1,91 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"time"
)
// PutAppendBlob creates an empty append blob with the blob's name. An append
// blob must be created with this call before any blocks can be added to it
// with AppendBlock.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
	query := url.Values{}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	hdrs["x-ms-blob-type"] = string(BlobTypeAppend)
	// Blob properties and user metadata are sent along with the creation.
	hdrs = mergeHeaders(hdrs, headersFromStruct(b.Properties))
	hdrs = b.Container.bsc.client.addMetadataToHeaders(hdrs, b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	res, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(res, BlobTypeAppend)
}
// AppendBlockOptions includes the options for an append block operation
type AppendBlockOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	MaxSize           *uint      `header:"x-ms-blob-condition-maxsize"`
	AppendPosition    *uint      `header:"x-ms-blob-condition-appendpos"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
	ContentMD5        bool
}

// AppendBlock appends chunk to the end of an existing append blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
	query := url.Values{"comp": {"appendblock"}}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	hdrs["x-ms-blob-type"] = string(BlobTypeAppend)
	hdrs["Content-Length"] = fmt.Sprintf("%v", len(chunk))
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
		if options.ContentMD5 {
			// Send the chunk's MD5 so the service can verify the upload.
			sum := md5.Sum(chunk)
			hdrs[headerContentMD5] = base64.StdEncoding.EncodeToString(sum[:])
		}
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	res, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, bytes.NewReader(chunk), b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(res, BlobTypeAppend)
}

246
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go

@ -0,0 +1,246 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"net/url"
"sort"
"strings"
)
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services

// authentication identifies which Azure Storage request-signing scheme is in
// use; it selects both the string-to-sign layout and the Authorization
// header scheme name.
type authentication string

const (
	// Signing schemes: the "Lite" variants sign fewer fields, and the
	// "Table" variants use the table-service string-to-sign layout.
	sharedKey             authentication = "sharedKey"
	sharedKeyForTable     authentication = "sharedKeyTable"
	sharedKeyLite         authentication = "sharedKeyLite"
	sharedKeyLiteForTable authentication = "sharedKeyLiteTable"

	// headers — canonical names of the HTTP headers this package reads or
	// writes when building and signing requests.
	headerAcceptCharset           = "Accept-Charset"
	headerAuthorization           = "Authorization"
	headerContentLength           = "Content-Length"
	headerDate                    = "Date"
	headerXmsDate                 = "x-ms-date"
	headerXmsVersion              = "x-ms-version"
	headerContentEncoding         = "Content-Encoding"
	headerContentLanguage         = "Content-Language"
	headerContentType             = "Content-Type"
	headerContentMD5              = "Content-MD5"
	headerIfModifiedSince         = "If-Modified-Since"
	headerIfMatch                 = "If-Match"
	headerIfNoneMatch             = "If-None-Match"
	headerIfUnmodifiedSince       = "If-Unmodified-Since"
	headerRange                   = "Range"
	headerDataServiceVersion      = "DataServiceVersion"
	headerMaxDataServiceVersion   = "MaxDataServiceVersion"
	headerContentTransferEncoding = "Content-Transfer-Encoding"
)
// addAuthorizationHeader signs the request and stores the result in the
// Authorization header. SAS clients carry their credentials in the URL, so
// for them the headers are returned unchanged.
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
	if c.sasClient {
		return headers, nil
	}
	authHeader, err := c.getSharedKey(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}
	headers[headerAuthorization] = authHeader
	return headers, nil
}

// getSharedKey computes the shared-key Authorization header value for the
// given request by canonicalizing the resource, building the string-to-sign,
// and signing it.
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
	canRes, err := c.buildCanonicalizedResource(url, auth, false)
	if err != nil {
		return "", err
	}
	canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
	if err != nil {
		return "", err
	}
	return c.createAuthorizationHeader(canString, auth), nil
}
// buildCanonicalizedResource builds the CanonicalizedResource portion of the
// string-to-sign from the request URI: "/<account><escaped-path>" followed by
// either the full sorted query (sharedKey) or just the "comp" parameter
// (all other schemes).
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
	const errMsg = "buildCanonicalizedResource error: %w"
	u, err := url.Parse(uri)
	if err != nil {
		return "", fmt.Errorf(errMsg, err)
	}
	cr := bytes.NewBufferString("")
	// The account prefix is omitted only for SAS URIs against the emulator.
	if c.accountName != StorageEmulatorAccountName || !sas {
		cr.WriteString("/")
		cr.WriteString(c.getCanonicalizedAccountName())
	}
	if len(u.Path) > 0 {
		// Any portion of the CanonicalizedResource string that is derived from
		// the resource's URI should be encoded exactly as it is in the URI.
		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
		cr.WriteString(u.EscapedPath())
	}
	params, err := url.ParseQuery(u.RawQuery)
	if err != nil {
		return "", fmt.Errorf(errMsg, err)
	}
	// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
	if auth == sharedKey {
		if len(params) > 0 {
			cr.WriteString("\n")
			keys := make([]string, 0, len(params))
			for key := range params {
				keys = append(keys, key)
			}
			sort.Strings(keys)
			completeParams := make([]string, 0, len(keys))
			for _, key := range keys {
				// Multi-valued parameters are signed as a sorted, comma-joined list.
				if len(params[key]) > 1 {
					sort.Strings(params[key])
				}
				completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
			}
			cr.WriteString(strings.Join(completeParams, "\n"))
		}
	} else {
		// Non-sharedKey schemes sign only the "comp" parameter, if present.
		if v, ok := params["comp"]; ok {
			cr.WriteString("?comp=" + v[0])
		}
	}
	return cr.String(), nil
}
// getCanonicalizedAccountName returns the account name with any "-secondary"
// suffix removed: requests to the secondary endpoint must still be signed
// with the primary account name.
func (c *Client) getCanonicalizedAccountName() string {
	const secondarySuffix = "-secondary"
	name := c.accountName
	if strings.HasSuffix(name, secondarySuffix) {
		name = name[:len(name)-len(secondarySuffix)]
	}
	return name
}
// buildCanonicalizedString assembles the string-to-sign for the given
// authentication scheme. The exact set and order of joined fields in each
// case is mandated by the service's signing specification and must not be
// changed.
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
	// A zero Content-Length is signed as an empty string.
	contentLength := headers[headerContentLength]
	if contentLength == "0" {
		contentLength = ""
	}
	// When x-ms-date is present, sharedKey/sharedKeyLite sign an empty Date
	// field (x-ms-date is covered by the canonicalized headers); the table
	// variants sign the x-ms-date value in the Date slot instead.
	date := headers[headerDate]
	if v, ok := headers[headerXmsDate]; ok {
		if auth == sharedKey || auth == sharedKeyLite {
			date = ""
		} else {
			date = v
		}
	}
	var canString string
	switch auth {
	case sharedKey:
		canString = strings.Join([]string{
			verb,
			headers[headerContentEncoding],
			headers[headerContentLanguage],
			contentLength,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			headers[headerIfModifiedSince],
			headers[headerIfMatch],
			headers[headerIfNoneMatch],
			headers[headerIfUnmodifiedSince],
			headers[headerRange],
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyForTable:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			canonicalizedResource,
		}, "\n")
	case sharedKeyLite:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyLiteForTable:
		canString = strings.Join([]string{
			date,
			canonicalizedResource,
		}, "\n")
	default:
		return "", fmt.Errorf("%s authentication is not supported yet", auth)
	}
	return canString, nil
}
// buildCanonicalizedHeader assembles the CanonicalizedHeaders portion of the
// string-to-sign: every "x-ms-"-prefixed header, lowercased and trimmed,
// sorted by name and joined as "name:value" lines. Returns "" when no such
// headers exist.
func buildCanonicalizedHeader(headers map[string]string) string {
	cm := make(map[string]string, len(headers))
	for k, v := range headers {
		name := strings.TrimSpace(strings.ToLower(k))
		if strings.HasPrefix(name, "x-ms-") {
			cm[name] = v
		}
	}
	if len(cm) == 0 {
		return ""
	}
	keys := make([]string, 0, len(cm))
	for key := range cm {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	// Join the pre-sized line slice instead of buffering and trimming a
	// trailing newline.
	lines := make([]string, 0, len(keys))
	for _, key := range keys {
		lines = append(lines, key+":"+cm[key])
	}
	return strings.Join(lines, "\n")
}
// createAuthorizationHeader produces the Authorization header value in the
// form "<scheme> <account>:<signature>", where the signature is the
// HMAC-SHA256 of the canonicalized string.
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
	scheme := ""
	switch auth {
	case sharedKey, sharedKeyForTable:
		scheme = "SharedKey"
	case sharedKeyLite, sharedKeyLiteForTable:
		scheme = "SharedKeyLite"
	}
	sig := c.computeHmac256(canonicalizedString)
	return fmt.Sprintf("%s %s:%s", scheme, c.getCanonicalizedAccountName(), sig)
}

632
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go

@ -0,0 +1,632 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// A Blob is an entry in BlobListResponse. Container links the blob back to
// its parent container (and thus to the client used for every request); the
// remaining fields mirror the service's XML list-response schema.
type Blob struct {
	Container  *Container
	Name       string         `xml:"Name"`
	Snapshot   time.Time      `xml:"Snapshot"`
	Properties BlobProperties `xml:"Properties"`
	Metadata   BlobMetadata   `xml:"Metadata"`
}
// PutBlobOptions includes the options any put blob operation
// (page, block, append).
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout is added as a query parameter by addTimeout.
type PutBlobOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string
type blobMetadataEntries struct {
Entries []blobMetadataEntry `xml:",any"`
}
type blobMetadataEntry struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
// UnmarshalXML converts the xml:Metadata into Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var entries blobMetadataEntries
if err := d.DecodeElement(&entries, &start); err != nil {
return err
}
for _, entry := range entries.Entries {
if *bm == nil {
*bm = make(BlobMetadata)
}
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
}
return nil
}
// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
entries := make([]blobMetadataEntry, 0, len(bm))
for k, v := range bm {
entries = append(entries, blobMetadataEntry{
XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
Value: v,
})
}
return enc.EncodeElement(blobMetadataEntries{
Entries: entries,
}, start)
}
// BlobProperties contains various properties of a blob
// returned in various endpoints like ListBlobs or GetBlobProperties.
//
// xml tags map list-response elements; header tags are used when sending
// properties back to the service (via headersFromStruct).
type BlobProperties struct {
	LastModified          TimeRFC1123 `xml:"Last-Modified"`
	Etag                  string      `xml:"Etag"`
	ContentMD5            string      `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
	ContentLength         int64       `xml:"Content-Length"`
	ContentType           string      `xml:"Content-Type" header:"x-ms-blob-content-type"`
	ContentEncoding       string      `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
	CacheControl          string      `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
	// NOTE(review): the xml tag "Cache-Language" looks like a typo for
	// "Content-Language" — preserved as-is; verify against the service's
	// list-response schema before changing.
	ContentLanguage       string      `xml:"Cache-Language" header:"x-ms-blob-content-language"`
	ContentDisposition    string      `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
	BlobType              BlobType    `xml:"BlobType"`
	SequenceNumber        int64       `xml:"x-ms-blob-sequence-number"` // page blobs only
	CopyID                string      `xml:"CopyId"`
	CopyStatus            string      `xml:"CopyStatus"`
	CopySource            string      `xml:"CopySource"`
	CopyProgress          string      `xml:"CopyProgress"`
	CopyCompletionTime    TimeRFC1123 `xml:"CopyCompletionTime"`
	CopyStatusDescription string      `xml:"CopyStatusDescription"`
	LeaseStatus           string      `xml:"LeaseStatus"`
	LeaseState            string      `xml:"LeaseState"`
	LeaseDuration         string      `xml:"LeaseDuration"`
	ServerEncrypted       bool        `xml:"ServerEncrypted"`
	IncrementalCopy       bool        `xml:"IncrementalCopy"`
}
// BlobType defines the type of the Azure Blob.
type BlobType string

// Supported blob types.
const (
	BlobTypeBlock  BlobType = "BlockBlob"
	BlobTypePage   BlobType = "PageBlob"
	BlobTypeAppend BlobType = "AppendBlob"
)
// buildPath returns the service-relative URI path of the blob, i.e. the
// container path followed by the blob name.
func (b *Blob) buildPath() string {
	return fmt.Sprintf("%s/%s", b.Container.buildPath(), b.Name)
}
// Exists reports whether a blob with the given name exists in the container.
// A HEAD request is issued: 200 means it exists, 404 means it does not, and
// any other outcome is reported as an error.
func (b *Blob) Exists() (bool, error) {
	hdrs := b.Container.bsc.client.getStandardHeaders()
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
	resp, err := b.Container.bsc.client.exec(http.MethodHead, endpoint, hdrs, nil, b.Container.bsc.auth)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// GetURL returns the canonical URL of the blob. It does not make the blob
// publicly accessible if the blob or its container is private, and it does
// not check that the blob exists. A blob with no container name lives in the
// implicit "$root" container.
func (b *Blob) GetURL() string {
	container := b.Container.Name
	if len(container) == 0 {
		container = "$root"
	}
	resource := pathForResource(container, b.Name)
	return b.Container.bsc.client.getEndpoint(blobServiceName, resource, nil)
}
// GetBlobRangeOptions includes the options for a get blob range operation:
// the byte range to fetch, whether to request the range's MD5, plus all of
// the embedded GetBlobOptions.
type GetBlobRangeOptions struct {
	Range              *BlobRange
	GetRangeContentMD5 bool
	*GetBlobOptions
}

// GetBlobOptions includes the options for a get blob operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// BlobRange represents the byte range to fetch. An End of zero means
// "through the end of the blob".
type BlobRange struct {
	Start uint64
	End   uint64
}

// String renders the range as an HTTP Range header value: "bytes=s-e", or
// the open-ended "bytes=s-" when End is zero.
func (br BlobRange) String() string {
	value := fmt.Sprintf("bytes=%d-", br.Start)
	if br.End != 0 {
		value += strconv.FormatUint(br.End, 10)
	}
	return value
}
// Get returns a stream for reading the full blob. The caller must both read
// and Close() the stream to release the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(&GetBlobRangeOptions{GetBlobOptions: options})
	if err != nil {
		return nil, err
	}
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	// A full read returns the whole blob, so the Content-Length header is
	// authoritative for b.Properties.
	err = b.writeProperties(resp.Header, true)
	return resp.Body, err
}
// GetRange returns a stream for reading the specified range of the blob. The
// bytesRange string must follow HTTP/1.1 format, e.g. "0-" or "10-100". The
// caller must both read and Close() the stream to release the underlying
// connection.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(options)
	if err != nil {
		return nil, err
	}
	if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
		return nil, err
	}
	// The response's Content-Length reflects the requested range, which is
	// not always the full blob length, so the stored length is kept as-is.
	err = b.writeProperties(resp.Header, false)
	return resp.Body, err
}
// getRange performs the GET shared by Get and GetRange: it applies the
// optional Range header (and range-MD5 request), merges the embedded
// GetBlobOptions, and returns the raw response.
func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		if options.Range != nil {
			headers["Range"] = options.Range.String()
			if options.GetRangeContentMD5 {
				// Ask the service to return the MD5 of the range itself.
				headers["x-ms-range-get-content-md5"] = "true"
			}
		}
		if options.GetBlobOptions != nil {
			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
			params = addTimeout(params, options.Timeout)
			params = addSnapshot(params, options.Snapshot)
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	// err is nil here; return nil explicitly rather than the stale variable.
	return resp, nil
}
// SnapshotOptions includes the options for a snapshot blob operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout becomes a query parameter.
type SnapshotOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// CreateSnapshot creates a snapshot of the blob and returns its timestamp,
// parsed (RFC 3339) from the x-ms-snapshot response header.
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
	query := url.Values{"comp": {"snapshot"}}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	hdrs = b.Container.bsc.client.addMetadataToHeaders(hdrs, b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil || resp == nil {
		return nil, err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return nil, err
	}
	header := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
	if header == "" {
		return nil, errors.New("Snapshot not created")
	}
	parsed, err := time.Parse(time.RFC3339, header)
	if err != nil {
		return nil, err
	}
	return &parsed, nil
}
// GetBlobPropertiesOptions includes the options for a get blob properties
// operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobPropertiesOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// GetProperties fetches the blob's system properties via a HEAD request and
// stores them in b.Properties (including Content-Length).
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
	query := url.Values{}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		query = addSnapshot(query, options.Snapshot)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodHead, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}
	return b.writeProperties(resp.Header, true)
}
// writeProperties replaces b.Properties with values parsed from the response
// headers h. When includeContentLen is false the previously stored
// ContentLength is kept — range responses report the range length, not the
// full blob length. User metadata in h is also refreshed via writeMetadata.
func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
	var err error
	contentLength := b.Properties.ContentLength
	if includeContentLen {
		contentLengthStr := h.Get("Content-Length")
		if contentLengthStr != "" {
			// base 0: accepts decimal as sent by the service.
			contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
			if err != nil {
				return err
			}
		}
	}
	var sequenceNum int64
	sequenceNumStr := h.Get("x-ms-blob-sequence-number")
	if sequenceNumStr != "" {
		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
		if err != nil {
			return err
		}
	}
	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
	if err != nil {
		return err
	}
	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
	if err != nil {
		return err
	}
	// NOTE(review): the dereferences below assume getTimeFromHeaders never
	// returns (nil, nil) for these headers — confirm against its definition.
	b.Properties = BlobProperties{
		LastModified:          TimeRFC1123(*lastModified),
		Etag:                  h.Get("Etag"),
		ContentMD5:            h.Get("Content-MD5"),
		ContentLength:         contentLength,
		ContentEncoding:       h.Get("Content-Encoding"),
		ContentType:           h.Get("Content-Type"),
		ContentDisposition:    h.Get("Content-Disposition"),
		CacheControl:          h.Get("Cache-Control"),
		ContentLanguage:       h.Get("Content-Language"),
		SequenceNumber:        sequenceNum,
		CopyCompletionTime:    TimeRFC1123(*copyCompletionTime),
		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
		CopyID:                h.Get("x-ms-copy-id"),
		CopyProgress:          h.Get("x-ms-copy-progress"),
		CopySource:            h.Get("x-ms-copy-source"),
		CopyStatus:            h.Get("x-ms-copy-status"),
		BlobType:              BlobType(h.Get("x-ms-blob-type")),
		LeaseStatus:           h.Get("x-ms-lease-status"),
		LeaseState:            h.Get("x-ms-lease-state"),
	}
	b.writeMetadata(h)
	return nil
}
// SetBlobPropertiesOptions contains various properties of a blob and is an
// entry in SetProperties.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout becomes a query parameter.
// SequenceNumberAction is only consulted for page blobs (see SetProperties).
type SetBlobPropertiesOptions struct {
	Timeout              uint
	LeaseID              string     `header:"x-ms-lease-id"`
	Origin               string     `header:"Origin"`
	IfModifiedSince      *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince    *time.Time `header:"If-Unmodified-Since"`
	IfMatch              string     `header:"If-Match"`
	IfNoneMatch          string     `header:"If-None-Match"`
	SequenceNumberAction *SequenceNumberAction
	RequestID            string     `header:"x-ms-client-request-id"`
}

// SequenceNumberAction defines how the blob's sequence number should be
// modified.
type SequenceNumberAction string

// Options for sequence number action
const (
	SequenceNumberActionMax       SequenceNumberAction = "max"
	SequenceNumberActionUpdate    SequenceNumberAction = "update"
	SequenceNumberActionIncrement SequenceNumberAction = "increment"
)
// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
	query := url.Values{"comp": {"properties"}}
	hdrs := mergeHeaders(b.Container.bsc.client.getStandardHeaders(), headersFromStruct(b.Properties))
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	if b.Properties.BlobType == BlobTypePage {
		// Page blobs additionally carry their length and, optionally, a
		// sequence-number action; "increment" must not send an explicit
		// sequence number.
		hdrs = addToHeaders(hdrs, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
		if options != nil && options.SequenceNumberAction != nil {
			action := *options.SequenceNumberAction
			hdrs = addToHeaders(hdrs, "x-ms-sequence-number-action", string(action))
			if action != SequenceNumberActionIncrement {
				hdrs = addToHeaders(hdrs, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
			}
		}
	}
	resp, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
// SetBlobMetadataOptions includes the options for a set blob metadata
// operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout becomes a query parameter.
type SetBlobMetadataOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the blob's user-defined metadata with b.Metadata.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
	query := url.Values{"comp": {"metadata"}}
	hdrs := b.Container.bsc.client.addMetadataToHeaders(b.Container.bsc.client.getStandardHeaders(), b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
// GetBlobMetadataOptions includes the options for a get blob metadata
// operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobMetadataOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// GetMetadata refreshes b.Metadata with all user-defined metadata of the
// blob.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
	query := url.Values{"comp": {"metadata"}}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		query = addSnapshot(query, options.Snapshot)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodGet, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}
	b.writeMetadata(resp.Header)
	return nil
}
// writeMetadata replaces b.Metadata with the user metadata parsed from the
// response headers by the package-level writeMetadata helper (keys are
// returned lower-cased).
func (b *Blob) writeMetadata(h http.Header) {
	b.Metadata = BlobMetadata(writeMetadata(h))
}
// DeleteBlobOptions includes the options for a delete blob operation.
//
// Fields carrying a `header` tag are copied onto the request headers by
// headersFromStruct; Timeout and Snapshot become query parameters.
// DeleteSnapshots selects the x-ms-delete-snapshots mode: true sends
// "include", false sends "only", nil omits the header.
type DeleteBlobOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string `header:"x-ms-lease-id"`
	DeleteSnapshots   *bool
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// Delete removes the blob from its container. Deleting a blob that does not
// exist is an error.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) Delete(options *DeleteBlobOptions) error {
	resp, err := b.delete(options)
	if err == nil {
		defer drainRespBody(resp)
		err = checkRespCode(resp, []int{http.StatusAccepted})
	}
	return err
}
// DeleteIfExists removes the blob if it exists, reporting whether a deletion
// actually happened: true on 202 Accepted, false (with nil error) on 404.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
	resp, err := b.delete(options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// delete issues the DELETE request shared by Delete and DeleteIfExists.
func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) {
	query := url.Values{}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		query = addSnapshot(query, options.Snapshot)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
		if options.DeleteSnapshots != nil {
			// "include" deletes the blob along with its snapshots;
			// "only" deletes just the snapshots.
			mode := "only"
			if *options.DeleteSnapshots {
				mode = "include"
			}
			hdrs["x-ms-delete-snapshots"] = mode
		}
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	return b.Container.bsc.client.exec(http.MethodDelete, endpoint, hdrs, nil, b.Container.bsc.auth)
}
// pathForResource constructs the URI path for a container ("/container") or
// for a blob within it ("/container/name").
func pathForResource(container, name string) string {
	if name == "" {
		return "/" + container
	}
	return "/" + container + "/" + name
}
// respondCreation checks that resp reports 201 Created and, on success,
// records the created blob type on b. The body is always drained so the
// connection can be reused.
func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error {
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return err
	}
	b.Properties.BlobType = bt
	return nil
}

179
vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go

@ -0,0 +1,179 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// OverrideHeaders defines overridable response headers in
// a request using a SAS URI. A non-empty field is appended to the SAS query
// string (rscc/rscd/rsce/rscl/rsct) and becomes part of the signature.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type OverrideHeaders struct {
	CacheControl       string
	ContentDisposition string
	ContentEncoding    string
	ContentLanguage    string
	ContentType        string
}
// BlobSASOptions are options to construct a blob SAS
// URI. The embedded structs contribute, respectively, the permission set,
// the response-header overrides and the common SAS settings.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type BlobSASOptions struct {
	BlobServiceSASPermissions
	OverrideHeaders
	SASOptions
}
// BlobServiceSASPermissions includes the available permissions for
// blob service SAS URI.
type BlobServiceSASPermissions struct {
	Read   bool
	Add    bool
	Create bool
	Write  bool
	Delete bool
}

// buildString renders the granted permissions in the fixed "racwd" order
// required by the service-SAS signature.
func (p BlobServiceSASPermissions) buildString() string {
	flags := []struct {
		granted bool
		code    string
	}{
		{p.Read, "r"},
		{p.Add, "a"},
		{p.Create, "c"},
		{p.Write, "w"},
		{p.Delete, "d"},
	}
	var sb strings.Builder
	for _, f := range flags {
		if f.granted {
			sb.WriteString(f.code)
		}
	}
	return sb.String()
}
// GetSASURI creates an URL to the blob which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
	uri := b.GetURL()
	// "b" is the signed-resource designator for a blob.
	signedResource := "b"
	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
	if err != nil {
		return "", err
	}
	permissions := options.BlobServiceSASPermissions.buildString()
	return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
// blobAndFileSASURI signs a service SAS for the given resource and returns
// uri with the SAS query parameters attached. The string-to-sign and the
// emitted parameters must agree exactly or the service rejects the signature.
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
	// An unset Start time is omitted from both the signature and the URI.
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.UTC().Format(time.RFC3339)
	}
	expiry := options.Expiry.UTC().Format(time.RFC3339)
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}
	protocols := ""
	if options.UseHTTPS {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers)
	if err != nil {
		return "", err
	}
	sig := c.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {c.apiVersion},
		"se":  {expiry},
		"sr":  {signedResource},
		"sp":  {permissions},
		"sig": {sig},
	}
	if start != "" {
		sasParams.Add("st", start)
	}
	// Protocol (spr) and IP-range (sip) restrictions are only understood by
	// API versions 2015-04-05 and later.
	if c.apiVersion >= "2015-04-05" {
		if protocols != "" {
			sasParams.Add("spr", protocols)
		}
		if options.IP != "" {
			sasParams.Add("sip", options.IP)
		}
	}
	// Add override response headers
	addQueryParameter(sasParams, "rscc", headers.CacheControl)
	addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
	addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
	addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
	addQueryParameter(sasParams, "rsct", headers.ContentType)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
// blobSASStringToSign assembles the newline-separated string-to-sign for a
// blob/file service SAS. The field list grows with the service version, so
// each supported version has its own layout; the output must match the
// service's expectation byte-for-byte or signature validation fails.
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) {
	rscc := headers.CacheControl
	rscd := headers.ContentDisposition
	rsce := headers.ContentEncoding
	rscl := headers.ContentLanguage
	rsct := headers.ContentType
	// Versions 2015-02-21+ sign the resource path prefixed with the service name.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}
	// 2018-11-09+ adds signedResource and signedSnapshotTime fields.
	// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
	if signedVersion >= "2018-11-09" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil
	}
	// 2015-04-05+ adds signedIP and protocols fields.
	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
	if signedVersion >= "2015-04-05" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}
	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	if signedVersion >= "2013-08-15" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}
	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

186
vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go

@ -0,0 +1,186 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
	client Client         // underlying account client used to build, sign and send requests
	auth   authentication // authorization scheme applied to each request
}
// GetServiceProperties gets the properties of your storage account's blob service.
// It delegates to the shared per-service implementation on Client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	return b.client.getServiceProperties(blobServiceName, b.auth)
}
// SetServiceProperties sets the properties of your storage account's blob service.
// It delegates to the shared per-service implementation on Client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call. Zero values are omitted from the request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix     string // only return containers whose names begin with this prefix
	Marker     string // continuation token from a previous response's NextMarker
	Include    string // extra datasets to include in the response
	MaxResults uint   // cap on the number of containers returned (0 = service default)
	Timeout    uint   // server-side timeout in seconds (0 = none)
}
// GetContainerReference returns a Container object for the specified container name.
// The reference is purely local: no request is made and the container's
// existence is not verified.
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
	return &Container{
		bsc:  b,
		Name: name,
	}
}
// GetContainerReferenceFromSASURI returns a Container object for the specified
// container SASURI
func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
	// The container name is the first path segment of the SAS URI.
	segments := strings.Split(sasuri.Path, "/")
	if len(segments) <= 1 {
		return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
	}
	client, err := newSASClientFromURL(&sasuri)
	if err != nil {
		return nil, err
	}
	blobService := client.GetBlobService()
	container := Container{
		bsc:    &blobService,
		Name:   segments[1],
		sasuri: sasuri,
	}
	return &container, nil
}
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()
	// Local alias types mirror Container/ContainerListResponse for XML
	// decoding only. NOTE(review): presumably this exists so decoding does
	// not go through Container's own XML handling; confirm before changing.
	type ContainerAlias struct {
		bsc        *BlobStorageClient
		Name       string              `xml:"Name"`
		Properties ContainerProperties `xml:"Properties"`
		Metadata   BlobMetadata
		sasuri     url.URL
	}
	type ContainerListResponseAlias struct {
		XMLName    xml.Name         `xml:"EnumerationResults"`
		Xmlns      string           `xml:"xmlns,attr"`
		Prefix     string           `xml:"Prefix"`
		Marker     string           `xml:"Marker"`
		NextMarker string           `xml:"NextMarker"`
		MaxResults int64            `xml:"MaxResults"`
		Containers []ContainerAlias `xml:"Containers>Container"`
	}
	var outAlias ContainerListResponseAlias
	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &outAlias)
	if err != nil {
		return nil, err
	}
	// Copy the decoded aliases into real Container values, wiring each one
	// back to this blob service client.
	out := ContainerListResponse{
		XMLName:    outAlias.XMLName,
		Xmlns:      outAlias.Xmlns,
		Prefix:     outAlias.Prefix,
		Marker:     outAlias.Marker,
		NextMarker: outAlias.NextMarker,
		MaxResults: outAlias.MaxResults,
		Containers: make([]Container, len(outAlias.Containers)),
	}
	for i, cnt := range outAlias.Containers {
		out.Containers[i] = Container{
			bsc:        &b,
			Name:       cnt.Name,
			Properties: cnt.Properties,
			Metadata:   map[string]string(cnt.Metadata),
			sasuri:     cnt.sasuri,
		}
	}
	return &out, err
}
// getParameters converts the non-zero fields into List Containers query
// parameters.
func (p ListContainersParameters) getParameters() url.Values {
	out := url.Values{}
	setNonEmpty := func(key, value string) {
		if value != "" {
			out.Set(key, value)
		}
	}
	setNonEmpty("prefix", p.Prefix)
	setNonEmpty("marker", p.Marker)
	setNonEmpty("include", p.Include)
	if p.MaxResults != 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout != 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}
	return out
}
// writeMetadata extracts user-defined metadata headers ("x-ms-meta-*") from
// h into a map keyed by the lowercased metadata name. When a header appears
// multiple times, the last value wins.
func writeMetadata(h http.Header) map[string]string {
	metadata := make(map[string]string)
	prefix := strings.ToLower(userDefinedMetadataHeaderPrefix)
	for name, values := range h {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so name can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
		name = strings.ToLower(name)
		if !strings.HasPrefix(name, prefix) || len(values) == 0 {
			continue
		}
		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
		metadata[strings.TrimPrefix(name, prefix)] = values[len(values)-1]
	}
	return metadata
}

270
vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go

@ -0,0 +1,270 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string
// Filters for listing blocks in block blobs
const (
BlockListTypeAll BlockListType = "all"
BlockListTypeCommitted BlockListType = "committed"
BlockListTypeUncommitted BlockListType = "uncommitted"
)
// Maximum sizes (per REST API) for various concepts
const (
MaxBlobBlockSize = 100 * 1024 * 1024
MaxBlobPageSize = 4 * 1024 * 1024
)
// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string
// List of statuses that can be used to refer to a block in a block list
const (
BlockStatusUncommitted BlockStatus = "Uncommitted"
BlockStatusCommitted BlockStatus = "Committed"
BlockStatusLatest BlockStatus = "Latest"
)
// Block is used to create Block entities for Put Block List
// call.
type Block struct {
ID string
Status BlockStatus
}
// BlockListResponse contains the response fields from Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
XMLName xml.Name `xml:"BlockList"`
CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}
// BlockResponse contains the block information returned
// in the GetBlockListCall.
type BlockResponse struct {
Name string `xml:"Name"`
Size int64 `xml:"Size"`
}
// CreateBlockBlob initializes an empty block blob with no blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
return b.CreateBlockBlobFromReader(nil, options)
}
// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeBlock)
headers["Content-Length"] = "0"
var n int64
var err error
if blob != nil {
type lener interface {
Len() int
}
// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
if l, ok := blob.(lener); ok {
n = int64(l.Len())
} else {
var buf bytes.Buffer
n, err = io.Copy(&buf, blob)
if err != nil {
return err
}
blob = &buf
}
headers["Content-Length"] = strconv.FormatInt(n, 10)
}
b.Properties.ContentLength = n
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockOptions includes the options for a put block operation
type PutBlockOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
ContentMD5 string `header:"Content-MD5"`
RequestID string `header:"x-ms-client-request-id"`
}
// PutBlock saves the given data chunk to the specified block blob with
// given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}
// PutBlockWithLength saves the given data stream of exactly specified size to
// the block blob with given ID. It is an alternative to PutBlocks where data
// comes as stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
query := url.Values{
"comp": {"block"},
"blockid": {blockID},
}
headers := b.Container.bsc.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", size)
if options != nil {
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockListOptions includes the options for a put block list operation
type PutBlockListOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// PutBlockList saves list of blocks to the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
params := url.Values{"comp": {"blocklist"}}
blockListXML := prepareBlockListRequest(blocks)
headers := b.Container.bsc.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusCreated})
}
// GetBlockListOptions includes the options for a get block list operation
type GetBlockListOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetBlockList retrieves list of blocks in the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
params := url.Values{
"comp": {"blocklist"},
"blocklisttype": {string(blockType)},
}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
var out BlockListResponse
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}

991
vendor/github.com/Azure/azure-sdk-for-go/storage/client.go

@ -0,0 +1,991 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bufio"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/version"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
)
const (
	// DefaultBaseURL is the domain name used for storage requests in the
	// public cloud when a default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2018-03-28"

	defaultUseHTTPS      = true
	defaultRetryAttempts = 5
	defaultRetryDuration = time.Second * 5

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	// Fixed host:port endpoints of the local Azure Storage Emulator.
	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"

	// Prefix identifying user-defined metadata headers on requests/responses.
	userDefinedMetadataHeaderPrefix = "x-ms-meta-"

	// Keys recognized in a storage connection string (compared lowercased).
	connectionStringAccountName      = "accountname"
	connectionStringAccountKey       = "accountkey"
	connectionStringEndpointSuffix   = "endpointsuffix"
	connectionStringEndpointProtocol = "defaultendpointsprotocol"

	connectionStringBlobEndpoint  = "blobendpoint"
	connectionStringFileEndpoint  = "fileendpoint"
	connectionStringQueueEndpoint = "queueendpoint"
	connectionStringTableEndpoint = "tableendpoint"
	connectionStringSAS           = "sharedaccesssignature"
)
var (
	// validStorageAccount enforces Azure storage account naming rules:
	// 3-24 characters, lowercase letters and digits only.
	validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")

	// defaultValidStatusCodes are the HTTP status codes that the
	// DefaultSender treats as retryable.
	defaultValidStatusCodes = []int{
		http.StatusRequestTimeout,      // 408
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
)
// Sender sends an HTTP request on behalf of a Client and returns the
// response. Implementations may retry internally (see DefaultSender).
type Sender interface {
	Send(*Client, *http.Request) (*http.Response, error)
}
// DefaultSender is the default sender for the client. It implements
// an automatic retry strategy.
type DefaultSender struct {
	RetryAttempts    int           // maximum number of send attempts
	RetryDuration    time.Duration // base duration for exponential backoff between attempts
	ValidStatusCodes []int         // response status codes that trigger a retry

	attempts int // used for testing
}
// Send is the default retry strategy in the client. It resends the request
// while the response status is in ValidStatusCodes, waiting an exponential
// backoff between attempts, and returns the last response (or transport
// error) once a non-retryable result is seen or attempts are exhausted.
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
	rr := autorest.NewRetriableRequest(req)
	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
		err = rr.Prepare()
		if err != nil {
			return resp, err
		}
		resp, err = c.HTTPClient.Do(rr.Request())
		// Stop on a transport error or any status code outside the
		// retryable set — both are final results for the caller.
		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
			return resp, err
		}
		// Drain the body so the connection can be reused by the next attempt.
		drainRespBody(resp)
		// NOTE(review): req.Cancel is deprecated in net/http in favor of
		// request contexts; kept as-is in this vendored code.
		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
		ds.attempts = attempts
	}
	ds.attempts++
	return resp, err
}
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	// HTTPClient is the http.Client used to initiate API
	// requests. http.DefaultClient is used when creating a
	// client.
	HTTPClient *http.Client

	// Sender is an interface that sends the request. Clients are
	// created with a DefaultSender. The DefaultSender has an
	// automatic retry strategy built in. The Sender can be customized.
	Sender Sender

	accountName string
	accountKey  []byte
	useHTTPS    bool
	// UseSharedKeyLite selects the SharedKeyLite authorization scheme
	// instead of full SharedKey when signing requests.
	UseSharedKeyLite bool
	baseURL          string
	apiVersion       string
	userAgent        string
	// sasClient is true when this client authorizes with a SAS token
	// instead of an account key.
	sasClient bool
	// accountSASToken is non-nil only for account-SAS clients; service-SAS
	// clients leave it nil (see isServiceSASClient / isAccountSASClient).
	accountSASToken url.Values
}
// odataResponse pairs a raw HTTP response with its decoded OData error
// payload, if any.
type odataResponse struct {
	resp  *http.Response
	odata odataErrorWrapper
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
//
// NOTE(review): the fields without xml tags appear to be populated from the
// HTTP response rather than the XML error body; confirm against the caller.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	Lang                      string
	StatusCode                int
	RequestID                 string
	Date                      string
	APIVersion                string
}
// odataErrorMessage is the localized message inside an OData error.
type odataErrorMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

// odataError is the code/message pair of an OData error response.
type odataError struct {
	Code    string            `json:"code"`
	Message odataErrorMessage `json:"message"`
}

// odataErrorWrapper matches the top-level "odata.error" JSON envelope.
type odataErrorWrapper struct {
	Err odataError `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int
got int
inner error
}
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner)
}
// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// Inner returns any inner error info.
func (e UnexpectedStatusCodeError) Inner() error {
return e.inner
}
// NewClientFromConnectionString creates a Client from the connection string.
func NewClientFromConnectionString(input string) (Client, error) {
	// Parse "key=value" pairs separated by ';' into a lowercase-keyed map.
	parts := map[string]string{}
	for _, pair := range strings.Split(input, ";") {
		if pair == "" {
			continue
		}
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 || kv[0] == "" {
			return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
		}
		key := strings.TrimSpace(strings.ToLower(kv[0]))
		parts[key] = strings.TrimSpace(kv[1])
	}

	// TODO: validate parameter sets?

	if parts[connectionStringAccountName] == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}

	if sas := parts[connectionStringSAS]; sas != "" {
		// Pick the first explicitly configured endpoint, preferring
		// blob, then file, then queue, falling back to table.
		endpoint := parts[connectionStringBlobEndpoint]
		if endpoint == "" {
			endpoint = parts[connectionStringFileEndpoint]
		}
		if endpoint == "" {
			endpoint = parts[connectionStringQueueEndpoint]
		}
		if endpoint == "" {
			endpoint = parts[connectionStringTableEndpoint]
		}
		return NewAccountSASClientFromEndpointToken(endpoint, sas)
	}

	useHTTPS := defaultUseHTTPS
	if proto := parts[connectionStringEndpointProtocol]; proto != "" {
		useHTTPS = proto == "https"
	}

	return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
		parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
}
// NewBasicClient constructs a Client with given storage service name and
// key. The emulator account name routes to a local emulator client instead
// of the public cloud.
func NewBasicClient(accountName, accountKey string) (Client, error) {
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
// key in the referenced cloud. The storage endpoint suffix is taken from the
// environment instead of the public-cloud default.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}
// NewEmulatorClient constructs a Client intended to only work with Azure
// Storage Emulator. HTTPS is disabled because the emulator serves plain
// HTTP on loopback endpoints.
func NewEmulatorClient() (Client, error) {
	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint than Azure Public Cloud.
//
// It validates the account name/key and base URL, decodes the
// base64-encoded account key, and returns a client wired with the default
// retrying Sender. The zero Client is returned alongside any error.
func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
	var c Client
	// Guard clauses instead of an if/else-if chain: every branch returns,
	// so the happy path stays left-aligned.
	if !IsValidStorageAccount(accountName) {
		return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
	}
	if accountKey == "" {
		return c, fmt.Errorf("azure: account key required")
	}
	if serviceBaseURL == "" {
		return c, fmt.Errorf("azure: base storage service url required")
	}
	// Account keys are distributed base64-encoded; decode once up front so
	// request signing can use the raw bytes.
	key, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
	}
	c = Client{
		HTTPClient:       http.DefaultClient,
		accountName:      accountName,
		accountKey:       key,
		useHTTPS:         useHTTPS,
		baseURL:          serviceBaseURL,
		apiVersion:       apiVersion,
		sasClient:        false,
		UseSharedKeyLite: false,
		Sender: &DefaultSender{
			RetryAttempts:    defaultRetryAttempts,
			ValidStatusCodes: defaultValidStatusCodes,
			RetryDuration:    defaultRetryDuration,
		},
	}
	c.userAgent = c.getDefaultUserAgent()
	return c, nil
}
// IsValidStorageAccount checks if the storage account name is valid:
// 3-24 characters of lowercase letters and digits.
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
func IsValidStorageAccount(account string) bool {
	return validStorageAccount.MatchString(account)
}
// NewAccountSASClient constructs a client that uses accountSAS authorization
// for its operations in the given cloud environment.
func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
	return newSASClient(account, env.StorageEndpointSuffix, token)
}
// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
// for its operations using the specified endpoint and SAS token.
func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return Client{}, err
	}
	// Validate the token before attaching it verbatim as the query string.
	if _, err = url.ParseQuery(sasToken); err != nil {
		return Client{}, err
	}
	u.RawQuery = sasToken
	return newSASClientFromURL(u)
}
// newSASClient builds a SAS-authorized client for the given account and
// base URL. The API version ("sv") and protocol preference ("spr") are
// taken from the SAS token itself when present.
func newSASClient(accountName, baseURL string, sasToken url.Values) Client {
	c := Client{
		HTTPClient: http.DefaultClient,
		apiVersion: DefaultAPIVersion,
		sasClient:  true,
		Sender: &DefaultSender{
			RetryAttempts:    defaultRetryAttempts,
			ValidStatusCodes: defaultValidStatusCodes,
			RetryDuration:    defaultRetryDuration,
		},
		accountName:     accountName,
		baseURL:         baseURL,
		accountSASToken: sasToken,
		useHTTPS:        defaultUseHTTPS,
	}
	c.userAgent = c.getDefaultUserAgent()
	// Get API version and protocol from token
	c.apiVersion = sasToken.Get("sv")
	if spr := sasToken.Get("spr"); spr != "" {
		c.useHTTPS = spr == "https"
	}
	return c
}
// newSASClientFromURL derives the account name and base URL from the host
// of a SAS URI and builds a SAS client from its query parameters.
func newSASClientFromURL(u *url.URL) (Client, error) {
	// the host name will look something like this
	// - foo.blob.core.windows.net
	// "foo" is the account name
	// "core.windows.net" is the baseURL

	// find the first dot to get account name
	i1 := strings.IndexByte(u.Host, '.')
	if i1 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
	}

	// now find the second dot to get the base URL
	i2 := strings.IndexByte(u.Host[i1+1:], '.')
	if i2 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
	}

	sasToken := u.Query()
	// Base URL starts just past the second dot: (i1+1) + i2 + 1 = i1+i2+2.
	c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken)
	if spr := sasToken.Get("spr"); spr == "" {
		// infer from URL if not in the query params set
		c.useHTTPS = u.Scheme == "https"
	}
	return c, nil
}
// isServiceSASClient reports whether this client authorizes via a
// service-level SAS (SAS URIs attached to individual resources, no
// account-wide token).
func (c Client) isServiceSASClient() bool {
    return c.sasClient && c.accountSASToken == nil
}

// isAccountSASClient reports whether this client authorizes via an
// account-level SAS token (merged into every request's query string).
func (c Client) isAccountSASClient() bool {
    return c.sasClient && c.accountSASToken != nil
}
// getDefaultUserAgent builds the default User-Agent string from the Go
// runtime, the SDK version and the configured API version.
// Note the argument order: GOARCH fills the first %s of "(%s-%s)" and GOOS
// the second, producing e.g. "(amd64-linux)".
func (c Client) getDefaultUserAgent() string {
    return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
        runtime.Version(),
        runtime.GOARCH,
        runtime.GOOS,
        version.Number,
        c.apiVersion,
    )
}
// AddToUserAgent adds an extension to the current user agent.
// It returns an error (and leaves the user agent untouched) when the
// extension is empty.
func (c *Client) AddToUserAgent(extension string) error {
    // Guard clause keeps the happy path unindented; the error string is
    // lowercased per Go convention (the old message started "Extension ...").
    if extension == "" {
        return fmt.Errorf("extension was empty, User Agent stayed as %s", c.userAgent)
    }
    c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
    return nil
}
// protectUserAgent is used in funcs that take extra headers as a parameter.
// If the caller supplied a User-Agent header it is folded into the client's
// own user agent (rather than replacing it) and removed from the map, so
// getStandardHeaders cannot be overridden afterwards.
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
    if ua, found := extraheaders[userAgentHeader]; found {
        c.AddToUserAgent(ua)
        delete(extraheaders, userAgentHeader)
    }
    return extraheaders
}
// getBaseURL returns the scheme+host URL for the given service ("blob",
// "table", "queue", ...), routing to the local storage emulator when the
// client is configured with the emulator account name.
func (c Client) getBaseURL(service string) *url.URL {
    scheme := "http"
    if c.useHTTPS {
        scheme = "https"
    }

    var host string
    if c.accountName != StorageEmulatorAccountName {
        host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
    } else {
        switch service {
        case blobServiceName:
            host = storageEmulatorBlob
        case tableServiceName:
            host = storageEmulatorTable
        case queueServiceName:
            host = storageEmulatorQueue
        }
    }

    return &url.URL{Scheme: scheme, Host: host}
}
// getEndpoint builds the full request URL for the given service, resource
// path and query parameters. The path is normalized to start with '/', and
// emulator URLs get the account name prepended to the path.
func (c Client) getEndpoint(service, path string, params url.Values) string {
    endpoint := c.getBaseURL(service)

    // API doesn't accept path segments not starting with '/'
    if !strings.HasPrefix(path, "/") {
        path = "/" + path
    }
    if c.accountName == StorageEmulatorAccountName {
        path = "/" + StorageEmulatorAccountName + path
    }

    endpoint.Path = path
    endpoint.RawQuery = params.Encode()
    return endpoint.String()
}
// AccountSASTokenOptions includes options for constructing
// an account SAS token.
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
type AccountSASTokenOptions struct {
    APIVersion    string        // service API version; defaults to the client's when empty
    Services      Services      // storage services the token grants access to ("ss")
    ResourceTypes ResourceTypes // resource scopes the token covers ("srt")
    Permissions   Permissions   // operations permitted by the token ("sp")
    Start         time.Time     // optional validity start; zero value omits "st"
    Expiry        time.Time     // validity end ("se")
    IP            string        // optional IP or range restriction ("sip")
    UseHTTPS      bool          // when true the token is restricted to HTTPS only
}

// Services specify services accessible with an account SAS.
type Services struct {
    Blob  bool
    Queue bool
    Table bool
    File  bool
}

// ResourceTypes specify the resources accessible with an
// account SAS.
type ResourceTypes struct {
    Service   bool
    Container bool
    Object    bool
}

// Permissions specifies permissions for an accountSAS.
type Permissions struct {
    Read    bool
    Write   bool
    Delete  bool
    List    bool
    Add     bool
    Create  bool
    Update  bool
    Process bool
}
// GetAccountSASToken creates an account SAS token.
// It encodes the requested services, resource types and permissions as
// flag strings, signs the service-mandated string-to-sign with the
// account key, and returns the token as query parameters.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
    if options.APIVersion == "" {
        options.APIVersion = c.apiVersion
    }

    // API versions are ISO-style YYYY-MM-DD strings, so lexicographic
    // comparison is a valid chronological comparison here.
    if options.APIVersion < "2015-04-05" {
        return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
    }

    // build services string
    services := ""
    if options.Services.Blob {
        services += "b"
    }
    if options.Services.Queue {
        services += "q"
    }
    if options.Services.Table {
        services += "t"
    }
    if options.Services.File {
        services += "f"
    }

    // build resources string
    resources := ""
    if options.ResourceTypes.Service {
        resources += "s"
    }
    if options.ResourceTypes.Container {
        resources += "c"
    }
    if options.ResourceTypes.Object {
        resources += "o"
    }

    // build permissions string
    permissions := ""
    if options.Permissions.Read {
        permissions += "r"
    }
    if options.Permissions.Write {
        permissions += "w"
    }
    if options.Permissions.Delete {
        permissions += "d"
    }
    if options.Permissions.List {
        permissions += "l"
    }
    if options.Permissions.Add {
        permissions += "a"
    }
    if options.Permissions.Create {
        permissions += "c"
    }
    if options.Permissions.Update {
        permissions += "u"
    }
    if options.Permissions.Process {
        permissions += "p"
    }

    // build start time, if exists
    start := ""
    if options.Start != (time.Time{}) {
        start = options.Start.UTC().Format(time.RFC3339)
    }

    // build expiry time
    expiry := options.Expiry.UTC().Format(time.RFC3339)

    protocol := "https,http"
    if options.UseHTTPS {
        protocol = "https"
    }

    // The field order below is mandated by the service; the trailing empty
    // string produces the required terminating newline. Do not reorder.
    stringToSign := strings.Join([]string{
        c.accountName,
        permissions,
        services,
        resources,
        start,
        expiry,
        options.IP,
        protocol,
        options.APIVersion,
        "",
    }, "\n")
    signature := c.computeHmac256(stringToSign)

    sasParams := url.Values{
        "sv":  {options.APIVersion},
        "ss":  {services},
        "srt": {resources},
        "sp":  {permissions},
        "se":  {expiry},
        "spr": {protocol},
        "sig": {signature},
    }

    // optional parameters are omitted entirely when unset
    if start != "" {
        sasParams.Add("st", start)
    }
    if options.IP != "" {
        sasParams.Add("sip", options.IP)
    }

    return sasParams, nil
}
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account. The embedded client's user agent is
// tagged with the service name.
func (c Client) GetBlobService() BlobStorageClient {
    svc := BlobStorageClient{client: c}
    svc.client.AddToUserAgent(blobServiceName)
    if c.UseSharedKeyLite {
        svc.auth = sharedKeyLite
    } else {
        svc.auth = sharedKey
    }
    return svc
}
// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account. The embedded client's user agent is
// tagged with the service name.
func (c Client) GetQueueService() QueueServiceClient {
    svc := QueueServiceClient{client: c}
    svc.client.AddToUserAgent(queueServiceName)
    if c.UseSharedKeyLite {
        svc.auth = sharedKeyLite
    } else {
        svc.auth = sharedKey
    }
    return svc
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account. Tables use their own shared-key variants.
func (c Client) GetTableService() TableServiceClient {
    svc := TableServiceClient{client: c}
    svc.client.AddToUserAgent(tableServiceName)
    if c.UseSharedKeyLite {
        svc.auth = sharedKeyLiteForTable
    } else {
        svc.auth = sharedKeyForTable
    }
    return svc
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account. The embedded client's user agent is
// tagged with the service name.
func (c Client) GetFileService() FileServiceClient {
    svc := FileServiceClient{client: c}
    svc.client.AddToUserAgent(fileServiceName)
    if c.UseSharedKeyLite {
        svc.auth = sharedKeyLite
    } else {
        svc.auth = sharedKey
    }
    return svc
}
// getStandardHeaders returns the headers every request must carry:
// the user agent, the x-ms-version API version, and the current
// RFC1123-formatted date.
func (c Client) getStandardHeaders() map[string]string {
    h := make(map[string]string, 3)
    h[userAgentHeader] = c.userAgent
    h["x-ms-version"] = c.apiVersion
    h["x-ms-date"] = currentTimeRfc1123Formatted()
    return h
}
// exec performs one authenticated request against the storage service.
// It adds the authorization header for the given scheme, copies headers
// verbatim (bypassing Go's header canonicalization), appends the account
// SAS token when present, sends through the configured Sender (which may
// retry), and converts 4xx/5xx responses into an error while still
// returning the response for inspection.
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) {
    headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
    if err != nil {
        return nil, err
    }

    req, err := http.NewRequest(verb, url, body)
    if err != nil {
        return nil, errors.New("azure/storage: error creating request: " + err.Error())
    }

    // http.NewRequest() will automatically set req.ContentLength for a handful of types
    // otherwise we will handle here.
    if req.ContentLength < 1 {
        if clstr, ok := headers["Content-Length"]; ok {
            if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
                req.ContentLength = cl
            }
        }
    }

    for k, v := range headers {
        req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
    }

    if c.isAccountSASClient() {
        // append the SAS token to the query params
        v := req.URL.Query()
        v = mergeParams(v, c.accountSASToken)
        req.URL.RawQuery = v.Encode()
    }

    resp, err := c.Sender.Send(&c, req)
    if err != nil {
        return nil, err
    }

    // resp is returned alongside the error so callers can still read
    // headers/status; the error carries the parsed service error details
    if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
        return resp, getErrorFromResponse(resp)
    }

    return resp, nil
}
// execInternalJSONCommon executes an authenticated JSON/OData request and
// returns the parsed odataResponse together with the raw *http.Request and
// *http.Response for callers (such as batch processing) that need them.
// On 4xx/5xx it attempts to decode the body as an odata error payload;
// empty error bodies (e.g. HEAD) yield a status-code-only service error.
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
    headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
    if err != nil {
        return nil, nil, nil, err
    }

    req, err := http.NewRequest(verb, url, body)
    if err != nil {
        // Fix: this error was previously unchecked, so a malformed URL or
        // verb would cause a nil-pointer dereference on req just below.
        return nil, nil, nil, err
    }
    for k, v := range headers {
        req.Header.Add(k, v)
    }

    resp, err := c.Sender.Send(&c, req)
    if err != nil {
        return nil, nil, nil, err
    }

    respToRet := &odataResponse{resp: resp}

    statusCode := resp.StatusCode
    if statusCode >= 400 && statusCode <= 505 {
        var respBody []byte
        respBody, err = readAndCloseBody(resp.Body)
        if err != nil {
            return nil, nil, nil, err
        }

        requestID, date, version := getDebugHeaders(resp.Header)
        if len(respBody) == 0 {
            // no error in response body, might happen in HEAD requests
            err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
            return respToRet, req, resp, err
        }
        // try unmarshal as odata.error json
        err = json.Unmarshal(respBody, &respToRet.odata)
    }

    return respToRet, req, resp, err
}
// execInternalJSON is the simple form of execInternalJSONCommon: it
// discards the raw request/response and returns only the odata payload.
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
    odata, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
    return odata, err
}
// execBatchOperationJSON executes a Table batch request and parses the
// nested multipart response: the outer batch body contains a changeset
// part which in turn holds the individual operation's HTTP response.
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
    // execute common query, get back generated request, response etc... for more processing.
    respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
    if err != nil {
        return nil, err
    }

    // return the OData in the case of executing batch commands.
    // In this case we need to read the outer batch boundary and contents.
    // Then we read the changeset information within the batch
    var respBody []byte
    respBody, err = readAndCloseBody(resp.Body)
    if err != nil {
        return nil, err
    }

    // outer multipart body
    // NOTE(review): indexing Content-Type[0] panics if the service ever
    // omits the header — confirm the batch endpoint always sets it.
    _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
    if err != nil {
        return nil, err
    }

    // batch details.
    batchBoundary := batchHeader["boundary"]
    batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
    if err != nil {
        return nil, err
    }

    // changeset details.
    err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
    if err != nil {
        return nil, err
    }

    return respToRet, nil
}
// genChangesetReader reads the changeset part of a batch response and, for
// non-204 results, decodes its embedded HTTP response body as odata into
// respToRet (also swapping respToRet.resp to the inner response).
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
    changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
    changesetPart, err := changesetMultiReader.NextPart()
    if err != nil {
        return err
    }

    changesetPartBufioReader := bufio.NewReader(changesetPart)
    changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
    if err != nil {
        return err
    }

    if changesetResp.StatusCode != http.StatusNoContent {
        changesetBody, err := readAndCloseBody(changesetResp.Body)
        if err != nil {
            // Fix: this read error was previously discarded (immediately
            // overwritten by the Unmarshal error), so a truncated body
            // surfaced as a confusing JSON error instead of the real cause.
            return err
        }
        if err := json.Unmarshal(changesetBody, &respToRet.odata); err != nil {
            return err
        }
        respToRet.resp = changesetResp
    }

    return nil
}
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
respBodyString := string(respBody)
respBodyReader := strings.NewReader(respBodyString)
// reading batchresponse
batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
batchPart, err := batchMultiReader.NextPart()
if err != nil {
return nil, "", err
}
batchPartBufioReader := bufio.NewReader(batchPart)
_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
if err != nil {
return nil, "", err
}
changesetBoundary := changesetHeader["boundary"]
return batchPartBufioReader, changesetBoundary, nil
}
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
out, err := ioutil.ReadAll(body)
if err == io.EOF {
err = nil
}
return out, err
}
// reads the response body then closes it
func drainRespBody(resp *http.Response) {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
// serviceErrFromXML decodes an XML service error body into storageErr.
// On decode failure it stores a descriptive message (including the raw
// body) in storageErr.Message and returns the decode error.
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
    if err := xml.Unmarshal(body, storageErr); err != nil {
        // Fix: message previously read "could no be unmarshaled".
        storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
        return err
    }
    return nil
}
// serviceErrFromJSON decodes an odata JSON error body into storageErr,
// copying code, message and language. On decode failure it stores a
// descriptive message (including the raw body) and returns the error.
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
    odataError := odataErrorWrapper{}
    if err := json.Unmarshal(body, &odataError); err != nil {
        // Fix: message previously read "could no be unmarshaled".
        storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
        return err
    }
    storageErr.Code = odataError.Err.Code
    storageErr.Message = odataError.Err.Message.Value
    storageErr.Lang = odataError.Err.Message.Lang
    return nil
}
// serviceErrFromStatusCode builds a service error for responses that
// carried no body (e.g. HEAD requests), from the status line and the
// debug headers.
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
    return AzureStorageServiceError{
        Message:    "no response body was available for error status code",
        StatusCode: code,
        Code:       status,
        RequestID:  requestID,
        Date:       date,
        APIVersion: version,
    }
}
// Error implements the error interface, rendering every diagnostic field
// of the service error in a single line.
func (e AzureStorageServiceError) Error() string {
    const layout = "storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s"
    return fmt.Sprintf(layout,
        e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
}
// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(resp *http.Response, allowed []int) error {
for _, v := range allowed {
if resp.StatusCode == v {
return nil
}
}
err := getErrorFromResponse(resp)
return UnexpectedStatusCodeError{
allowed: allowed,
got: resp.StatusCode,
inner: err,
}
}
// addMetadataToHeaders copies user-defined metadata into h under the
// x-ms-meta- prefix, after stripping any User-Agent entry (which is folded
// into the client's own UA by protectUserAgent).
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
    for key, value := range c.protectUserAgent(metadata) {
        h[userDefinedMetadataHeaderPrefix+key] = value
    }
    return h
}
func getDebugHeaders(h http.Header) (requestID, date, version string) {
requestID = h.Get("x-ms-request-id")
version = h.Get("x-ms-version")
date = h.Get("Date")
return
}
// getErrorFromResponse reads and closes resp.Body and converts it into an
// AzureStorageServiceError. An empty body (e.g. HEAD requests) yields a
// status-code-only error; otherwise the body is decoded as XML or JSON
// depending on Content-Type.
func getErrorFromResponse(resp *http.Response) error {
    respBody, err := readAndCloseBody(resp.Body)
    if err != nil {
        return err
    }

    requestID, date, version := getDebugHeaders(resp.Header)
    if len(respBody) == 0 {
        // no error in response body, might happen in HEAD requests
        return serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
    }

    storageErr := AzureStorageServiceError{
        StatusCode: resp.StatusCode,
        RequestID:  requestID,
        Date:       date,
        APIVersion: version,
    }
    // Decode the service error object. The decoders' return values are
    // deliberately ignored: on decode failure they record a descriptive
    // message in storageErr.Message, and storageErr is returned either way.
    // (Fix: the old code guarded `err = errIn` with `if err != nil` at a
    // point where err was provably nil, so those branches were dead code;
    // they are removed here with identical observable behavior.)
    if resp.Header.Get("Content-Type") == "application/xml" {
        serviceErrFromXML(respBody, &storageErr)
    } else {
        serviceErrFromJSON(respBody, &storageErr)
    }
    return storageErr
}

38
vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go

@ -0,0 +1,38 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/url"
"time"
)
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
    APIVersion string    // service API version to sign with
    Start      time.Time // optional validity start
    Expiry     time.Time // validity end
    IP         string    // optional IP or IP-range restriction
    UseHTTPS   bool      // restrict the SAS to HTTPS-only when true
    Identifier string    // optional stored access policy identifier
}
func addQueryParameter(query url.Values, key, value string) url.Values {
if value != "" {
query.Add(key, value)
}
return query
}

640
vendor/github.com/Azure/azure-sdk-for-go/storage/container.go

@ -0,0 +1,640 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// Container represents an Azure container.
type Container struct {
    bsc        *BlobStorageClient  // owning blob service client (set when the reference is created)
    Name       string              `xml:"Name"`
    Properties ContainerProperties `xml:"Properties"`
    Metadata   map[string]string   // user-defined metadata, sent as x-ms-meta-* headers
    sasuri     url.URL             // service SAS URI, used instead of getEndpoint for SAS clients
}
// Client returns the HTTP client used by the Container reference.
// The returned pointer aliases the client embedded in the container's
// BlobStorageClient.
func (c *Container) Client() *Client {
    return &c.bsc.client
}
// buildPath returns the URL path segment that addresses this container.
func (c *Container) buildPath() string {
    return "/" + c.Name
}
// GetURL gets the canonical URL to the container.
// This method does not create a publicly accessible URL if the container
// is private and this method does not check if the blob exists.
func (c *Container) GetURL() string {
    name := c.Name
    if name == "" {
        // the nameless container maps to the account's root container
        name = "$root"
    }
    return c.bsc.client.getEndpoint(blobServiceName, pathForResource(name, ""), nil)
}
// ContainerSASOptions are options to construct a container SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type ContainerSASOptions struct {
    ContainerSASPermissions // permissions granted by the SAS
    OverrideHeaders         // response headers to override in the signed URI
    SASOptions              // validity window, IP restriction, protocol
}

// ContainerSASPermissions includes the available permissions for
// a container SAS URI.
type ContainerSASPermissions struct {
    BlobServiceSASPermissions
    List bool // allow listing blobs within the container
}
// GetSASURI creates an URL to the container which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
    uri := c.GetURL()
    // "c" marks the signed resource as a container
    signedResource := "c"
    canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
    if err != nil {
        return "", err
    }

    // build permissions string: blob-level permissions plus the
    // container-only "l" (list) flag
    permissions := options.BlobServiceSASPermissions.buildString()
    if options.List {
        permissions += "l"
    }

    return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
    LastModified  string              `xml:"Last-Modified"`
    Etag          string              `xml:"Etag"`
    LeaseStatus   string              `xml:"LeaseStatus"`
    LeaseState    string              `xml:"LeaseState"`
    LeaseDuration string              `xml:"LeaseDuration"`
    PublicAccess  ContainerAccessType `xml:"PublicAccess"`
}

// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
    XMLName    xml.Name    `xml:"EnumerationResults"`
    Xmlns      string      `xml:"xmlns,attr"`
    Prefix     string      `xml:"Prefix"`
    Marker     string      `xml:"Marker"`
    NextMarker string      `xml:"NextMarker"` // pagination token for the next page
    MaxResults int64       `xml:"MaxResults"`
    Containers []Container `xml:"Containers>Container"`
}

// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
    XMLName    xml.Name `xml:"EnumerationResults"`
    Xmlns      string   `xml:"xmlns,attr"`
    Prefix     string   `xml:"Prefix"`
    Marker     string   `xml:"Marker"`
    NextMarker string   `xml:"NextMarker"` // pagination token for the next page
    MaxResults int64    `xml:"MaxResults"`
    Blobs      []Blob   `xml:"Blobs>Blob"`

    // BlobPrefix is used to traverse blobs as if it were a file system.
    // It is returned if ListBlobsParameters.Delimiter is specified.
    // The list here can be thought of as "folders" that may contain
    // other folders or blobs.
    BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`

    // Delimiter is used to traverse blobs as if it were a file system.
    // It is returned if ListBlobsParameters.Delimiter is specified.
    Delimiter string `xml:"Delimiter"`
}
// IncludeBlobDataset has options to include in a list blobs operation
type IncludeBlobDataset struct {
    Snapshots        bool // include blob snapshots
    Metadata         bool // include user-defined metadata per blob
    UncommittedBlobs bool // include blobs with uncommitted blocks
    Copy             bool // include copy-operation metadata
}

// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
    Prefix     string              // only blobs whose names begin with this prefix
    Delimiter  string              // virtual-directory delimiter (usually "/")
    Marker     string              // continuation token from a previous page
    Include    *IncludeBlobDataset // optional extra datasets to include
    MaxResults uint                // page size; 0 lets the service default apply
    Timeout    uint                // server-side timeout in seconds; 0 omits it
    RequestID  string              // client request id for tracing
}
// getParameters converts the list-blobs options into URL query values,
// omitting every zero-valued field.
func (p ListBlobsParameters) getParameters() url.Values {
    params := url.Values{}

    if p.Prefix != "" {
        params.Set("prefix", p.Prefix)
    }
    if p.Delimiter != "" {
        params.Set("delimiter", p.Delimiter)
    }
    if p.Marker != "" {
        params.Set("marker", p.Marker)
    }
    if p.Include != nil {
        var datasets []string
        datasets = addString(datasets, p.Include.Snapshots, "snapshots")
        datasets = addString(datasets, p.Include.Metadata, "metadata")
        datasets = addString(datasets, p.Include.UncommittedBlobs, "uncommittedblobs")
        datasets = addString(datasets, p.Include.Copy, "copy")
        params.Set("include", strings.Join(datasets, ","))
    }
    if p.MaxResults != 0 {
        params.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
    }
    if p.Timeout != 0 {
        params.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
    }

    return params
}
// addString appends text to datasets when include is true, otherwise
// returns datasets unchanged.
func addString(datasets []string, include bool, text string) []string {
    if !include {
        return datasets
    }
    return append(datasets, text)
}
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string

// Access options for containers
const (
    ContainerAccessTypePrivate   ContainerAccessType = ""          // no anonymous access
    ContainerAccessTypeBlob      ContainerAccessType = "blob"      // anonymous read access to blobs only
    ContainerAccessTypeContainer ContainerAccessType = "container" // anonymous read access to container and blobs
)

// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
    ID         string    // stored access policy identifier
    StartTime  time.Time // policy validity start
    ExpiryTime time.Time // policy validity end
    CanRead    bool      // "r" permission
    CanWrite   bool      // "w" permission
    CanDelete  bool      // "d" permission
}

// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
    AccessType     ContainerAccessType     // public access level
    AccessPolicies []ContainerAccessPolicy // stored access policies
}

// ContainerAccessHeader references header used when setting/getting container ACL
const (
    ContainerAccessHeader string = "x-ms-blob-public-access"
)
// GetBlobReference returns a Blob object for the specified blob name.
// It is a local reference only; no request is made.
func (c *Container) GetBlobReference(name string) *Blob {
    blob := &Blob{
        Container: c,
        Name:      name,
    }
    return blob
}
// CreateContainerOptions includes the options for a create container operation
type CreateContainerOptions struct {
    Timeout   uint                // server-side timeout in seconds; 0 omits it
    Access    ContainerAccessType `header:"x-ms-blob-public-access"`
    RequestID string              `header:"x-ms-client-request-id"`
}
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
func (c *Container) Create(options *CreateContainerOptions) error {
    res, err := c.create(options)
    if err != nil {
        return err
    }
    defer drainRespBody(res)
    // anything other than 201 Created is an error
    return checkRespCode(res, []int{http.StatusCreated})
}
// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
    res, err := c.create(options)
    if res == nil {
        return false, err
    }
    defer drainRespBody(res)
    switch res.StatusCode {
    case http.StatusCreated:
        return true, nil
    case http.StatusConflict:
        // already exists: not an error for this call
        return false, nil
    }
    return false, err
}
// create issues the PUT request backing Create/CreateIfNotExists, sending
// the container's metadata and any option-derived headers.
func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
    params := url.Values{"restype": {"container"}}
    hdrs := c.bsc.client.addMetadataToHeaders(c.bsc.client.getStandardHeaders(), c.Metadata)
    if options != nil {
        params = addTimeout(params, options.Timeout)
        hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
    }
    uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
    return c.bsc.client.exec(http.MethodPut, uri, hdrs, nil, c.bsc.auth)
}
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
    q := url.Values{"restype": {"container"}}
    var uri string
    if c.bsc.client.isServiceSASClient() {
        // service-SAS clients address the container through its SAS URI;
        // merge our query params with the token's
        q = mergeParams(q, c.sasuri.Query())
        newURI := c.sasuri
        newURI.RawQuery = q.Encode()
        uri = newURI.String()
    } else {
        uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
    }
    headers := c.bsc.client.getStandardHeaders()

    resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
    if resp != nil {
        defer drainRespBody(resp)
        // 200 => exists, 404 => does not exist; both are definitive
        // answers, so neither is reported as an error
        if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
            return resp.StatusCode == http.StatusOK, nil
        }
    }
    return false, err
}
// SetContainerPermissionOptions includes options for a set container permissions operation
type SetContainerPermissionOptions struct {
    Timeout           uint       // server-side timeout in seconds; 0 omits it
    LeaseID           string     `header:"x-ms-lease-id"`
    IfModifiedSince   *time.Time `header:"If-Modified-Since"`
    IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
    RequestID         string     `header:"x-ms-client-request-id"`
}
// SetPermissions sets up container permissions: the public access level is
// sent as a header and the stored access policies as an XML body.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
    body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
    if err != nil {
        return err
    }
    params := url.Values{
        "restype": {"container"},
        "comp":    {"acl"},
    }
    headers := c.bsc.client.getStandardHeaders()
    headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
    // the payload length is known up front, so set Content-Length explicitly
    headers["Content-Length"] = strconv.Itoa(length)

    if options != nil {
        params = addTimeout(params, options.Timeout)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

    resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
    if err != nil {
        return err
    }
    defer drainRespBody(resp)
    return checkRespCode(resp, []int{http.StatusOK})
}
// GetContainerPermissionOptions includes options for a get container permissions operation
type GetContainerPermissionOptions struct {
    Timeout   uint   // server-side timeout in seconds; 0 omits it
    LeaseID   string `header:"x-ms-lease-id"`
    RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
    params := url.Values{
        "restype": {"container"},
        "comp":    {"acl"},
    }
    headers := c.bsc.client.getStandardHeaders()

    if options != nil {
        params = addTimeout(params, options.Timeout)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

    resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    // the body carries the signed identifiers; the public access level
    // comes back in the response headers
    var ap AccessPolicy
    err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
    if err != nil {
        return nil, err
    }
    return buildAccessPolicy(ap, &resp.Header), nil
}
// buildAccessPolicy converts the decoded signed-identifier list plus the
// x-ms-blob-public-access response header into ContainerPermissions.
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
    // containerAccess. Blob, Container, empty
    access := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
    out := &ContainerPermissions{
        AccessType:     ContainerAccessType(access),
        AccessPolicies: []ContainerAccessPolicy{},
    }

    for _, sid := range ap.SignedIdentifiersList.SignedIdentifiers {
        out.AccessPolicies = append(out.AccessPolicies, ContainerAccessPolicy{
            ID:         sid.ID,
            StartTime:  sid.AccessPolicy.StartTime,
            ExpiryTime: sid.AccessPolicy.ExpiryTime,
            CanRead:    updatePermissions(sid.AccessPolicy.Permission, "r"),
            CanWrite:   updatePermissions(sid.AccessPolicy.Permission, "w"),
            CanDelete:  updatePermissions(sid.AccessPolicy.Permission, "d"),
        })
    }
    return out
}
// DeleteContainerOptions includes options for a delete container operation
type DeleteContainerOptions struct {
    Timeout           uint       // server-side timeout in seconds; 0 omits it
    LeaseID           string     `header:"x-ms-lease-id"`
    IfModifiedSince   *time.Time `header:"If-Modified-Since"`
    IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
    RequestID         string     `header:"x-ms-client-request-id"`
}
// Delete deletes the container with given name on the storage
// account. If the container does not exist returns error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) Delete(options *DeleteContainerOptions) error {
    res, err := c.delete(options)
    if err != nil {
        return err
    }
    defer drainRespBody(res)
    // deletion is asynchronous; the service acknowledges with 202 Accepted
    return checkRespCode(res, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
    res, err := c.delete(options)
    if res == nil {
        return false, err
    }
    defer drainRespBody(res)
    switch res.StatusCode {
    case http.StatusAccepted:
        return true, nil
    case http.StatusNotFound:
        // already gone: not an error for this call
        return false, nil
    }
    return false, err
}
// delete issues the DELETE request backing Delete/DeleteIfExists.
func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
    params := url.Values{"restype": {"container"}}
    hdrs := c.bsc.client.getStandardHeaders()
    if options != nil {
        params = addTimeout(params, options.Timeout)
        hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
    }
    uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
    return c.bsc.client.exec(http.MethodDelete, uri, hdrs, nil, c.bsc.auth)
}
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
    q := mergeParams(params.getParameters(), url.Values{
        "restype": {"container"},
        "comp":    {"list"},
    })
    var uri string
    if c.bsc.client.isServiceSASClient() {
        // service-SAS clients address the container through its SAS URI
        q = mergeParams(q, c.sasuri.Query())
        newURI := c.sasuri
        newURI.RawQuery = q.Encode()
        uri = newURI.String()
    } else {
        uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
    }

    headers := c.bsc.client.getStandardHeaders()
    headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)

    var out BlobListResponse
    resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
    if err != nil {
        return out, err
    }
    defer resp.Body.Close()

    err = xmlUnmarshal(resp.Body, &out)
    // back-link each returned blob to this container so blob methods work
    for i := range out.Blobs {
        out.Blobs[i].Container = c
    }
    return out, err
}
// ContainerMetadataOptions includes options for container metadata
// operations (SetMetadata / GetMetadata).
type ContainerMetadataOptions struct {
	// Timeout is forwarded to addTimeout as the request's timeout query parameter.
	Timeout uint
	// LeaseID is sent as the x-ms-lease-id header (required when the container has an active lease).
	LeaseID string `header:"x-ms-lease-id"`
	// RequestID is sent as the x-ms-client-request-id header for request tracing.
	RequestID string `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the metadata for the specified container with the
// contents of c.Metadata.
//
// Some keys may be converted to Camel-Case before sending. All keys come
// back lower case from GetBlobMetadata; HTTP header names are
// case-insensitive so the case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
	query := url.Values{
		"restype": {"container"},
		"comp":    {"metadata"},
	}
	hdrs := c.bsc.client.addMetadataToHeaders(c.bsc.client.getStandardHeaders(), c.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	resp, err := c.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
// GetMetadata fetches all user-defined metadata for the specified container
// and stores it on c.Metadata.
//
// All metadata keys come back lower case (HTTP header names are
// case-insensitive).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
	query := url.Values{
		"restype": {"container"},
		"comp":    {"metadata"},
	}
	hdrs := c.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	resp, err := c.bsc.client.exec(http.MethodGet, endpoint, hdrs, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}
	c.writeMetadata(resp.Header)
	return nil
}
// writeMetadata refreshes c.Metadata from the given response headers.
func (c *Container) writeMetadata(h http.Header) {
	md := writeMetadata(h)
	c.Metadata = md
}
// generateContainerACLpayload serializes a set of container access policies
// into the XML body expected by the Set Container ACL operation.
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
	ids := make([]SignedIdentifier, 0, len(policies))
	for _, policy := range policies {
		perms := policy.generateContainerPermissions()
		ids = append(ids, convertAccessPolicyToXMLStructs(policy.ID, policy.StartTime, policy.ExpiryTime, perms))
	}
	return xmlMarshal(SignedIdentifiers{SignedIdentifiers: ids})
}
// generateContainerPermissions flattens the policy's boolean access flags
// into the compact permission string, emitting "r", "w" and "d" in that
// fixed order. The public API keeps the friendlier bool flags.
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
	flags := []struct {
		enabled bool
		letter  string
	}{
		{capd.CanRead, "r"},
		{capd.CanWrite, "w"},
		{capd.CanDelete, "d"},
	}
	for _, f := range flags {
		if f.enabled {
			permissions += f.letter
		}
	}
	return permissions
}
// GetProperties updates the properties of the container from the service.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
func (c *Container) GetProperties() error {
	params := url.Values{
		"restype": {"container"},
	}
	headers := c.bsc.client.getStandardHeaders()
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	// Drain the body rather than merely closing it so the underlying HTTP
	// connection can be reused; this matches SetMetadata/GetMetadata and the
	// rest of this package.
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}
	// Refresh the cached properties from the response headers.
	c.Properties.Etag = resp.Header.Get(headerEtag)
	c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
	c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
	c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
	c.Properties.LastModified = resp.Header.Get("Last-Modified")
	c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
	return nil
}

237
vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go

@ -0,0 +1,237 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
)
// Copy status values observed in Blob.Properties.CopyStatus and switched on
// by WaitForCopy below.
const (
	blobCopyStatusPending = "pending"
	blobCopyStatusSuccess = "success"
	blobCopyStatusAborted = "aborted"
	blobCopyStatusFailed  = "failed"
)
// CopyOptions includes the options for a copy blob operation
type CopyOptions struct {
	// Timeout is forwarded to addTimeout as the request's timeout query parameter.
	Timeout uint
	// Source conditions are sent as x-ms-source-* headers (see StartCopy).
	Source CopyOptionsConditions
	// Destiny conditions apply to the destination blob and are sent as plain
	// x-ms-if-* / x-ms-lease-id headers (see StartCopy).
	Destiny CopyOptionsConditions
	// RequestID is sent as the x-ms-client-request-id header.
	RequestID string
}
// IncrementalCopyOptions includes the options for an incremental copy blob operation
type IncrementalCopyOptions struct {
	// Timeout is forwarded to addTimeout as the request's timeout query parameter.
	Timeout uint
	// Destination conditions are sent as x-ms-if-* headers (see IncrementalCopyBlob).
	Destination IncrementalCopyOptionsConditions
	// RequestID is sent as the x-ms-client-request-id header.
	RequestID string
}
// CopyOptionsConditions includes some conditional options in a copy blob
// operation. Zero values (empty string / nil time) mean the corresponding
// condition header is not sent (see StartCopy).
type CopyOptionsConditions struct {
	LeaseID           string
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}
// IncrementalCopyOptionsConditions includes some conditional options in an
// incremental copy blob operation. Zero values mean the corresponding
// condition header is not sent (see IncrementalCopyBlob).
type IncrementalCopyOptionsConditions struct {
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}
// Copy starts a blob copy operation and then blocks until it finishes.
// sourceBlob must be a canonical URL to the blob (as returned by GetURL).
// There is no SLA on blob copy, so this helper works faster on smaller
// files.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
	id, err := b.StartCopy(sourceBlob, options)
	if err == nil {
		err = b.WaitForCopy(id)
	}
	return err
}
// StartCopy kicks off an asynchronous blob copy from sourceBlob (a
// canonical blob URL, as returned by GetURL) and returns the
// service-assigned copy ID.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
	query := url.Values{}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	hdrs["x-ms-copy-source"] = sourceBlob
	hdrs = b.Container.bsc.client.addMetadataToHeaders(hdrs, b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = addToHeaders(hdrs, "x-ms-client-request-id", options.RequestID)
		// Conditions applied to the source blob.
		hdrs = addToHeaders(hdrs, "x-ms-source-lease-id", options.Source.LeaseID)
		hdrs = addTimeToHeaders(hdrs, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
		hdrs = addTimeToHeaders(hdrs, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
		hdrs = addToHeaders(hdrs, "x-ms-source-if-match", options.Source.IfMatch)
		hdrs = addToHeaders(hdrs, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
		// Conditions applied to the destination blob.
		hdrs = addToHeaders(hdrs, "x-ms-lease-id", options.Destiny.LeaseID)
		hdrs = addTimeToHeaders(hdrs, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
		hdrs = addTimeToHeaders(hdrs, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
		hdrs = addToHeaders(hdrs, "x-ms-if-match", options.Destiny.IfMatch)
		hdrs = addToHeaders(hdrs, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}
	if copyID := resp.Header.Get("x-ms-copy-id"); copyID != "" {
		return copyID, nil
	}
	return "", errors.New("Got empty copy id header")
}
// AbortCopyOptions includes the options for an abort blob operation
type AbortCopyOptions struct {
	// Timeout is forwarded to addTimeout as the request's timeout query parameter.
	Timeout uint
	// LeaseID is sent as the x-ms-lease-id header (required if the destination blob has an active lease).
	LeaseID string `header:"x-ms-lease-id"`
	// RequestID is sent as the x-ms-client-request-id header.
	RequestID string `header:"x-ms-client-request-id"`
}
// AbortCopy cancels an in-flight blob copy previously started via
// StartCopy; copyID is the value StartCopy returned. Set options.LeaseID
// when the destination blob has an active lease.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
	query := url.Values{
		"comp":   {"copy"},
		"copyid": {copyID},
	}
	hdrs := b.Container.bsc.client.getStandardHeaders()
	hdrs["x-ms-copy-action"] = "abort"
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// WaitForCopy polls the blob's properties until the copy operation
// identified by copyID succeeds, is aborted, or fails.
//
// The previous implementation retried immediately while the status was
// "pending", issuing GetProperties requests in a tight loop; a short pause
// between polls avoids hammering the service without changing the method's
// observable results.
func (b *Blob) WaitForCopy(copyID string) error {
	for {
		err := b.GetProperties(nil)
		if err != nil {
			return err
		}
		// A different copy ID means another copy operation superseded ours.
		if b.Properties.CopyID != copyID {
			return errBlobCopyIDMismatch
		}
		switch b.Properties.CopyStatus {
		case blobCopyStatusSuccess:
			return nil
		case blobCopyStatusPending:
			// Back off briefly before polling again.
			time.Sleep(100 * time.Millisecond)
			continue
		case blobCopyStatusAborted:
			return errBlobCopyAborted
		case blobCopyStatusFailed:
			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
		default:
			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
		}
	}
}
// IncrementalCopyBlob copies a snapshot of a source blob to the referring
// blob. sourceBlobURL must be a valid URL of the original blob; the
// original blob must be public or addressed with a Shared Access Signature.
// It returns the service-assigned copy ID.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
	params := url.Values{"comp": {"incrementalcopy"}}

	// need formatting to 7 decimal places so it's friendly to Windows and *nix
	snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
	u, err := url.Parse(sourceBlobURL)
	if err != nil {
		return "", err
	}
	query := u.Query()
	query.Add("snapshot", snapshotTimeFormatted)
	encodedQuery := query.Encode()
	// keep the colons of the timestamp literal in the snapshot parameter
	encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
	u.RawQuery = encodedQuery
	snapshotURL := u.String()

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-source"] = snapshotURL

	if options != nil {
		// Assign the result like every other call site does; the previous
		// code discarded addTimeout's return value.
		params = addTimeout(params, options.Timeout)
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
	}

	// get URI of destination blob
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
		return "", err
	}

	copyID := resp.Header.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}

238
vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go

@ -0,0 +1,238 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"net/http"
"net/url"
"sync"
)
// Directory represents a directory on a share.
type Directory struct {
	fsc      *FileServiceClient // client used for all service calls
	Metadata map[string]string  // user-defined metadata; see SetMetadata/FetchAttributes
	Name     string             `xml:"Name"` // empty string for the share's root directory
	parent   *Directory         // nil for the root directory (see buildPath/Create)
	Properties DirectoryProperties
	share    *Share // owning share; supplies the path prefix in buildPath
}
// DirectoryProperties contains various properties of a directory.
// Both values are cached verbatim from the Etag / Last-Modified response
// headers (see updateEtagAndLastModified).
type DirectoryProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
}
// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call. See the linked API reference for
// the exact semantics of each field.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type ListDirsAndFilesParameters struct {
	Prefix     string // name-prefix filter for returned entries
	Marker     string // continuation token from a previous page (see DirsAndFilesListResponse.NextMarker)
	MaxResults uint   // maximum number of entries per page
	Timeout    uint   // server-side timeout for the request
}
// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type DirsAndFilesListResponse struct {
	XMLName     xml.Name    `xml:"EnumerationResults"`
	Xmlns       string      `xml:"xmlns,attr"`
	Marker      string      `xml:"Marker"` // marker this page was requested with
	MaxResults  int64       `xml:"MaxResults"`
	Directories []Directory `xml:"Entries>Directory"` // directory entries of this page
	Files       []File      `xml:"Entries>File"`      // file entries of this page
	NextMarker  string      `xml:"NextMarker"` // pass as Marker to fetch the next page; empty on the last page
}
// buildPath assembles the share-relative URL path of this directory by
// walking the parent links up to the share root (whose Name is empty).
func (d *Directory) buildPath() string {
	suffix := ""
	for node := d; node.Name != ""; node = node.parent {
		suffix = "/" + node.Name + suffix
	}
	return d.share.buildPath() + suffix
}
// Create creates this directory in the associated share; the operation
// fails if a directory with the same name already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) Create(options *FileRequestOptions) error {
	// The root directory exists implicitly with the share — nothing to do.
	if d.parent == nil {
		return nil
	}
	hdrs, err := d.fsc.createResource(d.buildPath(), resourceDirectory, prepareOptions(options), mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
	if err == nil {
		d.updateEtagAndLastModified(hdrs)
	}
	return err
}
// CreateIfNotExists creates this directory under the associated share when
// it is missing. It returns true when the directory was newly created and
// false when it already existed (in which case the local attributes are
// refreshed from the service).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	// The root directory exists implicitly with the share — nothing to do.
	if d.parent == nil {
		return false, nil
	}
	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, prepareOptions(options), nil)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusCreated:
		d.updateEtagAndLastModified(resp.Header)
		return true, nil
	case http.StatusConflict:
		// Already present; pull down its current attributes instead.
		return false, d.FetchAttributes(nil)
	}
	return false, err
}
// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) Delete(options *FileRequestOptions) error {
	// Thin delegation to the file service client.
	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
}
// DeleteIfExists removes this directory when present, reporting whether a
// deletion actually happened.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	// Accepted means deleted; NotFound means there was nothing to delete.
	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// Exists reports whether this directory exists on the service, refreshing
// the cached Etag/Last-Modified headers when it does.
func (d *Directory) Exists() (bool, error) {
	found, hdrs, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
	if found {
		d.updateEtagAndLastModified(hdrs)
	}
	return found, err
}
// FetchAttributes retrieves properties and user metadata for this
// directory via a HEAD request and caches them locally.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
	hdrs, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, prepareOptions(options), http.MethodHead)
	if err != nil {
		return err
	}
	d.updateEtagAndLastModified(hdrs)
	d.Metadata = getMetadataFromHeaders(hdrs)
	return nil
}
// GetDirectoryReference returns a child Directory object for this directory.
func (d *Directory) GetDirectoryReference(name string) *Directory {
	child := &Directory{
		fsc:   d.fsc,
		share: d.share,
	}
	child.Name = name
	child.parent = d
	return child
}
// GetFileReference returns a child File object for this directory.
func (d *Directory) GetFileReference(name string) *File {
	f := &File{
		fsc:    d.fsc,
		parent: d,
		share:  d.share,
		mutex:  &sync.Mutex{},
	}
	f.Name = name
	return f
}
// ListDirsAndFiles returns one page of the files and directories under this
// directory, together with the pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
	query := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
	resp, err := d.fsc.listContent(d.buildPath(), query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	out := &DirsAndFilesListResponse{}
	err = xmlUnmarshal(resp.Body, out)
	return out, err
}
// SetMetadata replaces this directory's metadata on the service with the
// contents of d.Metadata.
//
// Some keys may be converted to Camel-Case before sending. All keys come
// back lower case from GetDirectoryMetadata; HTTP header names are
// case-insensitive so the case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
	hdrs, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
	if err == nil {
		d.updateEtagAndLastModified(hdrs)
	}
	return err
}
// updateEtagAndLastModified caches the Etag and Last-Modified response
// headers on the directory's properties.
func (d *Directory) updateEtagAndLastModified(h http.Header) {
	d.Properties.LastModified = h.Get("Last-Modified")
	d.Properties.Etag = h.Get("Etag")
}
// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private and this method does not check if the directory exists —
// it only assembles the endpoint string locally.
func (d *Directory) URL() string {
	return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}

466
vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go

@ -0,0 +1,466 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
)
// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode  = "PartitionKey" // JSON key carrying the entity's partition key
	rowKeyNode        = "RowKey"       // JSON key carrying the entity's row key
	etagErrorTemplate = "Etag didn't match: %v" // wraps errors on 412 Precondition Failed responses
)
var (
	// errEmptyPayload is returned by Entity.Get when EmptyPayload is requested.
	errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation")
	// errNilPreviousResult / errNilNextLink presumably guard query-result
	// pagination; their call sites are outside this file excerpt.
	errNilPreviousResult = errors.New("The previous results page is nil")
	errNilNextLink       = errors.New("There are no more pages in this query results")
)
// Entity represents an entity inside an Azure table.
type Entity struct {
	Table        *Table    // owning table; set by Table.GetEntityReference
	PartitionKey string    // identifier half 1; preserved across Get/Unmarshal
	RowKey       string    // identifier half 2; preserved across Get/Unmarshal
	TimeStamp    time.Time // service timestamp; see updateTimestamp and UnmarshalJSON
	// OData metadata, filled from the "odata.*" response keys in UnmarshalJSON.
	OdataMetadata string
	OdataType     string
	OdataID       string
	OdataEtag     string // current etag; used by addIfMatchHeader for optimistic concurrency
	OdataEditLink string
	Properties    map[string]interface{} // user-defined fields of the entity
}
// GetEntityReference returns an Entity object with the specified
// partition key and row key, bound to this table.
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
	e := &Entity{Table: t}
	e.PartitionKey = partitionKey
	e.RowKey = rowKey
	return e
}
// EntityOptions includes options for entity operations.
type EntityOptions struct {
	// Timeout is forwarded to addTimeout as the request's timeout query parameter.
	Timeout uint
	// RequestID is sent as the x-ms-client-request-id header.
	RequestID string `header:"x-ms-client-request-id"`
}
// GetEntityOptions includes options for a get entity operation
type GetEntityOptions struct {
	// Select lists property names to project via the $select query option.
	Select []string
	// RequestID is sent as the x-ms-client-request-id header.
	RequestID string `header:"x-ms-client-request-id"`
}
// Get fetches the referenced entity, optionally restricting the returned
// columns via options.Select. ml controls the amount of OData metadata in
// the response and must not be EmptyPayload.
// See:
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
	if ml == EmptyPayload {
		return errEmptyPayload
	}
	// Preserve the identifying keys: the unmarshal below overwrites the
	// struct, and the keys may be absent from a $select-restricted response.
	savedPK, savedRK := e.PartitionKey, e.RowKey

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	hdrs := e.Table.tsc.client.getStandardHeaders()
	hdrs[headerAccept] = string(ml)
	if options != nil {
		if len(options.Select) > 0 {
			query.Add("$select", strings.Join(options.Select, ","))
		}
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodGet, endpoint, hdrs, nil, e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if err = json.Unmarshal(raw, e); err != nil {
		return err
	}
	e.PartitionKey, e.RowKey = savedPK, savedRK
	return nil
}
// Insert inserts the referenced entity in its table; it fails when an
// entity with the same PartitionKey and RowKey already exists. ml controls
// how much metadata the service echoes back: with EmptyPayload the local
// entity is left untouched, otherwise it is refreshed from the response.
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
	query, hdrs := options.getParameters()
	hdrs = mergeHeaders(hdrs, e.Table.tsc.client.getStandardHeaders())

	payload, err := json.Marshal(e)
	if err != nil {
		return err
	}
	hdrs = addBodyRelatedHeaders(hdrs, len(payload))
	hdrs = addReturnContentHeaders(hdrs, ml)

	endpoint := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodPost, endpoint, hdrs, bytes.NewReader(payload), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if ml == EmptyPayload {
		return checkRespCode(resp, []int{http.StatusNoContent})
	}
	if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return err
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return e.UnmarshalJSON(data)
}
// Update updates the contents of an entity. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or if the ETag is different
// than the one in Azure.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
func (e *Entity) Update(force bool, options *EntityOptions) error {
	// Full replace: PUT sends the whole entity (see updateMerge).
	return e.updateMerge(force, http.MethodPut, options)
}
// Merge merges the contents of entity specified with PartitionKey and RowKey
// with the content specified in Properties.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
func (e *Entity) Merge(force bool, options *EntityOptions) error {
	// Partial update: the MERGE verb leaves unmentioned properties intact.
	return e.updateMerge(force, "MERGE", options)
}
// Delete deletes the entity.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure
// (unless force is true, which sends If-Match: *).
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
func (e *Entity) Delete(force bool, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	headers = addIfMatchHeader(headers, force, e.OdataEtag)
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
	if err != nil {
		// resp can be nil on transport-level failures; the previous code
		// dereferenced it unconditionally here and could panic.
		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
			return fmt.Errorf(etagErrorTemplate, err)
		}
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return e.updateTimestamp(resp.Header)
}
// InsertOrReplace inserts an entity or replaces the existing one.
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
	// PUT replaces the whole entity when it already exists (see insertOr).
	return e.insertOr(http.MethodPut, options)
}
// InsertOrMerge inserts an entity or merges the existing one.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
	// MERGE keeps existing properties that are absent from this entity.
	return e.insertOr("MERGE", options)
}
// buildPath returns the table-relative resource path that addresses this
// entity by its partition and row keys.
func (e *Entity) buildPath() string {
	keys := fmt.Sprintf("(PartitionKey='%s', RowKey='%s')", e.PartitionKey, e.RowKey)
	return e.Table.buildPath() + keys
}
// MarshalJSON is a custom marshaller for entity.
//
// It flattens PartitionKey, RowKey and every user property into a single
// JSON object, adding "<name>@odata.type" annotations for property types
// the Table service needs declared explicitly (binary, datetime, GUID,
// int64, double). Annotation keys placed directly in Properties by the
// caller are validated rather than re-derived.
func (e *Entity) MarshalJSON() ([]byte, error) {
	completeMap := map[string]interface{}{}
	completeMap[partitionKeyNode] = e.PartitionKey
	completeMap[rowKeyNode] = e.RowKey
	for k, v := range e.Properties {
		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
		switch t := v.(type) {
		case []byte:
			completeMap[typeKey] = OdataBinary
			completeMap[k] = t
		case time.Time:
			completeMap[typeKey] = OdataDateTime
			completeMap[k] = t.Format(time.RFC3339Nano)
		case uuid.UUID:
			completeMap[typeKey] = OdataGUID
			completeMap[k] = t.String()
		case int64:
			completeMap[typeKey] = OdataInt64
			completeMap[k] = fmt.Sprintf("%v", v)
		case float32, float64:
			completeMap[typeKey] = OdataDouble
			completeMap[k] = fmt.Sprintf("%v", v)
		default:
			// Types the service infers natively need no annotation.
			completeMap[k] = v
		}
		// If this key is itself an "...@odata.type" annotation supplied by
		// the caller, check that its value is one we support and that the
		// property it annotates is actually present.
		if strings.HasSuffix(k, OdataTypeSuffix) {
			if !(completeMap[k] == OdataBinary ||
				completeMap[k] == OdataDateTime ||
				completeMap[k] == OdataGUID ||
				completeMap[k] == OdataInt64 ||
				completeMap[k] == OdataDouble) {
				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
			}
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			if _, ok := completeMap[valueKey]; !ok {
				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
			}
		}
	}
	return json.Marshal(completeMap)
}
// UnmarshalJSON is a custom unmarshaller for entities.
//
// It decodes the payload into a generic map, then peels off the "odata.*"
// metadata keys, the PartitionKey/RowKey identifiers and the Timestamp.
// Remaining "<name>@odata.type" annotations drive the conversion of their
// companion property values (base64 binary, RFC3339-style datetime, GUID,
// int64, double); everything left over lands in e.Properties as-is.
func (e *Entity) UnmarshalJSON(data []byte) error {
	errorTemplate := "Deserializing error: %v"
	props := map[string]interface{}{}
	err := json.Unmarshal(data, &props)
	if err != nil {
		return err
	}
	// deserialize metadata (each call also removes the key from props)
	e.OdataMetadata = stringFromMap(props, "odata.metadata")
	e.OdataType = stringFromMap(props, "odata.type")
	e.OdataID = stringFromMap(props, "odata.id")
	e.OdataEtag = stringFromMap(props, "odata.etag")
	e.OdataEditLink = stringFromMap(props, "odata.editLink")
	e.PartitionKey = stringFromMap(props, partitionKeyNode)
	e.RowKey = stringFromMap(props, rowKeyNode)
	// deserialize timestamp
	timeStamp, ok := props["Timestamp"]
	if ok {
		str, ok := timeStamp.(string)
		if !ok {
			return fmt.Errorf(errorTemplate, "Timestamp casting error")
		}
		t, err := time.Parse(time.RFC3339Nano, str)
		if err != nil {
			return fmt.Errorf(errorTemplate, err)
		}
		e.TimeStamp = t
	}
	delete(props, "Timestamp")
	delete(props, "Timestamp@odata.type")
	// deserialize entity (user defined fields); deleting annotation keys
	// while ranging is safe in Go
	for k, v := range props {
		if strings.HasSuffix(k, OdataTypeSuffix) {
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			// annotated values always arrive as JSON strings
			str, ok := props[valueKey].(string)
			if !ok {
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
			}
			switch v {
			case OdataBinary:
				props[valueKey], err = base64.StdEncoding.DecodeString(str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
			case OdataDateTime:
				t, err := time.Parse("2006-01-02T15:04:05Z", str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = t
			case OdataGUID:
				props[valueKey] = uuid.FromStringOrNil(str)
			case OdataInt64:
				i, err := strconv.ParseInt(str, 10, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = i
			case OdataDouble:
				f, err := strconv.ParseFloat(str, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = f
			default:
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
			}
			delete(props, k)
		}
	}
	e.Properties = props
	return nil
}
// getAndDelete pops key from props, returning its value, or nil when the
// key is absent.
func getAndDelete(props map[string]interface{}, key string) interface{} {
	value, ok := props[key]
	if !ok {
		return nil
	}
	delete(props, key)
	return value
}
// addIfMatchHeader sets the If-Match header: "*" to force the operation
// regardless of concurrent edits, otherwise the entity's current etag.
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
	match := etag
	if force {
		match = "*"
	}
	h[headerIfMatch] = match
	return h
}
// updateEtagAndTimestamp refreshes e.OdataEtag from the Etag response
// header, then refreshes e.TimeStamp from the Date header via updateTimestamp.
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
	e.OdataEtag = headers.Get(headerEtag)
	return e.updateTimestamp(headers)
}
// updateTimestamp refreshes e.TimeStamp from the RFC1123-formatted Date
// response header.
func (e *Entity) updateTimestamp(headers http.Header) error {
	parsed, err := time.Parse(time.RFC1123, headers.Get(headerDate))
	if err != nil {
		return fmt.Errorf("Update timestamp error: %v", err)
	}
	e.TimeStamp = parsed
	return nil
}
// insertOr implements InsertOrReplace (PUT) and InsertOrMerge (MERGE): it
// sends the marshalled entity without an If-Match condition and refreshes
// the local etag and timestamp on success.
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
	query, hdrs := options.getParameters()
	hdrs = mergeHeaders(hdrs, e.Table.tsc.client.getStandardHeaders())

	payload, err := json.Marshal(e)
	if err != nil {
		return err
	}
	hdrs = addBodyRelatedHeaders(hdrs, len(payload))
	hdrs = addReturnContentHeaders(hdrs, EmptyPayload)

	endpoint := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, endpoint, hdrs, bytes.NewReader(payload), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}
	return e.updateEtagAndTimestamp(resp.Header)
}
// updateMerge executes an update- or merge-style request (verb) for this
// entity. When force is false the request is made conditional on the
// entity's current Etag via the If-Match header, and a 412 Precondition
// Failed response is reported through etagErrorTemplate. On success the
// entity's Etag and timestamp are refreshed from the response headers.
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addIfMatchHeader(headers, force, e.OdataEtag)
	headers = addReturnContentHeaders(headers, EmptyPayload)
	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		// resp is nil on transport-level failures; guard before reading
		// StatusCode to avoid a nil-pointer panic.
		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
			return fmt.Errorf(etagErrorTemplate, err)
		}
		return err
	}
	defer drainRespBody(resp)
	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}
	return e.updateEtagAndTimestamp(resp.Header)
}
// stringFromMap removes key from props and returns its value as a string.
// Returns "" when the key is absent or the stored value is not a string
// (the previous unchecked type assertion panicked on non-string values,
// e.g. a malformed service response).
func stringFromMap(props map[string]interface{}, key string) string {
	if s, ok := getAndDelete(props, key).(string); ok {
		return s
	}
	return ""
}
// getParameters converts EntityOptions into the URL query values and
// request headers to send. A nil receiver yields empty values and headers.
func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
	if options == nil {
		return url.Values{}, map[string]string{}
	}
	query := addTimeout(url.Values{}, options.Timeout)
	return query, headersFromStruct(*options)
}

484
vendor/github.com/Azure/azure-sdk-for-go/storage/file.go

@ -0,0 +1,484 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"sync"
)
// Internal size limits for Azure Files operations.
const (
	fourMB = uint64(4194304)       // 4 MiB
	oneTB  = uint64(1099511627776) // 1 TiB
)

// MaxRangeSize defines the maximum size in bytes for a file range.
const MaxRangeSize = fourMB

// MaxFileSize defines the maximum size in bytes for a file.
const MaxFileSize = oneTB
// File represents a file on a share.
type File struct {
	fsc      *FileServiceClient // client used to issue requests for this file
	Metadata map[string]string  // user metadata sent on Create/CopyFile and refreshed by FetchAttributes
	Name     string             `xml:"Name"`
	parent   *Directory // parent directory; combined with Name to build request paths
	Properties FileProperties `xml:"Properties"`
	share      *Share
	FileCopyProperties FileCopyState // copy ID/status recorded by CopyFile
	mutex              *sync.Mutex   // guards property updates from concurrent WriteRange calls
}
// FileProperties contains various properties of a file.
// Fields with a `header` tag are sent to the service when properties are
// written (via headersFromStruct); untagged fields are only populated
// from responses.
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string // no header tag: read from responses only
	Language     string `header:"x-ms-content-language"`
	LastModified string // no header tag: read from responses only
	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}
// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string `header:"x-ms-copy-id"`
	Progress       string
	Source         string
	Status         string `header:"x-ms-copy-status"`
	StatusDesc     string
}
// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser // file content; the caller must close it
	ContentMD5 string        // MD5 of the returned range, when requested
}
// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but could expand.
type FileRequestOptions struct {
	Timeout uint // timeout duration in seconds.
}
// prepareOptions converts FileRequestOptions into URL query values,
// applying the timeout when options is non-nil.
func prepareOptions(options *FileRequestOptions) url.Values {
	if options == nil {
		return url.Values{}
	}
	return addTimeout(url.Values{}, options.Timeout)
}
// FileRanges contains a list of file range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRanges struct {
	ContentLength uint64 // parsed from the x-ms-content-length response header
	LastModified  string
	ETag          string
	FileRanges    []FileRange `xml:"Range"`
}
// FileRange contains range information for a file. Both bounds are
// inclusive byte offsets (see String, which renders "bytes=Start-End").
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}
// String renders the range in HTTP Range-header form, e.g. "bytes=0-1023".
func (fr FileRange) String() string {
	return "bytes=" + strconv.FormatUint(fr.Start, 10) + "-" + strconv.FormatUint(fr.End, 10)
}
// buildPath returns the full request path for this file: the parent
// directory's path followed by the file name.
func (f *File) buildPath() string {
	parentPath := f.parent.buildPath()
	return parentPath + "/" + f.Name
}
// ClearRange releases the specified range of space in this file and, on
// success, refreshes the file's Etag and last-modified date.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
	var timeout *uint
	if options != nil {
		timeout = &options.Timeout
	}

	respHeaders, err := f.modifyRange(nil, fileRange, timeout, nil)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(respHeaders)
	return nil
}
// Create allocates this file on the share with the given maximum size
// (at most 1TB), replacing any existing file of the same name. The file's
// metadata and properties are sent with the request, and its cached
// length, Etag and last-modified date are updated on success.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
	if maxSize > oneTB {
		return fmt.Errorf("max file size is 1TB")
	}

	params := prepareOptions(options)
	extraHeaders := headersFromStruct(f.Properties)
	extraHeaders["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
	extraHeaders["x-ms-type"] = "file"

	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	f.Properties.Length = maxSize
	f.updateEtagAndLastModified(outputHeaders)
	return nil
}
// CopyFile copies a file/blob from sourceURL to the path of this file,
// recording the returned copy ID and status on FileCopyProperties.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
	params := prepareOptions(options)
	extraHeaders := map[string]string{
		"x-ms-type":        "file",
		"x-ms-copy-source": sourceURL,
	}

	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
	return nil
}
// Delete immediately removes this file from the storage account.
// It is a thin wrapper over the service client's deleteResource.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) Delete(options *FileRequestOptions) error {
	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
}
// DeleteIfExists removes this file if it exists and reports whether it
// was present; a 404 from the service is not treated as an error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)

	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// GetFileOptions includes options for a get file operation
type GetFileOptions struct {
	Timeout       uint // timeout duration in seconds
	GetContentMD5 bool // request an MD5 of the returned range (ranges of at most 4MB only)
}
// DownloadToStream fetches the whole file and returns its content as a
// stream; the caller is responsible for closing the returned ReadCloser.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
	params := prepareOptions(options)
	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		drainRespBody(resp)
		return nil, err
	}
	return resp.Body, nil
}
// DownloadRangeToStream fetches the given byte range of this file,
// optionally requesting an MD5 hash of the content (only allowed for
// ranges of at most 4MB). The caller must close fs.Body.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
	params := url.Values{}
	extraHeaders := map[string]string{
		"Range": fileRange.String(),
	}

	wantMD5 := false
	if options != nil {
		wantMD5 = options.GetContentMD5
		if wantMD5 {
			if isRangeTooBig(fileRange) {
				return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
			}
			extraHeaders["x-ms-range-get-content-md5"] = "true"
		}
		params = addTimeout(params, options.Timeout)
	}

	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
	if err != nil {
		return fs, err
	}
	if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
		drainRespBody(resp)
		return fs, err
	}

	fs.Body = resp.Body
	if wantMD5 {
		fs.ContentMD5 = resp.Header.Get("Content-MD5")
	}
	return fs, nil
}
// Exists reports whether this file exists on the share. When it does,
// the file's Etag, last-modified date and properties are refreshed from
// the response headers.
func (f *File) Exists() (bool, error) {
	exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
	if !exists {
		return false, err
	}
	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	return true, err
}
// FetchAttributes refreshes this file's metadata, properties, Etag and
// last-modified date from the service via a HEAD request.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
func (f *File) FetchAttributes(options *FileRequestOptions) error {
	params := prepareOptions(options)
	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	f.Metadata = getMetadataFromHeaders(headers)
	return nil
}
// isRangeTooBig reports whether fileRange spans more than 4MB. Both
// bounds are inclusive, so the range size is End-Start+1 and the range is
// too big when End-Start >= fourMB. The previous strict '>' comparison
// was off by one and accepted ranges of exactly 4MB+1 bytes, which the
// Put Range API rejects.
func isRangeTooBig(fileRange FileRange) bool {
	return fileRange.End-fileRange.Start >= fourMB
}
// ListRangesOptions includes options for a list file ranges operation
type ListRangesOptions struct {
	Timeout   uint       // timeout duration in seconds
	ListRange *FileRange // when non-nil, restricts the listing to this range (sent as a Range header)
}
// ListRanges returns the list of valid ranges for this file, optionally
// restricted to the range supplied via options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
	params := url.Values{"comp": {"rangelist"}}

	var headers map[string]string
	if options != nil {
		params = addTimeout(params, options.Timeout)
		if options.ListRange != nil {
			headers = map[string]string{"Range": options.ListRange.String()}
		}
	}

	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	cl, err := strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64)
	if err != nil {
		// Drain the body before bailing so the connection can be reused.
		ioutil.ReadAll(resp.Body)
		return nil, err
	}

	out := FileRanges{
		ContentLength: cl,
		ETag:          resp.Header.Get("ETag"),
		LastModified:  resp.Header.Get("Last-Modified"),
	}
	err = xmlUnmarshal(resp.Body, &out)
	return &out, err
}
// modifyRange issues a Put Range request for fileRange. A nil bytes
// reader clears the range; a non-nil reader updates it with content
// whose length must equal (End-Start)+1 and may be at most 4MB.
// contentMD5, when non-nil, is sent as a Content-MD5 header. Returns
// the response headers on success.
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	params := url.Values{"comp": {"range"}}
	if timeout != nil {
		params = addTimeout(params, *timeout)
	}
	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)

	// A nil reader means "clear" with no payload; otherwise this is an
	// "update" carrying exactly the inclusive range's worth of bytes.
	write, cl := "clear", uint64(0)
	if bytes != nil {
		write, cl = "update", (fileRange.End-fileRange.Start)+1
	}

	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}
	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
}
// SetMetadata replaces the metadata for this file with f.Metadata and
// refreshes the file's Etag and last-modified date on success.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
func (f *File) SetMetadata(options *FileRequestOptions) error {
	mdHeaders := mergeMDIntoExtraHeaders(f.Metadata, nil)
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mdHeaders, options)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
// SetProperties sends f.Properties as system properties for this file and
// refreshes the file's Etag and last-modified date on success.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetFileProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
func (f *File) SetProperties(options *FileRequestOptions) error {
	propHeaders := headersFromStruct(f.Properties)
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, propHeaders, options)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
// updateEtagAndLastModified caches the Etag and Last-Modified response
// headers on the file's properties.
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}
// updateProperties refreshes the file's cached properties (length,
// content headers, Etag and last-modified date) from the given response
// headers. A missing or malformed Content-Length leaves the current
// length untouched.
func (f *File) updateProperties(header http.Header) {
	if size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64); err == nil {
		f.Properties.Length = size
	}

	f.updateEtagAndLastModified(header)
	f.Properties.CacheControl = header.Get("Cache-Control")
	f.Properties.Disposition = header.Get("Content-Disposition")
	f.Properties.Encoding = header.Get("Content-Encoding")
	f.Properties.Language = header.Get("Content-Language")
	f.Properties.MD5 = header.Get("Content-MD5")
	f.Properties.Type = header.Get("Content-Type")
}
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}
// WriteRangeOptions includes options for a write file range operation
type WriteRangeOptions struct {
	Timeout    uint   // timeout duration in seconds
	ContentMD5 string // optional MD5 of the uploaded content, sent as Content-MD5
}
// WriteRange uploads a range of bytes to this file, with an optional MD5
// hash of the content supplied via options. The reader must produce
// exactly (rangeEnd - rangeStart) + 1 bytes, with a maximum size of 4MB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
	if bytes == nil {
		return errors.New("bytes cannot be nil")
	}

	var timeout *uint
	var md5 *string
	if options != nil {
		timeout = &options.Timeout
		md5 = &options.ContentMD5
	}

	headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
	if err != nil {
		return err
	}

	// Multiple goroutines may legally call WriteRange concurrently on the
	// same *File (e.g. writing non-overlapping ranges), so property
	// updates are guarded by the file's mutex.
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.updateEtagAndLastModified(headers)
	return nil
}

338
vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go

@ -0,0 +1,338 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
	client Client         // underlying storage client used to build endpoints and execute requests
	auth   authentication // authentication scheme passed to client.exec
}
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call. Zero-valued fields are omitted from the request query
// (see getParameters).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ListSharesParameters struct {
	Prefix     string
	Marker     string
	Include    string
	MaxResults uint
	Timeout    uint
}
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ShareListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"` // pagination token for the next ListShares call
	MaxResults int64    `xml:"MaxResults"`
	Shares     []Share  `xml:"Shares>Share"`
}
// compType enumerates the values of the "comp" query parameter used by
// the file service REST API; compNone omits the parameter entirely.
type compType string

const (
	compNone       compType = ""
	compList       compType = "list"
	compMetadata   compType = "metadata"
	compProperties compType = "properties"
	compRangeList  compType = "rangelist"
)

// String returns the raw query-parameter value for the component.
func (ct compType) String() string {
	return string(ct)
}
// resourceType enumerates the values of the "restype" query parameter;
// resourceFile is the empty string and omits the parameter (see
// getURLInitValues).
type resourceType string

const (
	resourceDirectory resourceType = "directory"
	resourceFile      resourceType = ""
	resourceShare     resourceType = "share"
)

// String returns the raw query-parameter value for the resource type.
func (rt resourceType) String() string {
	return string(rt)
}
// getParameters converts the list-shares parameters into URL query
// values, omitting every field left at its zero value.
func (p ListSharesParameters) getParameters() url.Values {
	out := url.Values{}

	setIfPresent := func(key, value string) {
		if value != "" {
			out.Set(key, value)
		}
	}
	setIfPresent("prefix", p.Prefix)
	setIfPresent("marker", p.Marker)
	setIfPresent("include", p.Include)

	if p.MaxResults != 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout != 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}
	return out
}
// getParameters converts the list-dirs-and-files parameters into URL
// query values, omitting zero-valued fields and applying the timeout.
func (p ListDirsAndFilesParameters) getParameters() url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	return addTimeout(out, p.Timeout)
}
// getURLInitValues returns the base query values for a request: "comp"
// when a component is specified, and "restype" for non-file resources
// (resourceFile is the empty string and needs no restype parameter).
func getURLInitValues(comp compType, res resourceType) url.Values {
	values := url.Values{}
	if comp != compNone {
		values.Set("comp", comp.String())
	}
	if res != resourceFile {
		values.Set("restype", res.String())
	}
	return values
}
// GetShareReference returns a local Share object for the specified share
// name; no request is made. The share's quota starts out unset (-1).
func (f *FileServiceClient) GetShareReference(name string) *Share {
	share := Share{
		fsc:  f,
		Name: name,
	}
	share.Properties.Quota = -1
	return &share
}
// ListShares returns one page of shares in the storage account along with
// the pagination token and other response details. Each returned Share is
// wired to this service client.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})

	resp, err := f.listContent("", q, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var out ShareListResponse
	err = xmlUnmarshal(resp.Body, &out)

	// Attach our client to the newly created Share objects.
	for i := range out.Shares {
		out.Shares[i].fsc = &f
	}
	return &out, err
}
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return f.client.getServiceProperties(fileServiceName, f.auth)
}
// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
	return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
// listContent performs a GET against the given directory or share path
// and returns the raw response for the caller to decode; the caller owns
// closing the body. Non-200 responses are drained and surfaced as errors.
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		drainRespBody(resp)
		return nil, err
	}
	return resp, nil
}
// resourceExists issues a HEAD request for the given path and reports
// whether the resource exists, returning the response headers when a
// definitive 200 or 404 answer is received. Any other outcome surfaces
// the transport error.
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return false, nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
	resp, err := f.client.exec(http.MethodHead, uri, f.client.getStandardHeaders(), nil, f.auth)
	if resp == nil {
		return false, nil, err
	}
	defer drainRespBody(resp)

	switch resp.StatusCode {
	case http.StatusOK:
		return true, resp.Header, nil
	case http.StatusNotFound:
		return false, resp.Header, nil
	}
	return false, nil, err
}
// createResource PUTs the specified resource, drains the response body,
// and returns the response headers after verifying the status code
// against expectedResponseCodes.
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
	resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)

	return resp.Header, checkRespCode(resp, expectedResponseCodes)
}
// createResourceNoClose PUTs the specified resource and returns the raw
// response, leaving the body open for the caller to close.
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	combinedParams := mergeParams(getURLInitValues(compNone, res), urlParams)
	uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
	headers := mergeHeaders(f.client.getStandardHeaders(), f.client.protectUserAgent(extraHeaders))

	return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}
// getResourceHeaders performs verb against the specified directory or
// share and returns only the response headers; the body is drained.
// Non-200 responses become errors.
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
	resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	return resp.Header, nil
}
// getResourceNoClose fetches the specified resource with the given verb
// and returns the raw response, leaving the body open for the caller.
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, mergeParams(params, getURLInitValues(comp, res)))
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
	return f.client.exec(verb, uri, headers, nil, f.auth)
}
// deleteResource deletes the specified resource, drains the response
// body, and returns an error unless the service replied 202 Accepted.
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
	resp, err := f.deleteResourceNoClose(path, res, options)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusAccepted})
}
// deleteResourceNoClose issues the DELETE request and returns the raw
// response; the caller is responsible for closing the body.
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, params)
	return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}
// mergeMDIntoExtraHeaders copies every metadata entry into extraHeaders
// under the user-defined-metadata header prefix and returns the map.
// When both inputs are nil it returns nil; otherwise a map is allocated
// as needed.
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
	if metadata == nil && extraHeaders == nil {
		return nil
	}
	if extraHeaders == nil {
		extraHeaders = make(map[string]string, len(metadata))
	}
	for k, v := range metadata {
		extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
	}
	return extraHeaders
}
// setResourceHeaders PUTs extra header data (e.g. metadata or properties)
// for the specified resource, drains the body, and returns the response
// headers after checking for a 200 response.
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, params)
	headers := mergeHeaders(f.client.getStandardHeaders(), f.client.protectUserAgent(extraHeaders))

	resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)

	return resp.Header, checkRespCode(resp, []int{http.StatusOK})
}
// checkForStorageEmulator returns an error when the client targets the
// Azure Storage Emulator, which does not support the File service.
func (f FileServiceClient) checkForStorageEmulator() error {
	if f.client.accountName != StorageEmulatorAccountName {
		return nil
	}
	return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
}

201
vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go

@ -0,0 +1,201 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"net/http"
"net/url"
"strconv"
"time"
)
// lease constants.
const (
	leaseHeaderPrefix = "x-ms-lease-"
	headerLeaseID     = "x-ms-lease-id"
	leaseAction       = "x-ms-lease-action"       // header selecting which lease verb to perform
	leaseBreakPeriod  = "x-ms-lease-break-period" // header carrying the break period in seconds
	leaseDuration     = "x-ms-lease-duration"     // header carrying the lease duration in seconds
	leaseProposedID   = "x-ms-proposed-lease-id"
	leaseTime         = "x-ms-lease-time" // response header: remaining lease time in seconds

	// values accepted by the x-ms-lease-action header
	acquireLease = "acquire"
	renewLease   = "renew"
	changeLease  = "change"
	releaseLease = "release"
	breakLease   = "break"
)
// leaseCommonPut is the shared PUT helper for the lease operations
// (acquire/renew/change/release/break). It sends the lease request and
// returns the response headers after verifying expectedStatus.
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
	params := url.Values{"comp": {"lease"}}
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
		return nil, err
	}
	return resp.Header, nil
}
// LeaseOptions includes options for all operations regarding leasing blobs
// Header-tagged fields are sent as request headers (via headersFromStruct).
type LeaseOptions struct {
	Timeout           uint       // timeout duration in seconds
	Origin            string     `header:"Origin"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// AcquireLease creates a lease on this blob and returns the acquired
// lease ID.
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds
// is 15 and the maximum non-infinite value is 60; out-of-range requests
// are clamped. Provide -1 for an infinite lease.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = acquireLease

	switch {
	case leaseTimeInSeconds == -1:
		// -1 requests an infinite lease; pass it through untouched.
	case leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12":
		leaseTimeInSeconds = 60
	case leaseTimeInSeconds < 15:
		leaseTimeInSeconds = 15
	}
	headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)

	if proposedLeaseID != "" {
		headers[leaseProposedID] = proposedLeaseID
	}

	respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
	if err != nil {
		return "", err
	}

	returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
	if returnedLeaseID == "" {
		return "", errors.New("LeaseID not returned")
	}
	return returnedLeaseID, nil
}
// BreakLease breaks the lease for a blob
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	return b.breakLeaseCommon(headers, options)
}
// BreakLeaseWithBreakPeriod breaks the lease for a blob
// breakPeriodInSeconds is used to determine how long until new lease can be created.
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
	return b.breakLeaseCommon(headers, options)
}
// breakLeaseCommon sends the break-lease request shared by BreakLease and
// BreakLeaseWithBreakPeriod, then parses the remaining lease time in
// seconds from the x-ms-lease-time response header (0 when absent).
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
	respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
	if err != nil {
		return 0, err
	}

	if s := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)); s != "" {
		breakTimeout, err = strconv.Atoi(s)
		if err != nil {
			return 0, err
		}
	}
	return breakTimeout, nil
}
// ChangeLease swaps the lease on this blob from currentLeaseID to
// proposedLeaseID and returns the new lease ID.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = changeLease
	headers[headerLeaseID] = currentLeaseID
	headers[leaseProposedID] = proposedLeaseID

	respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
	if err != nil {
		return "", err
	}

	newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
	if newLeaseID == "" {
		return "", errors.New("LeaseID not returned")
	}
	return newLeaseID, nil
}
// ReleaseLease releases the lease for a blob.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
	h := b.Container.bsc.client.getStandardHeaders()
	h[leaseAction] = releaseLease
	h[headerLeaseID] = currentLeaseID
	// Only success/failure matters here; the response headers are unused.
	_, err := b.leaseCommonPut(h, http.StatusOK, options)
	return err
}
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
	h := b.Container.bsc.client.getStandardHeaders()
	h[leaseAction] = renewLease
	h[headerLeaseID] = currentLeaseID
	// Only success/failure matters here; the response headers are unused.
	_, err := b.leaseCommonPut(h, http.StatusOK, options)
	return err
}

171
vendor/github.com/Azure/azure-sdk-for-go/storage/message.go

@ -0,0 +1,171 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
)
// Message represents an Azure queue message. The XML tags map the
// fields onto the queue service's QueueMessage wire format.
type Message struct {
	Queue        *Queue      // parent queue; used to build request paths and issue calls
	Text         string      `xml:"MessageText"`
	ID           string      `xml:"MessageId"`
	Insertion    TimeRFC1123 `xml:"InsertionTime"`
	Expiration   TimeRFC1123 `xml:"ExpirationTime"`
	PopReceipt   string      `xml:"PopReceipt"` // required to update/delete a dequeued message
	NextVisible  TimeRFC1123 `xml:"TimeNextVisible"`
	DequeueCount int         `xml:"DequeueCount"`
}
// buildPath returns the URI path of this message inside its queue's
// messages collection.
func (m *Message) buildPath() string {
	return m.Queue.buildPathMessages() + "/" + m.ID
}
// PutMessageOptions is the set of options that can be specified for a Put
// Message operation. A zero struct does not use any preferences for the
// request.
type PutMessageOptions struct {
	Timeout           uint   // request timeout in seconds; 0 omits it from the query
	VisibilityTimeout int    // sent as the visibilitytimeout query parameter; 0 omits it
	MessageTTL        int    // sent as the messagettl query parameter; 0 omits it
	RequestID         string `header:"x-ms-client-request-id"`
}
// Put operation adds a new message to the back of the message queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
func (m *Message) Put(options *PutMessageOptions) error {
	params := url.Values{}
	headers := m.Queue.qsc.client.getStandardHeaders()

	// The message text is sent as an XML-wrapped request body.
	body, length, err := xmlMarshal(putMessageRequest{MessageText: m.Text})
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(length)

	if options != nil {
		if options.VisibilityTimeout != 0 {
			params.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		if options.MessageTTL != 0 {
			params.Set("messagettl", strconv.Itoa(options.MessageTTL))
		}
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), params)
	resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return err
	}
	// The service echoes the enqueued message back; refresh our fields
	// (ID, pop receipt, insertion time, ...) from the response.
	return xmlUnmarshal(resp.Body, m)
}
// UpdateMessageOptions is the set of options that can be specified for an
// Update Message operation. A zero struct does not use any preferences for
// the request.
type UpdateMessageOptions struct {
	Timeout           uint   // request timeout in seconds; 0 omits it from the query
	VisibilityTimeout int    // sent as the visibilitytimeout query parameter; 0 keeps the default of "0"
	RequestID         string `header:"x-ms-client-request-id"`
}
// Update operation updates the specified message.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
func (m *Message) Update(options *UpdateMessageOptions) error {
	params := url.Values{}
	if m.PopReceipt != "" {
		params.Set("popreceipt", m.PopReceipt)
	}

	headers := m.Queue.qsc.client.getStandardHeaders()
	body, length, err := xmlMarshal(putMessageRequest{MessageText: m.Text})
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(length)

	// visibilitytimeout is required for Update (zero or greater), so set
	// the default here and let options override it below.
	params.Set("visibilitytimeout", "0")

	if options != nil {
		if options.VisibilityTimeout != 0 {
			params.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
	resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	// Refresh the pop receipt and the next-visible time from the response.
	m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
	if next := resp.Header.Get("x-ms-time-next-visible"); next != "" {
		t, err := time.Parse(time.RFC1123, next)
		if err != nil {
			return err
		}
		m.NextVisible = TimeRFC1123(t)
	}
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// Delete operation deletes the specified message using its pop receipt.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (m *Message) Delete(options *QueueServiceOptions) error {
	query := url.Values{"popreceipt": {m.PopReceipt}}
	headers := m.Queue.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
	resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// putMessageRequest is the XML request body for Put/Update Message calls.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}

48
vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go

@ -0,0 +1,48 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// MetadataLevel determines if operations should return a payload,
// and its level of detail.
type MetadataLevel string

// These consts are meant to help with OData supported operations.
const (
	OdataTypeSuffix = "@odata.type"

	// Types
	OdataBinary   = "Edm.Binary"
	OdataDateTime = "Edm.DateTime"
	OdataDouble   = "Edm.Double"
	OdataGUID     = "Edm.Guid"
	OdataInt64    = "Edm.Int64"

	// Query options
	OdataFilter  = "$filter"
	OdataOrderBy = "$orderby"
	OdataTop     = "$top"
	OdataSkip    = "$skip"
	OdataCount   = "$count"
	OdataExpand  = "$expand"
	OdataSelect  = "$select"
	OdataSearch  = "$search"

	// Metadata levels.
	EmptyPayload    MetadataLevel = ""
	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
)

203
vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go

@ -0,0 +1,203 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
)
// GetPageRangesResponse contains the response fields from a
// Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct {
	XMLName  xml.Name    `xml:"PageList"`
	PageList []PageRange `xml:"PageRange"` // valid page ranges, as returned by the service
}
// PageRange contains information about a page of a page blob from a
// Get Page Ranges call. Start and End are inclusive byte offsets.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct {
	Start int64 `xml:"Start"`
	End   int64 `xml:"End"`
}
// Sentinel errors reported by blob copy operations.
var (
	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
)
// PutPageOptions includes the options for a put page operation.
// Header-tagged fields are forwarded verbatim as request headers.
type PutPageOptions struct {
	Timeout                           uint       // request timeout in seconds; 0 omits it
	LeaseID                           string     `header:"x-ms-lease-id"`
	IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
	IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
	IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
	IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
	IfMatch                           string     `header:"If-Match"`
	IfNoneMatch                       string     `header:"If-None-Match"`
	RequestID                         string     `header:"x-ms-client-request-id"`
}
// WriteRange writes a range of pages to a page blob.
// Ranges must be aligned to 512-byte boundaries and the chunk size must
// be a multiple of 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	if bytes != nil {
		return b.modifyRange(blobRange, bytes, options)
	}
	return errors.New("bytes cannot be nil")
}
// ClearRange clears the given range in a page blob.
// Ranges must be aligned to 512-byte boundaries and the chunk size must
// be a multiple of 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
	// A nil reader makes modifyRange issue a "clear" page-write.
	return b.modifyRange(blobRange, nil, options)
}
// modifyRange implements both WriteRange (bytes != nil, "update") and
// ClearRange (bytes == nil, "clear"). It validates the 512-byte
// alignment rules before issuing the Put Page request.
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	switch {
	case blobRange.End < blobRange.Start:
		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	case blobRange.Start%512 != 0:
		return errors.New("the value for rangeStart must be a multiple of 512")
	case blobRange.End%512 != 511:
		return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
	}

	params := url.Values{"comp": {"page"}}

	// An update carries a payload and its length; a clear carries neither.
	write := "clear"
	var cl uint64
	if bytes != nil {
		write = "update"
		cl = blobRange.End - blobRange.Start + 1
	}

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = write
	headers["x-ms-range"] = blobRange.String()
	headers["Content-Length"] = fmt.Sprintf("%d", cl)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}
// GetPageRangesOptions includes the options for a get page ranges operation.
type GetPageRangesOptions struct {
	Timeout          uint        // request timeout in seconds; 0 omits it
	Snapshot         *time.Time  // restrict to a snapshot of the blob, if set
	PreviousSnapshot *time.Time  // sent as prevsnapshot to diff against, if set
	Range            *BlobRange  // limit listing to this byte range, if set
	LeaseID          string      `header:"x-ms-lease-id"`
	RequestID        string      `header:"x-ms-client-request-id"`
}
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
	var out GetPageRangesResponse

	params := url.Values{"comp": {"pagelist"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		if options.PreviousSnapshot != nil {
			params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
		}
		if options.Range != nil {
			headers["Range"] = options.Range.String()
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return out, err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return out, err
	}
	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
// PutPageBlob initializes an empty page blob with the specified name and
// maximum size in bytes (size must be aligned to a 512-byte boundary). A
// page blob must be created with this method before pages can be written.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
	if b.Properties.ContentLength%512 != 0 {
		return errors.New("Content length must be aligned to a 512-byte boundary")
	}

	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%d", b.Properties.ContentLength)
	headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%d", b.Properties.SequenceNumber)
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// respondCreation checks the status and finishes the response handling.
	return b.respondCreation(resp, BlobTypePage)
}

436
vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go

@ -0,0 +1,436 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
)
// QueueAccessPolicy represents each access policy in the queue ACL.
// The Can* flags map onto the "raup" permission string.
type QueueAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool // "r"
	CanAdd     bool // "a"
	CanUpdate  bool // "u"
	CanProcess bool // "p"
}
// QueuePermissions represents the queue ACLs as a set of access policies.
type QueuePermissions struct {
	AccessPolicies []QueueAccessPolicy
}
// SetQueuePermissionOptions includes options for a set queue permissions operation.
type SetQueuePermissionOptions struct {
	Timeout   uint   // request timeout in seconds; 0 omits it
	RequestID string `header:"x-ms-client-request-id"`
}
// Queue represents an Azure queue.
type Queue struct {
	qsc               *QueueServiceClient // client used for all requests against this queue
	Name              string
	Metadata          map[string]string // user-defined metadata, sent/refreshed by Set/GetMetadata
	AproxMessageCount uint64            // populated by GetMetadata from the approximate-count header
}
// buildPath returns the URI path of the queue itself.
func (q *Queue) buildPath() string {
	return "/" + q.Name
}
// buildPathMessages returns the URI path of the queue's messages collection.
func (q *Queue) buildPathMessages() string {
	return q.buildPath() + "/messages"
}
// QueueServiceOptions includes options for some queue service operations.
type QueueServiceOptions struct {
	Timeout   uint   // request timeout in seconds; 0 omits it
	RequestID string `header:"x-ms-client-request-id"`
}
// Create operation creates a queue under the given account. Any metadata
// set on the Queue struct is sent along with the request.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
func (q *Queue) Create(options *QueueServiceOptions) error {
	query := url.Values{}
	headers := q.qsc.client.addMetadataToHeaders(q.qsc.client.getStandardHeaders(), q.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}
// Delete operation permanently deletes the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
func (q *Queue) Delete(options *QueueServiceOptions) error {
	query := url.Values{}
	headers := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// Exists returns true if a queue with given name exists.
func (q *Queue) Exists() (bool, error) {
	// Probe the queue's metadata endpoint: 200 means it exists, 404 means
	// it does not; any other status is turned into a service error.
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
	resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
	if resp != nil {
		defer drainRespBody(resp)
		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
			// Note: a decisive status answers the question even if exec
			// also returned an error.
			return resp.StatusCode == http.StatusOK, nil
		}
		err = getErrorFromResponse(resp)
	}
	// resp may be nil here (transport error); err carries the cause.
	return false, err
}
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := q.qsc.client.getStandardHeaders()
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-values pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
//
// Because the way Golang's http client (and http.Header in particular)
// canonicalize header names, the returned metadata names would always
// be all lower case.
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
if aproxMessagesStr != "" {
aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
if err != nil {
return err
}
q.AproxMessageCount = aproxMessages
}
q.Metadata = getMetadataFromHeaders(resp.Header)
return nil
}
// GetMessageReference returns a message object with the specified text,
// bound to this queue.
func (q *Queue) GetMessageReference(text string) *Message {
	m := new(Message)
	m.Queue = q
	m.Text = text
	return m
}
// GetMessagesOptions is the set of options that can be specified for a Get
// Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesOptions struct {
	Timeout           uint   // request timeout in seconds; 0 omits it
	NumOfMessages     int    // sent as the numofmessages query parameter; 0 omits it
	VisibilityTimeout int    // sent as the visibilitytimeout query parameter; 0 omits it
	RequestID         string `header:"x-ms-client-request-id"`
}
// messages is the XML envelope returned by Get/Peek Messages.
type messages struct {
	XMLName  xml.Name  `xml:"QueueMessagesList"`
	Messages []Message `xml:"QueueMessage"`
}
// GetMessages operation retrieves one or more messages from the front of
// the queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
	query := url.Values{}
	headers := q.qsc.client.getStandardHeaders()
	if options != nil {
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	// Drain (not just Close) the body so the connection can be reused;
	// this matches the drainRespBody convention used throughout this
	// package.
	defer drainRespBody(resp)

	var out messages
	if err = xmlUnmarshal(resp.Body, &out); err != nil {
		return []Message{}, err
	}
	// Attach the parent queue so returned messages can be updated/deleted.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, nil
}
// PeekMessagesOptions is the set of options that can be specified for a Peek
// Messages operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesOptions struct {
	Timeout       uint   // request timeout in seconds; 0 omits it
	NumOfMessages int    // sent as the numofmessages query parameter; 0 omits it
	RequestID     string `header:"x-ms-client-request-id"`
}
// PeekMessages retrieves one or more messages from the front of the queue,
// but does not alter the visibility of the messages.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
	query := url.Values{"peekonly": {"true"}} // Required for peek operation
	headers := q.qsc.client.getStandardHeaders()
	if options != nil {
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	// Drain (not just Close) the body so the connection can be reused;
	// this matches the drainRespBody convention used throughout this
	// package.
	defer drainRespBody(resp)

	var out messages
	if err = xmlUnmarshal(resp.Body, &out); err != nil {
		return []Message{}, err
	}
	// Attach the parent queue so returned messages can be acted upon.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, nil
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
	query := url.Values{}
	headers := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
	resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// SetPermissions sets up queue permissions.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
	// Serialize the access policies into the signed-identifiers XML body.
	body, n, err := generateQueueACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}

	query := url.Values{"comp": {"acl"}}
	headers := q.qsc.client.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(n)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// generateQueueACLpayload converts queue access policies into the XML
// signed-identifiers payload and returns the marshaled body and its length.
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{SignedIdentifiers: []SignedIdentifier{}}
	for _, p := range policies {
		sil.SignedIdentifiers = append(sil.SignedIdentifiers,
			convertAccessPolicyToXMLStructs(p.ID, p.StartTime, p.ExpiryTime, p.generateQueuePermissions()))
	}
	return xmlMarshal(sil)
}
// generateQueuePermissions builds the "raup" permission string from the
// policy's bool flags; the end-user API keeps the flags for readability.
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
	var p []byte
	if qapd.CanRead {
		p = append(p, 'r')
	}
	if qapd.CanAdd {
		p = append(p, 'a')
	}
	if qapd.CanUpdate {
		p = append(p, 'u')
	}
	if qapd.CanProcess {
		p = append(p, 'p')
	}
	return string(p)
}
// GetQueuePermissionOptions includes options for a get queue permissions operation.
type GetQueuePermissionOptions struct {
	Timeout   uint   // request timeout in seconds; 0 omits it
	RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
// If timeout is 0 then it will not be passed to Azure.
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
	query := url.Values{"comp": {"acl"}}
	headers := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return nil, err
	}
	// Drain (not just Close) the body so the connection can be reused;
	// this matches the drainRespBody convention used throughout this
	// package.
	defer drainRespBody(resp)

	var ap AccessPolicy
	if err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList); err != nil {
		return nil, err
	}
	return buildQueueAccessPolicy(ap, &resp.Header), nil
}
// buildQueueAccessPolicy converts the XML ACL representation back into
// the user-facing QueuePermissions, decoding the "raup" permission string
// into bool flags.
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
	out := QueuePermissions{AccessPolicies: []QueueAccessPolicy{}}
	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		perm := policy.AccessPolicy.Permission
		out.AccessPolicies = append(out.AccessPolicies, QueueAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
			CanRead:    updatePermissions(perm, "r"),
			CanAdd:     updatePermissions(perm, "a"),
			CanUpdate:  updatePermissions(perm, "u"),
			CanProcess: updatePermissions(perm, "p"),
		})
	}
	return &out
}

146
vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go

@ -0,0 +1,146 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// QueueSASOptions are options to construct a queue SAS URI.
// It combines the queue-specific permissions with the shared SAS options.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type QueueSASOptions struct {
	QueueSASPermissions
	SASOptions
}
// QueueSASPermissions includes the available permissions for
// a queue SAS URI; they map to the "raup" permission characters.
type QueueSASPermissions struct {
	Read    bool // "r"
	Add     bool // "a"
	Update  bool // "u"
	Process bool // "p"
}
// buildString encodes the permission flags as the SAS "raup" string.
func (q QueueSASPermissions) buildString() string {
	var p []byte
	if q.Read {
		p = append(p, 'r')
	}
	if q.Add {
		p = append(p, 'a')
	}
	if q.Update {
		p = append(p, 'u')
	}
	if q.Process {
		p = append(p, 'p')
	}
	return string(p)
}
// GetSASURI creates an URL to the specified queue which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	// An unset start time is omitted from the signature entirely.
	signedStart := ""
	if options.Start != (time.Time{}) {
		signedStart = options.Start.UTC().Format(time.RFC3339)
	}
	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)

	protocols := "https,http"
	if options.UseHTTPS {
		protocols = "https"
	}

	permissions := options.QueueSASPermissions.buildString()
	// NOTE: the field order of the string-to-sign is fixed by the service;
	// do not reorder.
	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
	if err != nil {
		return "", err
	}

	sig := q.qsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {q.qsc.client.apiVersion},
		"se":  {signedExpiry},
		"sp":  {permissions},
		"sig": {sig},
	}

	// Protocol restriction and source-IP filtering only exist from
	// API version 2015-04-05 onwards.
	if q.qsc.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		addQueryParameter(sasParams, "sip", options.IP)
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
// queueSASStringToSign assembles the newline-separated string-to-sign for
// a queue SAS, in the field order required by the given API version.
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
	// From 2015-02-21 on, the canonicalized resource is prefixed with the
	// service name.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/queue" + canonicalizedResource
	}
	switch {
	case signedVersion >= "2015-04-05":
		// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
			signedPermissions,
			signedStart,
			signedExpiry,
			canonicalizedResource,
			signedIdentifier,
			signedIP,
			protocols,
			signedVersion), nil
	case signedVersion >= "2013-08-15":
		// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
	}
	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

42
vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go

@ -0,0 +1,42 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	client Client         // underlying storage client that issues the REST requests
	auth   authentication // authentication scheme used to sign requests
}
// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// Delegate to the shared service-properties implementation for the queue service.
	props, err := q.client.getServiceProperties(queueServiceName, q.auth)
	return props, err
}
// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
	// Delegate to the shared service-properties implementation for the queue service.
	err := q.client.setServiceProperties(props, queueServiceName, q.auth)
	return err
}
// GetQueueReference returns a Queue object for the specified queue name.
// (The previous comment said "Container object" — a copy-paste slip from the
// blob service client; this method constructs a *Queue.)
//
// The returned Queue is a lightweight reference: no request is made to the
// service here.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
	return &Queue{
		qsc:  q,
		Name: name,
	}
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save