loveckiy.ivan
2 years ago
commit
5f6c90acd1
1398 changed files with 538270 additions and 0 deletions
@ -0,0 +1,2 @@ |
|||
[url "ssh://git@git.lowcodeplatform.net/"] |
|||
insteadOf = https://git.lowcodeplatform.net/ |
@ -0,0 +1,9 @@ |
|||
.history |
|||
.idea |
|||
.vscode |
|||
.DS_Store |
|||
*~merged* |
|||
*~merged |
|||
/public |
|||
.env |
|||
local |
@ -0,0 +1,3 @@ |
|||
# lib |
|||
|
|||
Библиотека общих компонентов для сервисов Buildbox Fabric |
@ -0,0 +1,305 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"os" |
|||
|
|||
"github.com/urfave/cli" |
|||
) |
|||
|
|||
// sep is the OS-specific path separator as a string ("/" on Unix, "\\" on Windows).
const sep = string(os.PathSeparator)
|||
|
|||
// RunServiceFuncCLI обраатываем параметры с консоли и вызываем переданую функцию
|
|||
func RunServiceFuncCLI(funcCLI func(configfile, dir, port, mode, service, param1, param2, param3, sourcedb, action, version string)) error { |
|||
var err error |
|||
|
|||
appCLI := cli.NewApp() |
|||
appCLI.Usage = "Demon Buildbox Proxy started" |
|||
appCLI.Commands = []cli.Command{ |
|||
{ |
|||
Name: "webinit", ShortName: "", |
|||
Usage: "Start Web-UI from init infractractire LowCodePlatform-service", |
|||
Flags: []cli.Flag{ |
|||
cli.StringFlag{ |
|||
Name: "port, c", |
|||
Usage: "Порт запуска UI", |
|||
Value: "8088", |
|||
}, |
|||
}, |
|||
Action: func(c *cli.Context) error { |
|||
port := c.String("port") |
|||
|
|||
funcCLI("", "", port, "", "", "", "", "", "", "webinit", "") |
|||
return nil |
|||
}, |
|||
}, |
|||
{ |
|||
Name: "stop", ShortName: "", |
|||
Usage: "Stop service", |
|||
Flags: []cli.Flag{ |
|||
cli.StringFlag{ |
|||
Name: "service, s", |
|||
Usage: "Остановить сервисы (через запятую). '-s systems' - остановить системные сервисы; '-s custom' - остановить рабочие пользовательские сервисы ", |
|||
Value: "all", |
|||
}, |
|||
}, |
|||
Action: func(c *cli.Context) error { |
|||
service := c.String("service") |
|||
|
|||
funcCLI("", "", "", "", service, "", "", "", "", "stop", "") |
|||
return nil |
|||
}, |
|||
}, |
|||
{ |
|||
Name: "start", ShortName: "", |
|||
Usage: "Start single Buildbox-service process", |
|||
Flags: []cli.Flag{ |
|||
cli.StringFlag{ |
|||
Name: "config, c", |
|||
Usage: "Название файла конфигурации, с которым будет запущен сервис", |
|||
Value: "default", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "dir, d", |
|||
Usage: "Путь к шаблонам", |
|||
Value: "default", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "port, p", |
|||
Usage: "Порт, на котором запустить процесс", |
|||
Value: "", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "mode, m", |
|||
Usage: "Доп.режимы запуска: debug (логирования stdout в файл)", |
|||
Value: "", |
|||
}, |
|||
|
|||
cli.StringFlag{ |
|||
Name: "service, s", |
|||
Usage: "Запуск сервиса (для запуска нескольких сервисов укажите их через запятую)", |
|||
Value: "systems", |
|||
}, |
|||
}, |
|||
Action: func(c *cli.Context) error { |
|||
configfile := c.String("config") |
|||
port := c.String("port") |
|||
dir := c.String("dir") |
|||
mode := c.String("mode") |
|||
|
|||
service := c.String("service") |
|||
|
|||
if dir == "default" { |
|||
dir, err = RootDir() |
|||
} |
|||
|
|||
funcCLI(configfile, dir, port, mode, service, "", "", "", "", "start", "") |
|||
return nil |
|||
}, |
|||
}, |
|||
{ |
|||
Name: "init", ShortName: "", |
|||
Usage: "Init single LowCodePlatform-service process", |
|||
Flags: []cli.Flag{ |
|||
cli.StringFlag{ |
|||
Name: "service, s", |
|||
Usage: "Инициализация сервиса", |
|||
Value: "false", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "version, v", |
|||
Usage: "До какой версии обновить выбранный сервис", |
|||
Value: "latest", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "param1, p1", |
|||
Usage: "Зарезервировано", |
|||
Value: "false", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "param2, p2", |
|||
Usage: "Зарезервировано", |
|||
Value: "false", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "param3, p3", |
|||
Usage: "Зарезервировано", |
|||
Value: "false", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "dir, d", |
|||
Usage: "Директория создания проекта (по-умолчанию - текущая директория)", |
|||
Value: "", |
|||
}, |
|||
cli.StringFlag{ |
|||
Name: "sourcedb, db", |
|||
Usage: "База данных, где будет развернута фабрика (поддерживается SQLite, MySQL, Postgres, CocckroachDB) (по-умолчанию: SQLite)", |
|||
Value: "./default.db", |
|||
}, |
|||
}, |
|||
Action: func(c *cli.Context) error { |
|||
service := c.String("service") |
|||
param1 := c.String("param1") |
|||
param2 := c.String("param2") |
|||
param3 := c.String("param3") |
|||
dir := c.String("dir") |
|||
version := c.String("version") |
|||
sourcedb := c.String("sourcedb") |
|||
|
|||
if dir == "default" { |
|||
dir, err = RootDir() |
|||
} |
|||
|
|||
funcCLI("", dir, "", "", service, param1, param2, param3, sourcedb, "init", version) |
|||
return nil |
|||
}, |
|||
}, |
|||
} |
|||
appCLI.Run(os.Args) |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Stop terminates the process with the given pid by sending os.Kill.
// Returns an error if the process cannot be found or signalled.
func Stop(pid int) (err error) {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return err
	}
	return proc.Signal(os.Kill)
}
|||
|
|||
// завершение всех процессов для текущей конфигурации
|
|||
// config - ид-конфигурации
|
|||
//func PidsByConfig(config, portProxy string) (result []string, err error) {
|
|||
// _, fullresult, _, _ := Ps("full", portProxy)
|
|||
//
|
|||
// // получаем pid для переданной конфигурации
|
|||
// for _, v1 := range fullresult {
|
|||
// for _, v := range v1 {
|
|||
// configfile := v[1] // файл
|
|||
// idProcess := v[0] // pid
|
|||
//
|
|||
// if config == configfile {
|
|||
// result = append(result, idProcess)
|
|||
// }
|
|||
//
|
|||
// if err != nil {
|
|||
// fmt.Println("Error stopped process config:", config, ", err:", err)
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
//
|
|||
// return
|
|||
//}
|
|||
|
|||
// получаем строки пидов подходящих под условия, в котором:
|
|||
// domain - название проекта (домен)
|
|||
// alias - название алиас-сервиса (gui/api/proxy и тд - то, что в мап-прокси идет второй частью адреса)
|
|||
// если алиас явно не задан, то он может быть получен из домена
|
|||
//func PidsByAlias(domain, alias, portProxy string) (result []string, err error) {
|
|||
//
|
|||
// if domain == "" {
|
|||
// domain = "all"
|
|||
// }
|
|||
// if alias == "" {
|
|||
// alias = "all"
|
|||
// }
|
|||
//
|
|||
// // можем в домене передать полный путь с учетом алиаса типа buildbox/gui
|
|||
// // в этом случае алиас если он явно не задан заполним значением алиаса полученного из домена
|
|||
// splitDomain := strings.Split(domain, "/")
|
|||
// if len(splitDomain) == 2 {
|
|||
// domain = splitDomain[0]
|
|||
// alias = splitDomain[1]
|
|||
// }
|
|||
// _, _, raw, _ := Ps("full", portProxy)
|
|||
//
|
|||
// // получаем pid для переданной конфигурации
|
|||
// for _, pidRegistry := range raw {
|
|||
// for d, v1 := range pidRegistry {
|
|||
// // пропускаем если точное сравнение и не подоходит
|
|||
// if domain != "all" && d != domain {
|
|||
// continue
|
|||
// }
|
|||
//
|
|||
// for a, v2 := range v1 {
|
|||
// // пропускаем если точное сравнение и не подоходит
|
|||
// if alias != "all" && a != alias {
|
|||
// continue
|
|||
// }
|
|||
//
|
|||
// for _, v3 := range v2 {
|
|||
// k3 := strings.Split(v3, ":")
|
|||
// idProcess := k3[0] // pid
|
|||
// // дополняем результат значениями домена и алиаса (для возврата их при остановке если не переданы алиас явно)
|
|||
// // бывают значения, когда мы останавлитваем процесс тошько по домену и тогда мы не можем возврашить алиас остановленного процесса
|
|||
// // а алиас нужен для поиска в прокси в картах /Pid и /Мар для удаления из активных сервисов по домену и алиасу
|
|||
// // если алиаса нет (не приходит в ответе от лоадера, то не находим и прибитые процессы залипают в мапах)
|
|||
// result = append(result, v3+":"+ d + ":" + a)
|
|||
//
|
|||
// if err != nil {
|
|||
// fmt.Println("Error stopped process: pid:", idProcess, ", err:", err)
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
//
|
|||
// return
|
|||
//}
|
|||
|
|||
// уничтожить все процессы
|
|||
//func Destroy(portProxy string) (err error) {
|
|||
// pids, _, _, _ := Ps("pid", portProxy)
|
|||
// for _, v := range pids {
|
|||
// pi, err := strconv.Atoi(v)
|
|||
// if err == nil {
|
|||
// Stop(pi)
|
|||
// }
|
|||
// }
|
|||
// return err
|
|||
//}
|
|||
|
|||
// инициализация приложения
|
|||
//func Install() (err error) {
|
|||
//
|
|||
// // 1. задание переменных окружения
|
|||
// currentDir, err := CurrentDir()
|
|||
// if err != nil {
|
|||
// return
|
|||
// }
|
|||
// os.Setenv("BBPATH", currentDir)
|
|||
//
|
|||
// //var rootPath = os.Getenv("BBPATH")
|
|||
//
|
|||
// //fmt.Println(rootPath)
|
|||
// //path, _ := os.LookupEnv("BBPATH")
|
|||
// //fmt.Print("BBPATH: ", path)
|
|||
//
|
|||
// // 2. копирование файла запуска в /etc/bin
|
|||
// //src := "./buildbox"
|
|||
// //dst := "/usr/bin/buildbox"
|
|||
// //
|
|||
// //in, err := os.Open(src)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //defer in.Close()
|
|||
// //
|
|||
// //out, err := os.Create(dst)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //defer out.Close()
|
|||
// //
|
|||
// //_, err = io.Copy(out, in)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //return out.Close()
|
|||
//
|
|||
// return err
|
|||
//}
|
@ -0,0 +1,107 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"encoding/base64" |
|||
"fmt" |
|||
"os" |
|||
"strings" |
|||
|
|||
"github.com/BurntSushi/toml" |
|||
"github.com/kelseyhightower/envconfig" |
|||
"github.com/labstack/gommon/color" |
|||
) |
|||
|
|||
// warning is the red "[Fail]" prefix used in console error output.
var warning = color.Red("[Fail]")
|||
|
|||
// ConfigLoad читаем конфигурации
|
|||
// получаем только название конфигурации
|
|||
// 1. поднимаемся до корневой директории
|
|||
// 2. от нее ищем полный путь до конфига
|
|||
// 3. читаем по этому пути
|
|||
func ConfigLoad(config string, pointToCfg interface{}) (err error) { |
|||
var payload string |
|||
|
|||
if err := envconfig.Process("", pointToCfg); err != nil { |
|||
fmt.Printf("%s Error load default enviroment: %s\n", warning, err) |
|||
err = fmt.Errorf("Error load default enviroment: %s", err) |
|||
return err |
|||
} |
|||
|
|||
// проверка на длину конфигурационного файла
|
|||
// если он больше 100, то скорее всего передали конфигурацию в base64
|
|||
if len(config) < 200 { |
|||
// 3.
|
|||
if len(config) == 0 { |
|||
return fmt.Errorf("%s", "Error. Configfile is empty.") |
|||
} |
|||
if !strings.Contains(config, "."){ |
|||
config = config + ".cfg" |
|||
} |
|||
|
|||
// 4. читаем из файла
|
|||
payload, err = ReadFile(config) |
|||
if err != nil { |
|||
return fmt.Errorf("Error raed configfile: (%s), err: %s", config, err) |
|||
} |
|||
|
|||
} else { |
|||
// пробуем расшифровать из base64
|
|||
debase, err := base64.StdEncoding.DecodeString(config) |
|||
if err != nil { |
|||
return fmt.Errorf("Error decode to string from base64 configfile. err: %s", err) |
|||
} |
|||
payload = string(debase) |
|||
} |
|||
err = decodeConfig(payload, pointToCfg) |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Читаем конфигурация по заданному полному пути
|
|||
func decodeConfig(configfile string, cfg interface{}) (err error) { |
|||
if _, err = toml.Decode(configfile, cfg); err != nil { |
|||
fmt.Printf("%s Error: %s (configfile: %s)\n", warning, err, configfile) |
|||
} |
|||
|
|||
return err |
|||
} |
|||
|
|||
// SearchConfigDir получаем путь до искомой конфигурации от переданной директории
|
|||
func searchConfigDir(startDir, configuration string) (configPath string, err error) { |
|||
var nextPath string |
|||
directory, err := os.Open(startDir) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
defer directory.Close() |
|||
|
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
// пробегаем текущую папку и считаем совпадание признаков
|
|||
for _, obj := range objects { |
|||
nextPath = startDir + sep + obj.Name() |
|||
if obj.IsDir() { |
|||
dirName := obj.Name() |
|||
|
|||
// не входим в скрытые папки
|
|||
if dirName[:1] != "." { |
|||
configPath, err = searchConfigDir(nextPath, configuration) |
|||
if configPath != "" { |
|||
return configPath, err // поднимает результат наверх
|
|||
} |
|||
} |
|||
} else { |
|||
if !strings.Contains(nextPath, "/.") { |
|||
// проверяем только файлы конфигурации (игнорируем .json)
|
|||
if strings.Contains(obj.Name(), configuration + ".cfg") { |
|||
return nextPath, err |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
return configPath, err |
|||
} |
@ -0,0 +1,99 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/aes" |
|||
"crypto/cipher" |
|||
"crypto/rand" |
|||
"encoding/base64" |
|||
"errors" |
|||
"io" |
|||
"strings" |
|||
) |
|||
|
|||
// Пример использования
|
|||
//func main() {
|
|||
// key := []byte("LKHlhb899Y09olUi")
|
|||
// encryptMsg, _ := encrypt(key, "Hello World")
|
|||
// msg, _ := decrypt(key, encryptMsg)
|
|||
// fmt.Println(msg) // Hello World
|
|||
//}
|
|||
|
|||
|
|||
// addBase64Padding restores the trailing "=" characters so that value has a
// length divisible by four, as standard base64 decoders require.
func addBase64Padding(value string) string {
	if rem := len(value) % 4; rem != 0 {
		return value + strings.Repeat("=", 4-rem)
	}
	return value
}
|||
|
|||
// removeBase64Padding strips every "=" padding character from value.
func removeBase64Padding(value string) string {
	return strings.ReplaceAll(value, "=", "")
}
|||
|
|||
// unpad removes PKCS#7-style padding from src: the last byte encodes how
// many padding bytes to drop. Returns an error when the padding is invalid,
// which typically means the wrong decryption key was used.
func unpad(src []byte) ([]byte, error) {
	length := len(src)

	// FIX: an empty slice previously caused an index-out-of-range panic on
	// src[length-1]; treat it as invalid padding instead.
	if length == 0 {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}

	unpadding := int(src[length-1])
	if unpadding > length {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}

	return src[:(length - unpadding)], nil
}
|||
|
|||
func Pad(src []byte) []byte { |
|||
padding := aes.BlockSize - len(src)%aes.BlockSize |
|||
padtext := bytes.Repeat([]byte{byte(padding)}, padding) |
|||
return append(src, padtext...) |
|||
} |
|||
|
|||
func Encrypt(key []byte, text string) (string, error) { |
|||
block, err := aes.NewCipher(key) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
msg := Pad([]byte(text)) |
|||
ciphertext := make([]byte, aes.BlockSize+len(msg)) |
|||
iv := ciphertext[:aes.BlockSize] |
|||
if _, err := io.ReadFull(rand.Reader, iv); err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
cfb := cipher.NewCFBEncrypter(block, iv) |
|||
cfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(msg)) |
|||
finalMsg := removeBase64Padding(base64.URLEncoding.EncodeToString(ciphertext)) |
|||
return finalMsg, nil |
|||
} |
|||
|
|||
func Decrypt(key []byte, text string) (string, error) { |
|||
block, err := aes.NewCipher(key) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
decodedMsg, err := base64.URLEncoding.DecodeString(addBase64Padding(text)) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
if (len(decodedMsg) % aes.BlockSize) != 0 { |
|||
return "", errors.New("blocksize must be multipe of decoded message length") |
|||
} |
|||
|
|||
iv := decodedMsg[:aes.BlockSize] |
|||
msg := decodedMsg[aes.BlockSize:] |
|||
|
|||
cfb := cipher.NewCFBDecrypter(block, iv) |
|||
cfb.XORKeyStream(msg, msg) |
|||
|
|||
unpadMsg, err := unpad(msg) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
return string(unpadMsg), nil |
|||
} |
@ -0,0 +1,305 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"archive/zip" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"os" |
|||
"path/filepath" |
|||
"strings" |
|||
) |
|||
|
|||
// CreateFile Создаем файл по указанному пути если его нет
|
|||
func CreateFile(path string) (err error) { |
|||
|
|||
// detect if file exists
|
|||
_, err = os.Stat(path) |
|||
var file *os.File |
|||
|
|||
// delete old file if exists
|
|||
if !os.IsNotExist(err) { |
|||
os.RemoveAll(path) |
|||
} |
|||
|
|||
// create file
|
|||
file, err = os.Create(path) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer file.Close() |
|||
|
|||
return err |
|||
} |
|||
|
|||
// WriteFile пишем в файл по указанному пути
|
|||
func WriteFile(path string, data []byte) (err error) { |
|||
|
|||
// detect if file exists and create
|
|||
err = CreateFile(path) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
// open file using READ & WRITE permission
|
|||
file, err := os.OpenFile(path, os.O_RDWR, 0644) |
|||
if err != nil { |
|||
return |
|||
} |
|||
defer file.Close() |
|||
|
|||
// write into file
|
|||
_, err = file.Write(data) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
// save changes
|
|||
err = file.Sync() |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// ReadFile читаем файл. (отключил: всегда в рамках рабочей диретории)
|
|||
func ReadFile(path string) (result string, err error) { |
|||
// если не от корня, то подставляем текущую директорию
|
|||
//if path[:1] != "/" {
|
|||
// path = CurrentDir() + "/" + path
|
|||
//} else {
|
|||
// path = CurrentDir() + path
|
|||
//}
|
|||
|
|||
file, err := os.Open(path) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
b, err := ioutil.ReadAll(file) |
|||
if err == nil { |
|||
result = string(b) |
|||
} |
|||
defer file.Close() |
|||
|
|||
return result, err |
|||
} |
|||
|
|||
// CopyFolder копирование папки
|
|||
func CopyFolder(source string, dest string) (err error) { |
|||
|
|||
sourceinfo, err := os.Stat(source) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
err = os.MkdirAll(dest, sourceinfo.Mode()) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
directory, _ := os.Open(source) |
|||
objects, err := directory.Readdir(-1) |
|||
|
|||
for _, obj := range objects { |
|||
sourcefilepointer := source + "/" + obj.Name() |
|||
destinationfilepointer := dest + "/" + obj.Name() |
|||
|
|||
if obj.IsDir() { |
|||
err = CopyFolder(sourcefilepointer, destinationfilepointer) |
|||
if err != nil { |
|||
fmt.Println(err) |
|||
} |
|||
} else { |
|||
err = CopyFile(sourcefilepointer, destinationfilepointer) |
|||
if err != nil { |
|||
fmt.Println(err) |
|||
} |
|||
} |
|||
|
|||
} |
|||
return |
|||
} |
|||
|
|||
// CopyFile копирование файла
|
|||
func CopyFile(source string, dest string) (err error) { |
|||
sourcefile, err := os.Open(source) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer sourcefile.Close() |
|||
|
|||
destfile, err := os.Create(dest) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer destfile.Close() |
|||
|
|||
_, err = io.Copy(destfile, sourcefile) |
|||
if err == nil { |
|||
sourceinfo, err := os.Stat(source) |
|||
if err != nil { |
|||
err = os.Chmod(dest, sourceinfo.Mode()) |
|||
} |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// IsExist определяем наличие директории/файла
|
|||
func IsExist(path string) (exist bool) { |
|||
if _, err := os.Stat(path); !os.IsNotExist(err) { |
|||
return true |
|||
} |
|||
|
|||
return false |
|||
} |
|||
|
|||
// CreateDir создание папки
|
|||
func CreateDir(path string, mode os.FileMode) (err error) { |
|||
if mode == 0 { |
|||
mode = 0711 |
|||
} |
|||
err = os.MkdirAll(path, mode) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
func DeleteFile(path string) (err error) { |
|||
err = os.Remove(path) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
func MoveFile(source string, dest string) (err error) { |
|||
err = CopyFile(source, dest) |
|||
if err != nil { |
|||
return |
|||
} |
|||
err = DeleteFile(source) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// Zip
|
|||
// zip("/tmp/documents", "/tmp/backup.zip")
|
|||
func Zip(source, target string) (err error) { |
|||
zipfile, err := os.Create(target) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer zipfile.Close() |
|||
|
|||
archive := zip.NewWriter(zipfile) |
|||
defer archive.Close() |
|||
|
|||
info, err := os.Stat(source) |
|||
if err != nil { |
|||
return nil |
|||
} |
|||
|
|||
var baseDir string |
|||
if info.IsDir() { |
|||
baseDir = filepath.Base(source) |
|||
} |
|||
|
|||
filepath.Walk(source, func(path string, info os.FileInfo, err error) error { |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
header, err := zip.FileInfoHeader(info) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if baseDir != "" { |
|||
header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source)) |
|||
} |
|||
|
|||
if info.IsDir() { |
|||
header.Name += "/" |
|||
} else { |
|||
header.Method = zip.Deflate |
|||
} |
|||
|
|||
writer, err := archive.CreateHeader(header) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if info.IsDir() { |
|||
return nil |
|||
} |
|||
|
|||
file, err := os.Open(path) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer file.Close() |
|||
_, err = io.Copy(writer, file) |
|||
return err |
|||
}) |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Unzip
|
|||
// unzip("/tmp/report-2015.zip", "/tmp/reports/")
|
|||
func Unzip(archive, target string) (err error) { |
|||
reader, err := zip.OpenReader(archive) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if err := os.MkdirAll(target, 0755); err != nil { |
|||
return err |
|||
} |
|||
for _, file := range reader.File { |
|||
path := filepath.Join(target, file.Name) |
|||
if file.FileInfo().IsDir() { |
|||
os.MkdirAll(path, file.Mode()) |
|||
continue |
|||
} |
|||
|
|||
fileReader, err := file.Open() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer fileReader.Close() |
|||
|
|||
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer targetFile.Close() |
|||
|
|||
if _, err := io.Copy(targetFile, fileReader); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
func Chmod(path string, mode os.FileMode) (err error) { |
|||
file, err := os.Open(path) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer file.Close() |
|||
|
|||
err = file.Chmod(mode) |
|||
|
|||
return err |
|||
} |
@ -0,0 +1,204 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"crypto/sha1" |
|||
"encoding/hex" |
|||
"encoding/json" |
|||
"fmt" |
|||
"net/http" |
|||
"os" |
|||
"os/exec" |
|||
"path" |
|||
"path/filepath" |
|||
"strings" |
|||
"syscall" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/models" |
|||
uuid "github.com/satori/go.uuid" |
|||
) |
|||
|
|||
// ResponseJSON если status не из списка, то вставляем статус - 501 и Descraption из статуса
|
|||
func ResponseJSON(w http.ResponseWriter, objResponse interface{}, status string, error error, metrics interface{}) (err error) { |
|||
|
|||
if w == nil { |
|||
return |
|||
} |
|||
|
|||
errMessage := models.RestStatus{} |
|||
st, found := models.StatusCode[status] |
|||
if found { |
|||
errMessage = st |
|||
} else { |
|||
errMessage = models.StatusCode["NotStatus"] |
|||
} |
|||
|
|||
objResp := &models.Response{} |
|||
if error != nil { |
|||
errMessage.Error = error |
|||
} |
|||
|
|||
// Metrics
|
|||
b1, _ := json.Marshal(metrics) |
|||
var metricsR models.Metrics |
|||
json.Unmarshal(b1, &metricsR) |
|||
if metrics != nil { |
|||
objResp.Metrics = metricsR |
|||
} |
|||
|
|||
objResp.Status = errMessage |
|||
objResp.Data = objResponse |
|||
|
|||
// формируем ответ
|
|||
out, err := json.Marshal(objResp) |
|||
if err != nil { |
|||
out = []byte(fmt.Sprintf("%s", err)) |
|||
} |
|||
|
|||
//WriteFile("./dump.json", out)
|
|||
|
|||
w.WriteHeader(errMessage.Status) |
|||
w.Header().Set("Content-Type", "application/json; charset=UTF-8") |
|||
w.Write(out) |
|||
|
|||
return |
|||
} |
|||
|
|||
// RunProcess стартуем сервис из конфига
|
|||
func RunProcess(path, config, command, mode string) (pid int, err error) { |
|||
var cmd *exec.Cmd |
|||
|
|||
if config == "" { |
|||
return 0, fmt.Errorf("%s", "Configuration file is not found") |
|||
} |
|||
if command == "" { |
|||
command = "start" |
|||
} |
|||
|
|||
path = strings.Replace(path, "//", "/", -1) |
|||
|
|||
cmd = exec.Command(path, command, "--config", config, "--mode", mode) |
|||
if mode == "debug" { |
|||
t := time.Now().Format("2006.01.02-15-04-05") |
|||
s := strings.Split(path, sep) |
|||
srv := s[len(s)-1] |
|||
|
|||
err = CreateDir("debug"+sep+srv, 0777) |
|||
config_name := strings.Replace(config, "-", "", -1) |
|||
|
|||
f, _ := os.Create("debug" + sep + srv + sep + config_name + "_" + fmt.Sprint(t) + ".log") |
|||
cmd.Stdout = f |
|||
cmd.Stderr = f |
|||
} |
|||
|
|||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} |
|||
err = cmd.Start() |
|||
if err != nil { |
|||
return 0, err |
|||
} |
|||
|
|||
pid = cmd.Process.Pid |
|||
|
|||
return |
|||
} |
|||
|
|||
// RootDir получаем корневую директорию от места где запускаем файл
|
|||
func RootDir() (rootDir string, err error) { |
|||
file, err := filepath.Abs(os.Args[0]) |
|||
if err != nil { |
|||
return |
|||
} |
|||
rootDir = path.Dir(file) |
|||
if err != nil { |
|||
fmt.Println("Error calculation RootDir. File: ", file, "; Error: ", err) |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// Hash returns the hex-encoded SHA-1 digest of str.
func Hash(str string) (result string) {
	digest := sha1.Sum([]byte(str))
	return hex.EncodeToString(digest[:])
}
|||
|
|||
func PanicOnErr(err error) { |
|||
if err != nil { |
|||
fmt.Println("Error: ", err) |
|||
panic(err) |
|||
} |
|||
} |
|||
|
|||
func UUID() (result string) { |
|||
stUUID := uuid.NewV4() |
|||
return stUUID.String() |
|||
} |
|||
|
|||
// RemoveElementFromData удаляем элемент из слайса
|
|||
func RemoveElementFromData(p *models.ResponseData, i int) bool { |
|||
|
|||
if i < len(p.Data) { |
|||
p.Data = append(p.Data[:i], p.Data[i+1:]...) |
|||
} else { |
|||
//log.Warning("Error! Position invalid (", i, ")")
|
|||
return false |
|||
} |
|||
|
|||
return true |
|||
} |
|||
|
|||
// JsonEscape returns i escaped for embedding inside a JSON string literal,
// e.g. JsonEscape(`dog "fish" cat`) == `dog \"fish\" cat`.
// Panics if marshalling fails (it cannot for a plain string).
func JsonEscape(i string) string {
	encoded, err := json.Marshal(i)
	if err != nil {
		panic(err)
	}
	// drop the surrounding quotes that Marshal adds
	return string(encoded[1 : len(encoded)-1])
}
|||
|
|||
// SearchConfigDir получаем путь до искомой конфигурации от переданной директории
|
|||
func SearchConfig(projectDir, configuration string) (configPath string, err error) { |
|||
var nextPath string |
|||
|
|||
directory, err := os.Open(projectDir) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
defer directory.Close() |
|||
|
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
// пробегаем текущую папку и считаем совпадание признаков
|
|||
for _, obj := range objects { |
|||
|
|||
nextPath = projectDir + sep + obj.Name() |
|||
if obj.IsDir() { |
|||
dirName := obj.Name() |
|||
|
|||
// не входим в скрытые папки
|
|||
if dirName[:1] != "." { |
|||
configPath, err = SearchConfig(nextPath, configuration) |
|||
if configPath != "" { |
|||
return configPath, err // поднимает результат наверх
|
|||
} |
|||
} |
|||
} else { |
|||
if !strings.Contains(nextPath, "/.") { |
|||
// проверяем только файлы конфигурации (игнорируем .json)
|
|||
if strings.Contains(obj.Name(), configuration+".cfg") { |
|||
return nextPath, err |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
return configPath, err |
|||
} |
@ -0,0 +1,22 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"fmt" |
|||
"testing" |
|||
) |
|||
|
|||
func TestSearchConfig(t *testing.T) { |
|||
t.Skip() |
|||
|
|||
cases := []struct { |
|||
path string |
|||
configfile string |
|||
}{ |
|||
{"/Users/ivan/go/src/git.lowcodeplatform.net/buildbox/upload/buildbox/bin/proxy/darwin/v1.2.0", "2021-04-01T09-32-39Z-515f56"}, |
|||
} |
|||
|
|||
for _, c := range cases { |
|||
res, err := SearchConfig(c.configfile, c.path) |
|||
fmt.Println(res, err) |
|||
} |
|||
} |
@ -0,0 +1,51 @@ |
|||
module git.lowcodeplatform.net/fabric/lib |
|||
|
|||
go 1.17 |
|||
|
|||
require ( |
|||
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237 |
|||
github.com/BurntSushi/toml v1.2.0 |
|||
github.com/gabriel-vasile/mimetype v1.4.1 |
|||
github.com/graymeta/stow v0.2.8 |
|||
github.com/kelseyhightower/envconfig v1.4.0 |
|||
github.com/labstack/gommon v0.4.0 |
|||
github.com/satori/go.uuid v1.2.0 |
|||
github.com/shirou/gopsutil v3.21.11+incompatible |
|||
github.com/sirupsen/logrus v1.9.0 |
|||
github.com/urfave/cli v1.22.10 |
|||
) |
|||
|
|||
require ( |
|||
cloud.google.com/go v0.38.0 // indirect |
|||
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible // indirect |
|||
github.com/Azure/go-autorest/autorest v0.9.0 // indirect |
|||
github.com/Azure/go-autorest/autorest/adal v0.5.0 // indirect |
|||
github.com/Azure/go-autorest/autorest/date v0.1.0 // indirect |
|||
github.com/Azure/go-autorest/logger v0.1.0 // indirect |
|||
github.com/Azure/go-autorest/tracing v0.5.0 // indirect |
|||
github.com/aws/aws-sdk-go v1.23.4 // indirect |
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect |
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect |
|||
github.com/go-ole/go-ole v1.2.6 // indirect |
|||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect |
|||
github.com/golang/protobuf v1.3.1 // indirect |
|||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect |
|||
github.com/hashicorp/golang-lru v0.5.1 // indirect |
|||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect |
|||
github.com/mattn/go-colorable v0.1.11 // indirect |
|||
github.com/mattn/go-isatty v0.0.14 // indirect |
|||
github.com/ncw/swift v1.0.49 // indirect |
|||
github.com/pkg/errors v0.8.1 // indirect |
|||
github.com/russross/blackfriday/v2 v2.0.1 // indirect |
|||
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect |
|||
github.com/yusufpapurcu/wmi v1.2.2 // indirect |
|||
go.opencensus.io v0.21.0 // indirect |
|||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect |
|||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect |
|||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect |
|||
golang.org/x/text v0.3.7 // indirect |
|||
google.golang.org/api v0.8.0 // indirect |
|||
google.golang.org/appengine v1.5.0 // indirect |
|||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect |
|||
google.golang.org/grpc v1.20.1 // indirect |
|||
) |
@ -0,0 +1,182 @@ |
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= |
|||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= |
|||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= |
|||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= |
|||
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237 h1:TiGs+dG9tueKWvMDos7XMIej9kekY0777bCI3+QXsh4= |
|||
git.lowcodeplatform.net/fabric/models v0.0.0-20221009154545-811eec886237/go.mod h1:kTVmb3xTTlMOV0PJ+IFHC3kS6pkOszNigaRsZeZp0M0= |
|||
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible h1:Hn/DsObfmw0M7dMGS/c0MlVrJuGFzHzOpBWL89acR68= |
|||
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= |
|||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= |
|||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= |
|||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= |
|||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= |
|||
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= |
|||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= |
|||
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= |
|||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= |
|||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= |
|||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= |
|||
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= |
|||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= |
|||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= |
|||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= |
|||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= |
|||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= |
|||
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= |
|||
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= |
|||
github.com/aws/aws-sdk-go v1.23.4 h1:F6f/iQRhuSfrpUdy80q29898H0NYN27pX+95tkJ+BIY= |
|||
github.com/aws/aws-sdk-go v1.23.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= |
|||
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= |
|||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= |
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= |
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= |
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= |
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= |
|||
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= |
|||
github.com/gabriel-vasile/mimetype v1.4.1 h1:TRWk7se+TOjCYgRth7+1/OYLNiRNIotknkFtf/dnN7Q= |
|||
github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= |
|||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= |
|||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= |
|||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= |
|||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= |
|||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= |
|||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= |
|||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= |
|||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= |
|||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= |
|||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= |
|||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= |
|||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= |
|||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= |
|||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= |
|||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= |
|||
github.com/google/readahead v0.0.0-20161222183148-eaceba169032/go.mod h1:qYysrqQXuV4tzsizt4oOQ6mrBZQ0xnQXP3ylXX8Jk5Y= |
|||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= |
|||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= |
|||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= |
|||
github.com/graymeta/stow v0.2.8 h1:fxN42iKy/bUg5nMR/2iWSc5+57hctCBbnFQ31PrYIOU= |
|||
github.com/graymeta/stow v0.2.8/go.mod h1:JAs139Zr29qfsecy7b+h9DRsWXbFbsd7LCrbCDYI84k= |
|||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= |
|||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= |
|||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= |
|||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= |
|||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= |
|||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= |
|||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= |
|||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= |
|||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= |
|||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= |
|||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= |
|||
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= |
|||
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= |
|||
github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= |
|||
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= |
|||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= |
|||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= |
|||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= |
|||
github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E= |
|||
github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= |
|||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= |
|||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= |
|||
github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk= |
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
|||
github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= |
|||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= |
|||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= |
|||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= |
|||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= |
|||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= |
|||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= |
|||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= |
|||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= |
|||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= |
|||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= |
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
|||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= |
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= |
|||
github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= |
|||
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= |
|||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= |
|||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= |
|||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= |
|||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= |
|||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= |
|||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= |
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= |
|||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= |
|||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= |
|||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= |
|||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= |
|||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= |
|||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= |
|||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= |
|||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= |
|||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= |
|||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= |
|||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= |
|||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= |
|||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= |
|||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= |
|||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= |
|||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ= |
|||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= |
|||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= |
|||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= |
|||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= |
|||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= |
|||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
|||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
|||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
|||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
|||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
|||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
|||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
|||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
|||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
|||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
|||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= |
|||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= |
|||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= |
|||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= |
|||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= |
|||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= |
|||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= |
|||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= |
|||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= |
|||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= |
|||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= |
|||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= |
|||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= |
|||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= |
|||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= |
|||
google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= |
|||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= |
|||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= |
|||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= |
|||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= |
|||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= |
|||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= |
|||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= |
|||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= |
|||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= |
|||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= |
|||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= |
|||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= |
|||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= |
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
|||
gopkg.in/kothar/go-backblaze.v0 v0.0.0-20190520213052-702d4e7eb465/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE= |
|||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
|||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
|||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= |
|||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= |
|||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= |
@ -0,0 +1,139 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
|
|||
"git.lowcodeplatform.net/fabric/models" |
|||
"github.com/labstack/gommon/color" |
|||
) |
|||
|
|||
|
|||
// Curl performs an HTTP request and always returns the raw response body as an
// interface value plus an error (useful for external requests whose payload
// structure is unknown). When response is non-nil, the body is additionally
// unmarshalled into it (that unmarshal error is deliberately ignored).
//
// Special pseudo-methods:
//   - "JSONTOGET":  bodyJSON is parsed as a flat {"k":"v"} object and moved into
//     the query string (only when urlc already contains "?").
//   - "JSONTOPOST": bodyJSON is parsed the same way and sent as form-urlencoded body.
// Any other method sends bodyJSON verbatim as the request body.
func Curl(method, urlc, bodyJSON string, response interface{}, headers map[string]string, cookies []*http.Cookie) (result interface{}, err error) {
	var mapValues map[string]string
	var req *http.Request
	// NOTE(review): client has no Timeout set — a request may block indefinitely; confirm this is acceptable.
	client := &http.Client{}

	// default method when none supplied
	if method == "" {
		method = "POST"
	}

	method = strings.Trim(method, " ")
	values := url.Values{}
	actionType := ""

	// If JSON parameters were passed alongside a GET, they are appended to the
	// query string — but only when the URL already carries a "?" (see check below).
	clearUrl := strings.Contains(urlc, "?")

	// NOTE(review): this strips ALL spaces, including spaces inside JSON string
	// values — callers must not rely on whitespace in bodyJSON surviving.
	bodyJSON = strings.Replace(bodyJSON, " ", "", -1)
	err = json.Unmarshal([]byte(bodyJSON), &mapValues)

	if method == "JSONTOGET" && bodyJSON != "" && clearUrl {
		actionType = "JSONTOGET"
	}
	if method == "JSONTOPOST" && bodyJSON != "" {
		actionType = "JSONTOPOST"
	}

	switch actionType {
	case "JSONTOGET": // move the parsed JSON parameters into the query string
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			uri, _ := url.Parse(urlc)
			uri.RawQuery = values.Encode()
			urlc = uri.String()
			req, err = http.NewRequest("GET", urlc, strings.NewReader(bodyJSON))
		} else {
			// bodyJSON could not be parsed; err is returned by the check below
			fmt.Println("Error! Fail parsed bodyJSON from GET Curl: ", err)
		}
	case "JSONTOPOST": // move the parsed JSON parameters into a form-urlencoded body
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			req, err = http.NewRequest("POST", urlc, strings.NewReader(values.Encode()))
			req.PostForm = values
			req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		} else {
			fmt.Println("Error! Fail parsed bodyJSON to POST: ", err)
		}
	default:
		// plain request: bodyJSON is sent verbatim as the request body
		req, err = http.NewRequest(method, urlc, strings.NewReader(bodyJSON))
	}

	if err != nil {
		return "", err
	}

	// add the caller-supplied headers
	if len(headers) > 0 {
		for k, v := range headers {
			req.Header.Add(k, v)
		}
	}

	// add the cookies assigned for this request
	if cookies != nil {
		for _, v := range cookies {
			req.AddCookie(v)
		}
	}

	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error request: method:", method, ", url:", urlc, ", bodyJSON:", bodyJSON)
		return "", err
	} else {
		defer resp.Body.Close()
	}

	responseData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	responseString := string(responseData)

	// if a destination object was supplied, unmarshal the body into it
	// NOTE(review): the unmarshal error is intentionally discarded here.
	if response != nil {
		json.Unmarshal([]byte(responseString), &response)
	}

	// the raw body is always returned as the interface result (useful for
	// external requests or client-side deserialization)
	//json.Unmarshal([]byte(responseString), &result)

	return responseString, err
}
|||
|
|||
func AddressProxy(addressProxy, interval string) (port string, err error) { |
|||
fail := color.Red("[Fail]") |
|||
urlProxy := "" |
|||
|
|||
// если автоматическая настройка портов
|
|||
if addressProxy != "" && interval != "" { |
|||
if addressProxy[len(addressProxy)-1:] != "/" { |
|||
addressProxy = addressProxy + "/" |
|||
} |
|||
|
|||
var portDataAPI models.Response |
|||
// запрашиваем порт у указанного прокси-сервера
|
|||
urlProxy = addressProxy + "port?interval=" + interval |
|||
Curl("GET", urlProxy, "", &portDataAPI, map[string]string{}, nil) |
|||
port = fmt.Sprint(portDataAPI.Data) |
|||
} |
|||
|
|||
if port == "" { |
|||
err = fmt.Errorf("%s", "Port APP-service is null. Servive not running.") |
|||
fmt.Print(fail, " Port APP-service is null. Servive not running.\n") |
|||
} |
|||
|
|||
return port, err |
|||
} |
@ -0,0 +1,386 @@ |
|||
// Logging wrapper that augments logrus records with attributes of the logged process.
// Records are enriched with the values identifying the running service: UID, Name, Service.
|||
|
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"runtime/debug" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
var logrusB = logrus.New() |
|||
|
|||
// LogLine describes one line of a log file; used for unmarshalling log records.
type LogLine struct {
	Config string      `json:"config"` // uid of the configuration the process was started with
	Level  string      `json:"level"`  // log level of the record (trace/debug/info/warning/error/fatal/panic)
	Msg    interface{} `json:"msg"`    // logged message payload
	Name   string      `json:"name"`   // name of the logged process (service)
	Srv    string      `json:"srv"`    // service kind (app/gui/...)
	Time   string      `json:"time"`   // record timestamp as written by logrus
	Uid    string      `json:"uid"`    // uid of the logged process (random value)
}
|||
|
|||
// log is the concrete logger configuration backing the Log interface.
type log struct {

	// Output is where records are written: stdout or an *os.File for file logging.
	Output io.Writer `json:"output"`

	// Levels selects which severities are logged: Error/Warning/Debug/Info/Panic,
	// multiple levels joined with "|" (e.g. "Error|Warning"); "All" enables everything.
	//
	// Debug:   debugging/profiling messages. In production usually enabled only at
	//          initial startup or while hunting bottlenecks.
	// Info:    ordinary messages documenting system actions; no reaction required,
	//          but useful for bug hunting and investigating interesting situations.
	// Warning: something strange happened — possibly a situation not yet known to
	//          the system. It should be investigated and reclassified as either an
	//          info message or an error, with handling code updated accordingly.
	// Error:   a system fault requiring intervention: something failed to save or
	//          broke. Needs a fairly quick reaction and immediate write to the log.
	//          Note: a user mistake (e.g. entering -1 where not expected) is not a
	//          system error and should not go to the error log.
	// Panic:   critical errors rendering the system (or a subsystem) inoperable,
	//          most often due to misconfiguration or hardware failure. Requires an
	//          urgent, immediate reaction (possibly SMS notification).
	// Trace:   request-processing tracing.
	Levels string `json:"levels"`
	// UID is the uid of the logged process/service (a random value).
	UID string `json:"uid"`
	// Name is the name of the logged process (service).
	Name string `json:"name"`
	// Service is the service kind (app/gui/...).
	Service string `json:"service"`
	// Dir is the directory where log files are stored.
	Dir string `json:"dir"`
	// Config is the uid of the configuration the process was started with.
	Config string `json:"config"`
	// IntervalReload is the interval between checks for the current day's active log file.
	IntervalReload time.Duration `json:"delay_reload"`
	// IntervalClearFiles is the interval between checks for files due for deletion.
	IntervalClearFiles time.Duration `json:"interval_clear_files"`
	// PeriodSaveFiles is the retention period as "years-months-days"
	// (e.g. "0-1-0" keeps files for one month).
	PeriodSaveFiles string `json:"period_save_files"`

	// File is the currently open log file handle.
	File *os.File

	// mux guards concurrent access to the logger state.
	mux *sync.Mutex
}
|||
|
|||
// Log is the logging contract used by services: leveled logging methods,
// log-file rotation, and access to the underlying output/file.
type Log interface {
	// Trace logs request-processing traces (level "Trace").
	Trace(args ...interface{})
	// Debug logs debugging/profiling messages (level "Debug").
	Debug(args ...interface{})
	// Info logs ordinary progress messages (level "Info").
	Info(args ...interface{})
	// Warning logs unusual situations worth attention (level "Warning").
	Warning(args ...interface{})
	// Error logs faults requiring intervention, attaching err (level "Error").
	Error(err error, args ...interface{})
	// Panic logs critical errors and panics (level "Panic").
	Panic(err error, args ...interface{})
	// Exit logs at fatal level and terminates the program.
	Exit(err error, args ...interface{})
	// RotateInit starts the background rotation and cleanup goroutines.
	RotateInit(ctx context.Context)
	// GetOutput returns the current log output writer.
	GetOutput() io.Writer
	// GetFile returns the currently open log file.
	GetFile() *os.File
	// Close releases the logger's resources.
	Close()
}
|||
|
|||
func (l *log) Trace(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Trace") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.TraceLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Trace(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Debug(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Debug") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
// Only log the warning severity or above.
|
|||
logrusB.SetLevel(logrus.DebugLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Debug(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Info(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Info") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
logrusB.SetLevel(logrus.InfoLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Info(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Warning(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Warning") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.WarnLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Warn(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Error(err error, args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Error") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.ErrorLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
"error": fmt.Sprint(err), |
|||
}).Error(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Panic(err error, args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Panic") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.PanicLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
"error": fmt.Sprint(err), |
|||
}).Panic(args...) |
|||
} |
|||
} |
|||
|
|||
// Exit внутренняя ф-ция логирования и прекращения работы программы
|
|||
func (l *log) Exit(err error, args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Fatal") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.FatalLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
"error": fmt.Sprint(err), |
|||
}).Fatal(args...) |
|||
} |
|||
} |
|||
|
|||
// RotateInit Переинициализация файла логирования
|
|||
func (l *log) RotateInit(ctx context.Context) { |
|||
ctx, cancel := context.WithCancel(context.Background()) |
|||
defer cancel() |
|||
|
|||
l.IntervalReload = 5 * time.Second |
|||
|
|||
defer func() { |
|||
rec := recover() |
|||
if rec != nil { |
|||
b := string(debug.Stack()) |
|||
fmt.Printf("panic in loggier (RotateInit). stack: %+v", b) |
|||
//cancel()
|
|||
//os.Exit(1)
|
|||
} |
|||
}() |
|||
|
|||
// попытка обновить файл (раз в 10 минут)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalReload) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
l.File.Close() // закрыл старый файл
|
|||
b := NewLogger(l.Dir, l.Levels, l.UID, l.Name, l.Service, l.Config, l.IntervalReload, l.IntervalClearFiles, l.PeriodSaveFiles) |
|||
|
|||
l.Output = b.GetOutput() |
|||
l.File = b.GetFile() // передал указатель на новый файл в структуру лога
|
|||
ticker = time.NewTicker(l.IntervalReload) |
|||
} |
|||
} |
|||
}() |
|||
|
|||
// попытка очистки старых файлов (каждые пол часа)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalClearFiles) |
|||
defer ticker.Stop() |
|||
|
|||
// получаем период, через который мы будем удалять файлы
|
|||
period := l.PeriodSaveFiles |
|||
if period == "" { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
slPeriod := strings.Split(period, "-") |
|||
if len(slPeriod) < 3 { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
|
|||
// получаем числовые значения года месяца и дня для расчета даты удаления файлов
|
|||
year, err := strconv.Atoi(slPeriod[0]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Year from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
month, err := strconv.Atoi(slPeriod[1]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Month from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
day, err := strconv.Atoi(slPeriod[2]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Day from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
oneMonthAgo := time.Now().AddDate(-year, -month, -day) // minus 1 месяц
|
|||
fileMonthAgoDate := oneMonthAgo.Format("2006.01.02") |
|||
|
|||
// пробегаем директорию и читаем все файлы, если имя меньше текущее время - месяц = удаляем
|
|||
directory, _ := os.Open(l.Dir) |
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
l.Error(err, "Error read directory: ", directory) |
|||
return |
|||
} |
|||
|
|||
for _, obj := range objects { |
|||
filename := obj.Name() |
|||
filenameMonthAgoDate := fileMonthAgoDate + "_" + l.Service |
|||
|
|||
if filenameMonthAgoDate > filename { |
|||
pathFile := l.Dir + sep + filename |
|||
err = os.Remove(pathFile) |
|||
if err != nil { |
|||
l.Error(err, "Error deleted file: ", pathFile) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
ticker = time.NewTicker(l.IntervalClearFiles) |
|||
} |
|||
} |
|||
}() |
|||
} |
|||
|
|||
func (l *log) GetOutput() io.Writer { |
|||
l.mux.Lock() |
|||
defer l.mux.Unlock() |
|||
|
|||
return l.Output |
|||
} |
|||
|
|||
func (l *log) GetFile() *os.File { |
|||
|
|||
return l.File |
|||
} |
|||
|
|||
// Close closes the underlying log file handle.
// NOTE(review): the error from File.Close is discarded, so flush/close
// failures are not observable by callers.
func (l *log) Close() {
	l.File.Close()
}
|||
|
|||
func NewLogger(logsDir, level, uid, name, srv, config string, intervalReload, intervalClearFiles time.Duration, periodSaveFiles string) Log { |
|||
var output io.Writer |
|||
var file *os.File |
|||
var err error |
|||
var mode os.FileMode |
|||
m := sync.Mutex{} |
|||
|
|||
datefile := time.Now().Format("2006.01.02") |
|||
logName := datefile + "_" + srv + "_" + uid + ".log" |
|||
|
|||
// создаем/открываем файл логирования и назначаем его логеру
|
|||
mode = 0711 |
|||
CreateDir(logsDir, mode) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating directory") |
|||
return nil |
|||
} |
|||
|
|||
pathFile := logsDir + "/" + logName |
|||
|
|||
if !IsExist(pathFile) { |
|||
err := CreateFile(pathFile) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating file") |
|||
return nil |
|||
} |
|||
} |
|||
|
|||
file, err = os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) |
|||
output = file |
|||
if err != nil { |
|||
logrus.Panic(err, "error opening file") |
|||
return nil |
|||
} |
|||
|
|||
return &log{ |
|||
Output: output, |
|||
Levels: level, |
|||
UID: uid, |
|||
Name: name, |
|||
Service: srv, |
|||
Dir: logsDir, |
|||
Config: config, |
|||
IntervalReload: intervalReload, |
|||
IntervalClearFiles: intervalClearFiles, |
|||
PeriodSaveFiles: periodSaveFiles, |
|||
mux: &m, |
|||
File: file, |
|||
} |
|||
} |
@ -0,0 +1,343 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"net/http" |
|||
"sort" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
// Metrics is a snapshot of service load indicators accumulated over one
// reporting interval.
// NOTE(review): Generate feeds the TPR_* fields from Duration.Microseconds(),
// so despite the _MS suffix the unit appears to be microseconds — confirm.
type Metrics struct {
	StateHost    StateHost
	Connections  int     // total number of connections over the whole accounting period
	Queue_AVG    float32 // average number of requests in the queue
	Queue_QTL_80 float32 // 80% quantile — average of queue lengths beyond the 80% boundary of the sorted series
	Queue_QTL_90 float32 // 90% quantile
	Queue_QTL_99 float32 // 99% quantile

	TPR_AVG_MS    float32 // (ms) Time per request — average request processing time
	TPR_QTL_MS_80 float32 // (ms) 80% quantile — average of processing times beyond the 80% boundary of the sorted series
	TPR_QTL_MS_90 float32 // (ms) 90% quantile
	TPR_QTL_MS_99 float32 // (ms) 99% quantile

	RPS int // Request per second — number of requests per second
}
|||
|
|||
// serviceMetric accumulates raw per-request measurements and turns them into
// a Metrics snapshot on demand (see Generate / SaveToStash / Get).
type serviceMetric struct {
	Metrics
	Stash Metrics // pocket holding the previously generated snapshot served to readers

	connectionOpen int             // currently open connections (+ on request, - on response)
	queue          []int           // series of in-flight (not yet closed) connection counts, see above
	tpr            []time.Duration // series of request processing times

	mux *sync.Mutex
	// NOTE(review): storing a context in a struct is a known anti-pattern;
	// it is kept here because NewMetric receives it for the logger goroutine.
	ctx context.Context
}
|||
|
|||
// ServiceMetric is the behaviour of the per-service metrics collector.
type ServiceMetric interface {
	SetState()                                 // refresh host-level metrics (memory, goroutines)
	SetConnectionIncrement()                   // register a request start
	SetConnectionDecrement()                   // register a request end
	SetTimeRequest(timeRequest time.Duration)  // record one request duration
	Generate()                                 // compute aggregates from the raw series
	Get() (result Metrics)                     // read the stashed snapshot
	Clear()                                    // reset accumulators for the next interval
	SaveToStash()                              // stash the computed snapshot
	Middleware(next http.Handler) http.Handler // HTTP middleware doing increment/decrement
}
|||
|
|||
func (s *serviceMetric) SetState() { |
|||
//s.mux.Lock()
|
|||
//defer s.mux.Unlock()
|
|||
|
|||
s.StateHost.Tick() |
|||
|
|||
return |
|||
} |
|||
|
|||
// записываем время обработки запроса в массив
|
|||
func (s *serviceMetric) SetTimeRequest(timeRequest time.Duration) { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.tpr = append(s.tpr, timeRequest) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SetConnectionIncrement увеличиваем счетчик и добавляем в массив метрик
|
|||
// формируем временной ряд количества соединений
|
|||
// при начале запроса увеличиваем, при завершении уменьшаем
|
|||
// запускаем в отдельной рутине, потому что ф-ция вызывается из сервиса и не должна быть блокирующей
|
|||
func (s *serviceMetric) SetConnectionIncrement() { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Connections = s.Connections + 1 |
|||
s.connectionOpen = s.connectionOpen + 1 |
|||
s.queue = append(s.queue, s.connectionOpen) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SetConnectionDecrement уменьшаем счетчик и добавляем в массив метрик
|
|||
// запускаем в отдельной рутине, потому что ф-ция вызывается из сервиса и не должна быть блокирующей
|
|||
func (s *serviceMetric) SetConnectionDecrement() { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
if s.connectionOpen != 0 { |
|||
s.connectionOpen = s.connectionOpen - 1 |
|||
} |
|||
s.queue = append(s.queue, s.connectionOpen) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) SetP(value time.Duration) { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.tpr = append(s.tpr, value) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SaveToStash сохраняем текущее значение расчитанных метрик в кармане
|
|||
func (s *serviceMetric) SaveToStash() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Stash.StateHost = s.StateHost |
|||
s.Stash.Connections = s.Connections |
|||
s.Stash.RPS = s.RPS |
|||
|
|||
s.Stash.Queue_AVG = s.Queue_AVG |
|||
s.Stash.Queue_QTL_99 = s.Queue_QTL_99 |
|||
s.Stash.Queue_QTL_90 = s.Queue_QTL_90 |
|||
s.Stash.Queue_QTL_80 = s.Queue_QTL_80 |
|||
|
|||
s.Stash.TPR_AVG_MS = s.TPR_AVG_MS |
|||
s.Stash.TPR_QTL_MS_80 = s.TPR_QTL_MS_80 |
|||
s.Stash.TPR_QTL_MS_90 = s.TPR_QTL_MS_90 |
|||
s.Stash.TPR_QTL_MS_99 = s.TPR_QTL_MS_99 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Clear() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Connections = 0 |
|||
s.connectionOpen = 0 |
|||
s.queue = []int{} |
|||
s.tpr = []time.Duration{} |
|||
|
|||
s.RPS = 0 |
|||
s.Queue_AVG = 0.0 |
|||
s.Queue_QTL_80 = 0.0 |
|||
s.Queue_QTL_90 = 0.0 |
|||
s.Queue_QTL_99 = 0.0 |
|||
|
|||
s.TPR_AVG_MS = 0.0 |
|||
s.TPR_QTL_MS_80 = 0.0 |
|||
s.TPR_QTL_MS_90 = 0.0 |
|||
s.TPR_QTL_MS_99 = 0.0 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Get() (result Metrics) { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
return s.Stash |
|||
} |
|||
|
|||
func (s *serviceMetric) Generate() { |
|||
var val_Queue_QTL_80, val_Queue_QTL_90, val_Queue_QTL_99, val_Queue float32 |
|||
var Queue_AVG, Queue_QTL_80, Queue_QTL_90, Queue_QTL_99 float32 |
|||
var val_TPR_80, val_TPR_90, val_TPR_99, val_TPR float32 |
|||
var AVG_TPR, QTL_TPR_80, QTL_TPR_90, QTL_TPR_99 float32 |
|||
|
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.SetState() // БЕЗ БЛОКИРОВКИ получаю текущие метрики загрузки хоста
|
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
// расчитываем среднее кол-во запросо и квартили (средние значения после 80-90-99 процентов всех запросов)
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
// сортируем список
|
|||
sort.Ints(s.queue) |
|||
|
|||
lenQueue := len(s.queue) |
|||
|
|||
if lenQueue != 0 { |
|||
len_Queue_QTL_80 := lenQueue * 8 / 10 |
|||
len_Queue_QTL_90 := lenQueue * 9 / 10 |
|||
len_Queue_QTL_99 := lenQueue * 99 / 100 |
|||
|
|||
for i, v := range s.queue { |
|||
vall := float32(v) |
|||
// суммируем значения которые после 80% других
|
|||
if i > len_Queue_QTL_80 { |
|||
val_Queue_QTL_80 = val_Queue_QTL_80 + vall |
|||
} |
|||
// суммируем значения которые после 90% других
|
|||
if i > len_Queue_QTL_90 { |
|||
val_Queue_QTL_90 = val_Queue_QTL_90 + vall |
|||
} |
|||
// суммируем значения которые после 99% других
|
|||
if i > len_Queue_QTL_99 { |
|||
val_Queue_QTL_99 = val_Queue_QTL_99 + vall |
|||
} |
|||
|
|||
val_Queue = val_Queue + vall |
|||
} |
|||
|
|||
lQ := float32(lenQueue) - 1 // проверка на 0
|
|||
if lQ == 0 { |
|||
lQ = 1 |
|||
} |
|||
Queue_AVG = val_Queue / lQ |
|||
Queue_QTL_80 = val_Queue_QTL_80 / float32(lenQueue-len_Queue_QTL_80) |
|||
Queue_QTL_90 = val_Queue_QTL_90 / float32(lenQueue-len_Queue_QTL_90) |
|||
Queue_QTL_99 = val_Queue_QTL_99 / float32(lenQueue-len_Queue_QTL_99) |
|||
} |
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
// расчитываем среднее время запросо и квартили (средние значения после 80-90-99 процентов всех запросов)
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
// сортируем список
|
|||
lenTPR := len(s.tpr) |
|||
if lenTPR != 0 { |
|||
|
|||
timeInt := []float64{} |
|||
for _, v := range s.tpr { |
|||
timeInt = append(timeInt, float64(v.Microseconds())) |
|||
} |
|||
sort.Float64s(timeInt) |
|||
|
|||
len_TPR_80 := lenTPR * 8 / 10 |
|||
len_TPR_90 := lenTPR * 9 / 10 |
|||
len_TPR_99 := lenTPR * 99 / 100 |
|||
|
|||
for i, v := range timeInt { |
|||
vall := float32(v) |
|||
// суммируем значения которые после 80% других
|
|||
if i > len_TPR_80 { |
|||
val_TPR_80 = val_TPR_80 + vall |
|||
} |
|||
// суммируем значения которые после 90% других
|
|||
if i > len_TPR_90 { |
|||
val_TPR_90 = val_TPR_90 + vall |
|||
} |
|||
// суммируем значения которые после 99% других
|
|||
if i > len_TPR_99 { |
|||
val_TPR_99 = val_TPR_99 + vall |
|||
} |
|||
|
|||
val_TPR = val_TPR + vall |
|||
} |
|||
|
|||
lQ := float32(lenQueue) - 1 |
|||
if lQ == 0 { |
|||
lQ = 1 |
|||
} |
|||
AVG_TPR = val_TPR / lQ |
|||
QTL_TPR_80 = val_TPR_80 / float32(lenTPR-len_TPR_80) |
|||
QTL_TPR_90 = val_TPR_90 / float32(lenTPR-len_TPR_90) |
|||
QTL_TPR_99 = val_TPR_99 / float32(lenTPR-len_TPR_99) |
|||
} |
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
s.RPS = lenQueue / 10 |
|||
|
|||
s.Queue_AVG = Queue_AVG |
|||
s.Queue_QTL_80 = Queue_QTL_80 |
|||
s.Queue_QTL_90 = Queue_QTL_90 |
|||
s.Queue_QTL_99 = Queue_QTL_99 |
|||
|
|||
s.TPR_AVG_MS = AVG_TPR |
|||
s.TPR_QTL_MS_80 = QTL_TPR_80 |
|||
s.TPR_QTL_MS_90 = QTL_TPR_90 |
|||
s.TPR_QTL_MS_99 = QTL_TPR_99 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Middleware(next http.Handler) http.Handler { |
|||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
|||
// увеличиваем счетчик активных сессий
|
|||
s.SetConnectionIncrement() |
|||
next.ServeHTTP(w, r) |
|||
|
|||
// уменьшаем счетчик активных сессий
|
|||
s.SetConnectionDecrement() |
|||
}) |
|||
} |
|||
|
|||
// interval - интервалы времени, через которые статистика будет сбрасыватсья в лог
|
|||
func NewMetric(ctx context.Context, logger Log, interval time.Duration) (metrics ServiceMetric) { |
|||
m := sync.Mutex{} |
|||
t := StateHost{} |
|||
s := Metrics{ |
|||
StateHost: t, |
|||
Queue_AVG: 0, |
|||
Queue_QTL_99: 0, |
|||
Queue_QTL_90: 0, |
|||
Queue_QTL_80: 0, |
|||
TPR_AVG_MS: 0, |
|||
TPR_QTL_MS_80: 0, |
|||
TPR_QTL_MS_90: 0, |
|||
TPR_QTL_MS_99: 0, |
|||
RPS: 0, |
|||
} |
|||
metrics = &serviceMetric{ |
|||
Metrics: s, |
|||
Stash: s, |
|||
connectionOpen: 0, |
|||
queue: []int{}, |
|||
mux: &m, |
|||
ctx: ctx, |
|||
} |
|||
|
|||
go RunMetricLogger(ctx, metrics, logger, interval) |
|||
|
|||
return metrics |
|||
} |
|||
|
|||
func RunMetricLogger(ctx context.Context, m ServiceMetric, logger Log, interval time.Duration) { |
|||
ticker := time.NewTicker(interval) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
// сохраняем значение метрик в лог
|
|||
m.Generate() // сгенерировали метрики
|
|||
m.SaveToStash() // сохранили в карман
|
|||
m.Clear() // очистили объект метрик для приема новых данных
|
|||
mes, _ := json.Marshal(m.Get()) |
|||
logger.Trace(string(mes)) // записали в лог из кармана
|
|||
|
|||
ticker = time.NewTicker(interval) |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,41 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"math" |
|||
"runtime" |
|||
|
|||
"github.com/shirou/gopsutil/mem" |
|||
) |
|||
|
|||
// StateHost is a snapshot of host-level resource usage.
// Only PercentageMemory and Goroutines are currently populated by Tick;
// the CPU and disk fields are retained for the disabled collectors there.
type StateHost struct {
	PercentageCPU,
	PercentageMemory,
	PercentageDisk,
	TotalCPU,
	TotalMemory,
	TotalDisk,
	UsedCPU,
	UsedMemory,
	UsedDisk float64
	Goroutines int
}
|||
|
|||
func (c *StateHost) Tick() { |
|||
//var pcpu, i float64
|
|||
|
|||
memoryStat, _ := mem.VirtualMemory() |
|||
//percentage, _ := cpu.Percent(0, true)
|
|||
//diskStat, _ := disk.Usage("/")
|
|||
//
|
|||
//for _, cpupercent := range percentage {
|
|||
// pcpu = (pcpu + cpupercent)
|
|||
// i ++
|
|||
//}
|
|||
|
|||
//c.PercentageCPU = math.Round(pcpu / i)
|
|||
c.PercentageMemory = math.Round(memoryStat.UsedPercent) |
|||
//c.PercentageDisk = math.Round(diskStat.UsedPercent)
|
|||
c.Goroutines = runtime.NumGoroutine() |
|||
|
|||
return |
|||
} |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,513 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package metadata provides access to Google Compute Engine (GCE)
|
|||
// metadata and API service accounts.
|
|||
//
|
|||
// This package is a wrapper around the GCE metadata service,
|
|||
// as documented at https://developers.google.com/compute/docs/metadata.
|
|||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net" |
|||
"net/http" |
|||
"net/url" |
|||
"os" |
|||
"runtime" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
const (
	// metadataIP is the documented metadata server IP address.
	metadataIP = "169.254.169.254"

	// metadataHostEnv is the environment variable specifying the
	// GCE metadata hostname. If empty, the default value of
	// metadataIP ("169.254.169.254") is used instead.
	// This variable name is not defined by any spec, as far as
	// I know; it was made up for the Go package.
	metadataHostEnv = "GCE_METADATA_HOST"

	// userAgent is sent with every metadata request issued by this package.
	userAgent = "gcloud-golang/0.1"
)
|||
|
|||
// cachedValue lazily fetches and caches a single metadata value.
type cachedValue struct {
	k    string // metadata key suffix, e.g. "project/project-id"
	trim bool   // whether to fetch the value with whitespace trimmed (getTrimmed)
	mu   sync.Mutex
	v    string // cached value; empty means not fetched yet (see get)
}
|||
|
|||
// Lazily-populated caches for frequently requested metadata values.
var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)
|||
|
|||
var (
	// defaultClient serves one-shot metadata requests; it bounds both the
	// dial and the wait for response headers so lookups fail fast off-GCE.
	defaultClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			ResponseHeaderTimeout: 2 * time.Second,
		},
	}}
	// subscribeClient deliberately has no ResponseHeaderTimeout, because
	// Subscribe issues hanging requests that wait for value changes.
	subscribeClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
	}}
)
|||
|
|||
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

// Error formats the missing metadata suffix into a human-readable message.
func (suffix NotDefinedError) Error() string {
	msg := fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
	return msg
}
|||
|
|||
func (c *cachedValue) get(cl *Client) (v string, err error) { |
|||
defer c.mu.Unlock() |
|||
c.mu.Lock() |
|||
if c.v != "" { |
|||
return c.v, nil |
|||
} |
|||
if c.trim { |
|||
v, err = cl.getTrimmed(c.k) |
|||
} else { |
|||
v, err = cl.Get(c.k) |
|||
} |
|||
if err == nil { |
|||
c.v = v |
|||
} |
|||
return |
|||
} |
|||
|
|||
// onGCEOnce guards the single evaluation of testOnGCE whose result is
// cached in onGCE (see OnGCE).
var (
	onGCEOnce sync.Once
	onGCE     bool
)
|||
|
|||
// OnGCE reports whether this process is running on Google Compute Engine.
// The (relatively expensive) probe in testOnGCE runs at most once; its
// result is cached for all subsequent calls.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}
|||
|
|||
// initOnGCE performs the one-time GCE detection; it exists as a named
// function so it can be passed to onGCEOnce.Do.
func initOnGCE() {
	onGCE = testOnGCE()
}
|||
|
|||
// testOnGCE probes whether this process runs on GCE by racing two strategies:
// an HTTP request to the metadata IP (checking the Metadata-Flavor header)
// and a DNS lookup of metadata.google.internal. If the local DMI product name
// already suggests GCE, it waits longer for a positive answer.
func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffered so neither probe goroutine blocks if the result is unread.
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := defaultClient.hc.Do(req.WithContext(ctx))
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}
|||
|
|||
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	// A read error is deliberately ignored: a missing or unreadable DMI
	// file just produces an empty product name below.
	raw, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	switch strings.TrimSpace(string(raw)) {
	case "Google", "Google Compute Engine":
		return true
	}
	return false
}
|||
|
|||
// The functions below are package-level convenience wrappers. Each
// delegates to a shared client (defaultClient or subscribeClient,
// declared elsewhere in this file).

// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return subscribeClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }

// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }

// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }

// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
	return defaultClient.InstanceAttributeValue(attr)
}

// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
	return defaultClient.ProjectAttributeValue(attr)
}

// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|||
|
|||
// strsContains reports whether s is an element of ss.
func strsContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
|||
|
|||
// A Client provides metadata.
type Client struct {
	hc *http.Client // HTTP client used for every metadata-server request
}

// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
	return &Client{hc: c}
}
|||
|
|||
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get. The ETag is what Subscribe uses
// to long-poll for changes.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	// NewRequest's error is ignored: the URL is assembled from known-good
	// parts. NOTE(review): a malformed suffix would surface from Do below.
	req, _ := http.NewRequest("GET", u, nil)
	// The metadata server requires this header on every request.
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	// 404 means the key is simply not defined; report it with the typed
	// error so callers can distinguish it from transport failures.
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	// Any other non-200 status carries the body as the error message.
	if res.StatusCode != 200 {
		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
	}
	return string(all), res.Header.Get("Etag"), nil
}
|||
|
|||
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
	// Delegate to getETag and discard the ETag.
	val, _, err := c.getETag(suffix)
	return val, err
}

// getTrimmed fetches suffix via Get and strips surrounding whitespace
// from the value.
func (c *Client) getTrimmed(suffix string) (s string, err error) {
	s, err = c.Get(suffix)
	s = strings.TrimSpace(s)
	return
}
|||
|
|||
func (c *Client) lines(suffix string) ([]string, error) { |
|||
j, err := c.Get(suffix) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
s := strings.Split(strings.TrimSpace(j), "\n") |
|||
for i := range s { |
|||
s[i] = strings.TrimSpace(s[i]) |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// ProjectID returns the current instance's project ID string.
// projID (declared elsewhere in this file) presumably caches the value — its
// get method takes the client to fetch through.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}
|||
|
|||
// InstanceTags returns the list of user-defined instance tags,
|
|||
// assigned when initially creating a GCE instance.
|
|||
func (c *Client) InstanceTags() ([]string, error) { |
|||
var s []string |
|||
j, err := c.Get("instance/tags") |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { |
|||
return nil, err |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// InstanceName returns the current VM's instance ID string.
|
|||
func (c *Client) InstanceName() (string, error) { |
|||
host, err := c.Hostname() |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
return strings.Split(host, ".")[0], nil |
|||
} |
|||
|
|||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|||
func (c *Client) Zone() (string, error) { |
|||
zone, err := c.getTrimmed("instance/zone") |
|||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
return zone[strings.LastIndex(zone, "/")+1:], nil |
|||
} |
|||
|
|||
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|||
|
|||
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}
|||
|
|||
// Scopes returns the service account scopes for the given account.
|
|||
// The account may be empty or the string "default" to use the instance's
|
|||
// main account.
|
|||
func (c *Client) Scopes(serviceAccount string) ([]string, error) { |
|||
if serviceAccount == "" { |
|||
serviceAccount = "default" |
|||
} |
|||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") |
|||
} |
|||
|
|||
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	// ok flips to false once the server reports the key deleted.
	ok := true
	// Append the long-poll parameters, respecting any query string already
	// present in the suffix.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		// Long-poll until the value moves past lastETag.
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			// Deleted: deliver one final callback with ok == false.
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
|||
|
|||
// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

// Error renders the status code and server message in the compute
// package's canonical error format.
func (e *Error) Error() string {
	msg := fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
	return msg
}
@ -0,0 +1,315 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package iam supports the resource-specific operations of Google Cloud
|
|||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
|||
// See https://cloud.google.com/iam for more about IAM.
|
|||
//
|
|||
// Users of the Google Cloud Libraries will typically not use this package
|
|||
// directly. Instead they will begin with some resource that supports IAM, like
|
|||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
|||
package iam |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
gax "github.com/googleapis/gax-go/v2" |
|||
pb "google.golang.org/genproto/googleapis/iam/v1" |
|||
"google.golang.org/grpc" |
|||
"google.golang.org/grpc/codes" |
|||
"google.golang.org/grpc/metadata" |
|||
) |
|||
|
|||
// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	Set(ctx context.Context, resource string, p *pb.Policy) error
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient
}

// withRetry retries RPCs that fail with DeadlineExceeded or Unavailable,
// using exponential backoff: 100ms initial, x1.3 multiplier, 60s cap.
var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})
|||
|
|||
// Get fetches the IAM policy for resource via the GetIamPolicy RPC,
// retrying transient failures per withRetry.
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	var proto *pb.Policy
	// x-goog-request-params routes the request by resource name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return proto, nil
}

// Set replaces the IAM policy on resource via the SetIamPolicy RPC,
// retrying transient failures per withRetry.
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
			Resource: resource,
			Policy:   p,
		})
		return err
	}, withRetry)
}

// Test returns the subset of perms the caller holds on resource, via the
// TestIamPermissions RPC, retrying transient failures per withRetry.
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	var res *pb.TestIamPermissionsResponse
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
	ctx = insertMetadata(ctx, md)

	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
			Resource:    resource,
			Permissions: perms,
		})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}
|||
|
|||
// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client // transport implementation (gRPC in production)
	resource string // fully-qualified resource name the operations act on
}

// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
	return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}

// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: c}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
	return &Handle{
		c:        c,
		resource: resource,
	}
}
|||
|
|||
// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
	proto, err := h.c.Get(ctx, h.resource)
	if err != nil {
		return nil, err
	}
	// Wrap the raw proto so callers use the Policy convenience methods.
	return &Policy{InternalProto: proto}, nil
}

// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}
|||
|
|||
// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)
|||
|
|||
// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.

	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}
|||
|
|||
// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
	b := p.binding(r)
	if b == nil {
		return nil
	}
	// Returned slice aliases the binding's backing array — hence the
	// "should not be modified" warning above.
	return b.Members
}

// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
	return memberIndex(member, p.binding(r)) >= 0
}
|||
|
|||
// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
	b := p.binding(r)
	if b == nil {
		// Lazily initialize the proto on first mutation so the zero
		// Policy stays usable.
		if p.InternalProto == nil {
			p.InternalProto = &pb.Policy{}
		}
		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
			Role:    string(r),
			Members: []string{member},
		})
		return
	}
	// Append only if the member is not already present (idempotent add).
	if memberIndex(member, b) < 0 {
		b.Members = append(b.Members, member)
		return
	}
}
|||
|
|||
// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
	bi := p.bindingIndex(r)
	if bi < 0 {
		// No binding for this role; nothing to do.
		return
	}
	bindings := p.InternalProto.Bindings
	b := bindings[bi]
	mi := memberIndex(member, b)
	if mi < 0 {
		// Member not present in this role's binding.
		return
	}
	// Order doesn't matter for bindings or members, so to remove, move the last item
	// into the removed spot and shrink the slice.
	if len(b.Members) == 1 {
		// Remove binding.
		last := len(bindings) - 1
		bindings[bi] = bindings[last]
		bindings[last] = nil // clear duplicated tail pointer so it can be GC'd
		p.InternalProto.Bindings = bindings[:last]
		return
	}
	// Remove member.
	// TODO(jba): worry about multiple copies of m?
	last := len(b.Members) - 1
	b.Members[mi] = b.Members[last]
	b.Members[last] = "" // drop duplicated tail string
	b.Members = b.Members[:last]
}
|||
|
|||
// Roles returns the names of all the roles that appear in the Policy.
|
|||
func (p *Policy) Roles() []RoleName { |
|||
if p.InternalProto == nil { |
|||
return nil |
|||
} |
|||
var rns []RoleName |
|||
for _, b := range p.InternalProto.Bindings { |
|||
rns = append(rns, RoleName(b.Role)) |
|||
} |
|||
return rns |
|||
} |
|||
|
|||
// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}

// bindingIndex returns the index of the binding for role r, or -1 if
// there is none (including when the policy proto is nil).
func (p *Policy) bindingIndex(r RoleName) int {
	if p.InternalProto == nil {
		return -1
	}
	for i, b := range p.InternalProto.Bindings {
		if b.Role == string(r) {
			return i
		}
	}
	return -1
}
|||
|
|||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
|||
func memberIndex(m string, b *pb.Binding) int { |
|||
if b == nil { |
|||
return -1 |
|||
} |
|||
for i, mm := range b.Members { |
|||
if mm == m { |
|||
return i |
|||
} |
|||
} |
|||
return -1 |
|||
} |
|||
|
|||
// insertMetadata inserts metadata into the given context, merging the
// provided MDs into any outgoing metadata already attached to ctx.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating: the MD retrieved from the context must not be
	// modified in place.
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}
@ -0,0 +1,54 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
import ( |
|||
"fmt" |
|||
|
|||
"google.golang.org/api/googleapi" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
//
//	fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
	if err == nil {
		panic("Annotate called with nil")
	}
	// gRPC status errors: prefix the message inside the proto so the
	// status code survives the round trip.
	if s, ok := status.FromError(err); ok {
		p := s.Proto()
		p.Message = msg + ": " + p.Message
		return status.ErrorProto(p)
	}
	// googleapi errors: mutate the message in place, keeping the HTTP code
	// and other fields intact.
	if g, ok := err.(*googleapi.Error); ok {
		g.Message = msg + ": " + g.Message
		return g
	}
	return fmt.Errorf("%s: %v", msg, err)
}

// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
	return Annotate(err, fmt.Sprintf(format, args...))
}
@ -0,0 +1,108 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package optional provides versions of primitive types that can
|
|||
// be nil. These are useful in methods that update some of an API object's
|
|||
// fields.
|
|||
package optional |
|||
|
|||
import ( |
|||
"fmt" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
type ( |
|||
// Bool is either a bool or nil.
|
|||
Bool interface{} |
|||
|
|||
// String is either a string or nil.
|
|||
String interface{} |
|||
|
|||
// Int is either an int or nil.
|
|||
Int interface{} |
|||
|
|||
// Uint is either a uint or nil.
|
|||
Uint interface{} |
|||
|
|||
// Float64 is either a float64 or nil.
|
|||
Float64 interface{} |
|||
|
|||
// Duration is either a time.Duration or nil.
|
|||
Duration interface{} |
|||
) |
|||
|
|||
// ToBool returns its argument as a bool.
|
|||
// It panics if its argument is nil or not a bool.
|
|||
func ToBool(v Bool) bool { |
|||
x, ok := v.(bool) |
|||
if !ok { |
|||
doPanic("Bool", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToString returns its argument as a string.
|
|||
// It panics if its argument is nil or not a string.
|
|||
func ToString(v String) string { |
|||
x, ok := v.(string) |
|||
if !ok { |
|||
doPanic("String", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToInt returns its argument as an int.
|
|||
// It panics if its argument is nil or not an int.
|
|||
func ToInt(v Int) int { |
|||
x, ok := v.(int) |
|||
if !ok { |
|||
doPanic("Int", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToUint returns its argument as a uint.
|
|||
// It panics if its argument is nil or not a uint.
|
|||
func ToUint(v Uint) uint { |
|||
x, ok := v.(uint) |
|||
if !ok { |
|||
doPanic("Uint", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToFloat64 returns its argument as a float64.
|
|||
// It panics if its argument is nil or not a float64.
|
|||
func ToFloat64(v Float64) float64 { |
|||
x, ok := v.(float64) |
|||
if !ok { |
|||
doPanic("Float64", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToDuration returns its argument as a time.Duration.
|
|||
// It panics if its argument is nil or not a time.Duration.
|
|||
func ToDuration(v Duration) time.Duration { |
|||
x, ok := v.(time.Duration) |
|||
if !ok { |
|||
doPanic("Duration", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
func doPanic(capType string, v interface{}) { |
|||
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) |
|||
} |
@ -0,0 +1,54 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
import ( |
|||
"context" |
|||
"time" |
|||
|
|||
gax "github.com/googleapis/gax-go/v2" |
|||
) |
|||
|
|||
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Error() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	return retry(ctx, bo, f, gax.Sleep)
}

// retry implements Retry with an injectable sleep function so tests can
// avoid real waiting.
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
	sleep func(context.Context, time.Duration) error) error {
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		// Remember the last "real" error from f.
		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
			lastErr = err
		}
		p := bo.Pause()
		// sleep presumably fails only when ctx ends during the pause
		// (gax.Sleep semantics); surface the last real error from f
		// alongside the context error if we have one.
		if cerr := sleep(ctx, p); cerr != nil {
			if lastErr != nil {
				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
			}
			return cerr
		}
	}
}
@ -0,0 +1,109 @@ |
|||
// Copyright 2018 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package trace |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
|
|||
"go.opencensus.io/trace" |
|||
"google.golang.org/api/googleapi" |
|||
"google.golang.org/genproto/googleapis/rpc/code" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// StartSpan adds a span to the trace with the given name.
|
|||
func StartSpan(ctx context.Context, name string) context.Context { |
|||
ctx, _ = trace.StartSpan(ctx, name) |
|||
return ctx |
|||
} |
|||
|
|||
// EndSpan ends a span with the given error.
|
|||
func EndSpan(ctx context.Context, err error) { |
|||
span := trace.FromContext(ctx) |
|||
if err != nil { |
|||
span.SetStatus(toStatus(err)) |
|||
} |
|||
span.End() |
|||
} |
|||
|
|||
// toStatus interrogates an error and converts it to an appropriate
|
|||
// OpenCensus status.
|
|||
func toStatus(err error) trace.Status { |
|||
if err2, ok := err.(*googleapi.Error); ok { |
|||
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} |
|||
} else if s, ok := status.FromError(err); ok { |
|||
return trace.Status{Code: int32(s.Code()), Message: s.Message()} |
|||
} else { |
|||
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} |
|||
} |
|||
} |
|||
|
|||
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
|
|||
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
|
|||
func httpStatusCodeToOCCode(httpStatusCode int) int32 { |
|||
switch httpStatusCode { |
|||
case 200: |
|||
return int32(code.Code_OK) |
|||
case 499: |
|||
return int32(code.Code_CANCELLED) |
|||
case 500: |
|||
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
|
|||
case 400: |
|||
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
|
|||
case 504: |
|||
return int32(code.Code_DEADLINE_EXCEEDED) |
|||
case 404: |
|||
return int32(code.Code_NOT_FOUND) |
|||
case 409: |
|||
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
|
|||
case 403: |
|||
return int32(code.Code_PERMISSION_DENIED) |
|||
case 401: |
|||
return int32(code.Code_UNAUTHENTICATED) |
|||
case 429: |
|||
return int32(code.Code_RESOURCE_EXHAUSTED) |
|||
case 501: |
|||
return int32(code.Code_UNIMPLEMENTED) |
|||
case 503: |
|||
return int32(code.Code_UNAVAILABLE) |
|||
default: |
|||
return int32(code.Code_UNKNOWN) |
|||
} |
|||
} |
|||
|
|||
// TODO: (odeke-em): perhaps just pass around spans due to the cost
|
|||
// incurred from using trace.FromContext(ctx) yet we could avoid
|
|||
// throwing away the work done by ctx, span := trace.StartSpan.
|
|||
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { |
|||
var attrs []trace.Attribute |
|||
for k, v := range attrMap { |
|||
var a trace.Attribute |
|||
switch v := v.(type) { |
|||
case string: |
|||
a = trace.StringAttribute(k, v) |
|||
case bool: |
|||
a = trace.BoolAttribute(k, v) |
|||
case int: |
|||
a = trace.Int64Attribute(k, int64(v)) |
|||
case int64: |
|||
a = trace.Int64Attribute(k, v) |
|||
default: |
|||
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) |
|||
} |
|||
attrs = append(attrs, a) |
|||
} |
|||
trace.FromContext(ctx).Annotatef(attrs, format, args...) |
|||
} |
@ -0,0 +1,6 @@ |
|||
#!/bin/bash
# Rewrites the `const Repo = "YYYYMMDD"` line in the Go file named by
# $GOFILE (set by `go generate`) to today's date.

# Fail fast: propagate sed errors and abort if GOFILE is unset.
set -eu

today=$(date +%Y%m%d)

# Quote expansions so an unusual path in $GOFILE cannot word-split or glob.
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'"$today"'"/' "$GOFILE"
@ -0,0 +1,71 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
//go:generate ./update_version.sh
|
|||
|
|||
// Package version contains version information for Google Cloud Client
|
|||
// Libraries for Go, as reported in request headers.
|
|||
package version |
|||
|
|||
import ( |
|||
"runtime" |
|||
"strings" |
|||
"unicode" |
|||
) |
|||
|
|||
// Repo is the current version of the client libraries in this
|
|||
// repo. It should be a date in YYYYMMDD format.
|
|||
const Repo = "20180226" |
|||
|
|||
// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	// goVersion is computed once at package init from runtime.Version().
	return goVersion
}
|||
|
|||
var goVersion = goVer(runtime.Version()) |
|||
|
|||
// develPrefix marks development builds of the Go toolchain in the output of
// runtime.Version(), e.g. "devel +abc123 ...".
const develPrefix = "devel +"

// goVer normalizes a runtime.Version() string into a semver-like form
// ("go1.8rc1" -> "1.8.0-rc1"). Development builds yield the token after
// "devel +"; unrecognized inputs yield "".
func goVer(s string) string {
	if rest := strings.TrimPrefix(s, develPrefix); rest != s {
		// Development build: keep only the first whitespace-delimited token.
		if sp := strings.IndexFunc(rest, unicode.IsSpace); sp >= 0 {
			rest = rest[:sp]
		}
		return rest
	}

	if !strings.HasPrefix(s, "go1") {
		return ""
	}
	v := s[2:] // drop the "go" prefix, keeping "1...."
	prerelease := ""
	if cut := strings.IndexFunc(v, notSemverRune); cut >= 0 {
		v, prerelease = v[:cut], v[cut:]
	}
	switch {
	case strings.HasSuffix(v, "."):
		v += "0"
	case strings.Count(v, ".") < 2:
		v += ".0"
	}
	if prerelease != "" {
		v += "-" + prerelease
	}
	return v
}

// notSemverRune reports whether r cannot appear in the numeric part of a
// semantic version (i.e. r is not a digit or a dot).
func notSemverRune(r rune) bool {
	return (r < '0' || r > '9') && r != '.'
}
@ -0,0 +1,335 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"net/http" |
|||
"reflect" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
"google.golang.org/api/googleapi" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// ACLRole is the level of access to grant.
type ACLRole string

// Predefined ACL roles.
const (
	RoleOwner  ACLRole = "OWNER"
	RoleReader ACLRole = "READER"
	RoleWriter ACLRole = "WRITER"
)

// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

// Predefined ACL entities.
const (
	AllUsers              ACLEntity = "allUsers"
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)

// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
	Entity      ACLEntity
	EntityID    string
	Role        ACLRole
	Domain      string
	Email       string
	ProjectTeam *ProjectTeam
}

// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
	ProjectNumber string
	Team          string
}

// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
	c           *Client
	bucket      string
	object      string // empty when the handle refers to a bucket ACL
	isDefault   bool   // true when the handle refers to a bucket's default object ACL
	userProject string // for requester-pays buckets
}
|||
|
|||
// Delete permanently deletes the ACL entry for the given entity.
|
|||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectDelete(ctx, entity) |
|||
} |
|||
if a.isDefault { |
|||
return a.bucketDefaultDelete(ctx, entity) |
|||
} |
|||
return a.bucketDelete(ctx, entity) |
|||
} |
|||
|
|||
// Set sets the role for the given entity.
|
|||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectSet(ctx, entity, role, false) |
|||
} |
|||
if a.isDefault { |
|||
return a.objectSet(ctx, entity, role, true) |
|||
} |
|||
return a.bucketSet(ctx, entity, role) |
|||
} |
|||
|
|||
// List retrieves ACL entries.
|
|||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectList(ctx) |
|||
} |
|||
if a.isDefault { |
|||
return a.bucketDefaultList(ctx) |
|||
} |
|||
return a.bucketList(ctx) |
|||
} |
|||
|
|||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { |
|||
var acls *raw.ObjectAccessControls |
|||
var err error |
|||
err = runWithRetry(ctx, func() error { |
|||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) |
|||
a.configureCall(ctx, req) |
|||
acls, err = req.Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return toObjectACLRules(acls.Items), nil |
|||
} |
|||
|
|||
// bucketDefaultDelete removes entity's entry from the bucket's default
// object ACL; the call is issued through runWithRetry.
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
|||
|
|||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { |
|||
var acls *raw.BucketAccessControls |
|||
var err error |
|||
err = runWithRetry(ctx, func() error { |
|||
req := a.c.raw.BucketAccessControls.List(a.bucket) |
|||
a.configureCall(ctx, req) |
|||
acls, err = req.Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return toBucketACLRules(acls.Items), nil |
|||
} |
|||
|
|||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { |
|||
acl := &raw.BucketAccessControl{ |
|||
Bucket: a.bucket, |
|||
Entity: string(entity), |
|||
Role: string(role), |
|||
} |
|||
err := runWithRetry(ctx, func() error { |
|||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) |
|||
a.configureCall(ctx, req) |
|||
_, err := req.Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// bucketDelete removes entity's entry from the bucket's own ACL; the call is
// issued through runWithRetry.
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
|||
|
|||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { |
|||
var acls *raw.ObjectAccessControls |
|||
var err error |
|||
err = runWithRetry(ctx, func() error { |
|||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) |
|||
a.configureCall(ctx, req) |
|||
acls, err = req.Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return toObjectACLRules(acls.Items), nil |
|||
} |
|||
|
|||
// objectSet updates the ACL entry for entity on this handle's object, or on
// the bucket's default object ACL when isBucketDefault is true.
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
	// Both Update calls below satisfy this narrow interface, letting a single
	// code path configure and execute either request.
	type setRequest interface {
		Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
		Header() http.Header
	}

	acl := &raw.ObjectAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	var req setRequest
	if isBucketDefault {
		req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
	} else {
		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
	}
	a.configureCall(ctx, req)
	return runWithRetry(ctx, func() error {
		_, err := req.Do()
		return err
	})
}
|||
|
|||
// objectDelete removes entity's entry from the object's ACL; the call is
// issued through runWithRetry.
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(ctx, req)
		return req.Do()
	})
}
|||
|
|||
// configureCall applies common per-call settings to a generated *Call value:
// the request context, the user project (for requester-pays buckets), and the
// headers added by setClientHeader.
//
// The generated call types share no common interface beyond Header, so
// Context and UserProject are invoked via reflection.
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}
|||
|
|||
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toObjectACLRule(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toBucketACLRule(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { |
|||
return ACLRule{ |
|||
Entity: ACLEntity(a.Entity), |
|||
EntityID: a.EntityId, |
|||
Role: ACLRole(a.Role), |
|||
Domain: a.Domain, |
|||
Email: a.Email, |
|||
ProjectTeam: toObjectProjectTeam(a.ProjectTeam), |
|||
} |
|||
} |
|||
|
|||
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { |
|||
return ACLRule{ |
|||
Entity: ACLEntity(a.Entity), |
|||
EntityID: a.EntityId, |
|||
Role: ACLRole(a.Role), |
|||
Domain: a.Domain, |
|||
Email: a.Email, |
|||
ProjectTeam: toBucketProjectTeam(a.ProjectTeam), |
|||
} |
|||
} |
|||
|
|||
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*raw.ObjectAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
|||
} |
|||
return r |
|||
} |
|||
|
|||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*raw.BucketAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
|||
} |
|||
return r |
|||
} |
|||
|
|||
// toRawBucketAccessControl converts r to its raw API form. Only Bucket,
// Entity and Role are populated; the other ACLRule fields are not settable.
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
	return &raw.BucketAccessControl{
		Bucket: bucket,
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}
|||
|
|||
// toRawObjectAccessControl converts r to its raw API form. Only Bucket,
// Entity and Role are populated; the other ACLRule fields are not settable.
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
	return &raw.ObjectAccessControl{
		Bucket: bucket,
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}
|||
|
|||
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { |
|||
if p == nil { |
|||
return nil |
|||
} |
|||
return &ProjectTeam{ |
|||
ProjectNumber: p.ProjectNumber, |
|||
Team: p.Team, |
|||
} |
|||
} |
|||
|
|||
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { |
|||
if p == nil { |
|||
return nil |
|||
} |
|||
return &ProjectTeam{ |
|||
ProjectNumber: p.ProjectNumber, |
|||
Team: p.Team, |
|||
} |
|||
} |
File diff suppressed because it is too large
@ -0,0 +1,228 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
//
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
// in which case the user project of src is billed.
//
// See Copier for the configurable fields (attributes, RewriteToken,
// ProgressFunc, DestinationKMSKeyName).
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
	return &Copier{dst: dst, src: src}
}
|||
|
|||
// A Copier copies a source object to a destination.
type Copier struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Copier. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// RewriteToken can be set before calling Run to resume a copy
	// operation. After Run returns a non-nil error, RewriteToken will
	// have been updated to contain the value needed to resume the copy.
	RewriteToken string

	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
	// operation. If ProgressFunc is not nil and copying requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
	// ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far and the total size in bytes of the source object.
	//
	// ProgressFunc is intended to make upload progress available to the
	// application. For example, the implementation of ProgressFunc may update
	// a progress bar in the application's UI, or log the result of
	// float64(copiedBytes)/float64(totalBytes).
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(copiedBytes, totalBytes uint64)

	// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
	// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
	// any.
	//
	// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
	// (via ObjectHandle.Key) on the destination object will result in an error when
	// Run is called.
	DestinationKMSKeyName string

	// dst and src are the destination and source handles set by CopierFrom.
	dst, src *ObjectHandle
}
|||
|
|||
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := c.src.validate(); err != nil {
		return nil, err
	}
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
		return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
	}
	// Convert destination attributes to raw form, omitting the bucket.
	// If the bucket is included but name or content-type aren't, the service
	// returns a 400 with "Required" as the only message. Omitting the bucket
	// does not cause any problems.
	rawObject := c.ObjectAttrs.toRawObject("")
	// A rewrite may require multiple RPCs; callRewrite records the returned
	// RewriteToken on c so each iteration resumes where the last left off.
	for {
		res, err := c.callRewrite(ctx, rawObject)
		if err != nil {
			return nil, err
		}
		if c.ProgressFunc != nil {
			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
		}
		if res.Done { // Finished successfully.
			return newObject(res.Resource), nil
		}
	}
}
|||
|
|||
// callRewrite issues a single Objects.Rewrite RPC, applying the Copier's
// configuration (rewrite token, KMS key, predefined ACL, conditions, user
// project and encryption headers), and saves the returned RewriteToken on c.
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)

	call.Context(ctx).Projection("full")
	if c.RewriteToken != "" {
		call.RewriteToken(c.RewriteToken)
	}
	if c.DestinationKMSKeyName != "" {
		call.DestinationKmsKeyName(c.DestinationKMSKeyName)
	}
	if c.PredefinedACL != "" {
		call.DestinationPredefinedAcl(c.PredefinedACL)
	}
	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	// The destination's user project takes precedence; fall back to the
	// source's (matches the billing rule documented on CopierFrom).
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	} else if c.src.userProject != "" {
		call.UserProject(c.src.userProject)
	}
	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
		return nil, err
	}
	var res *raw.RewriteResponse
	var err error
	setClientHeader(call.Header())
	// res and err are assigned inside the closure; runWithRetry returns the
	// error from the final attempt.
	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	// Remember the token so a subsequent call can resume this rewrite.
	c.RewriteToken = res.RewriteToken
	return res, nil
}
|||
|
|||
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
//
// The encryption key for the destination object will be used to decrypt all
// source objects and encrypt the destination object. It is an error
// to specify an encryption key for any of the source objects.
//
// All sources must be in the same bucket as dst; Run enforces this.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
	return &Composer{dst: dst, srcs: srcs}
}
|||
|
|||
// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Composer. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// dst and srcs are the destination and source handles set by ComposerFrom.
	dst  *ObjectHandle
	srcs []*ObjectHandle
}
|||
|
|||
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if len(c.srcs) == 0 {
		return nil, errors.New("storage: at least one source object must be specified")
	}

	req := &raw.ComposeRequest{}
	// Compose requires a non-empty Destination, so we always set it,
	// even if the caller-provided ObjectAttrs is the zero value.
	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
	// Validate each source (same bucket as dst, no per-source encryption key)
	// and attach it, with its conditions, to the request.
	for _, src := range c.srcs {
		if err := src.validate(); err != nil {
			return nil, err
		}
		if src.bucket != c.dst.bucket {
			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
		}
		if src.encryptionKey != nil {
			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
		}
		srcObj := &raw.ComposeRequestSourceObjects{
			Name: src.object,
		}
		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
			return nil, err
		}
		req.SourceObjects = append(req.SourceObjects, srcObj)
	}

	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	}
	if c.PredefinedACL != "" {
		call.DestinationPredefinedAcl(c.PredefinedACL)
	}
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	// obj and err are assigned inside the closure; runWithRetry returns the
	// error from the final attempt.
	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	return newObject(obj), nil
}
@ -0,0 +1,176 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
/* |
|||
Package storage provides an easy way to work with Google Cloud Storage. |
|||
Google Cloud Storage stores data in named objects, which are grouped into buckets. |
|||
|
|||
More information about Google Cloud Storage is available at |
|||
https://cloud.google.com/storage/docs.
|
|||
|
|||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
|||
connection pooling and similar aspects of this package. |
|||
|
|||
All of the methods of this package use exponential backoff to retry calls that fail |
|||
with certain errors, as described in |
|||
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
|
|||
indefinitely unless the controlling context is canceled or the client is closed. See |
|||
context.WithTimeout and context.WithCancel. |
|||
|
|||
|
|||
Creating a Client |
|||
|
|||
To start working with this package, create a client: |
|||
|
|||
ctx := context.Background() |
|||
client, err := storage.NewClient(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
The client will use your default application credentials. |
|||
|
|||
If you only wish to access public data, you can create |
|||
an unauthenticated client with |
|||
|
|||
client, err := storage.NewClient(ctx, option.WithoutAuthentication()) |
|||
|
|||
Buckets |
|||
|
|||
A Google Cloud Storage bucket is a collection of objects. To work with a |
|||
bucket, make a bucket handle: |
|||
|
|||
bkt := client.Bucket(bucketName) |
|||
|
|||
A handle is a reference to a bucket. You can have a handle even if the |
|||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage, |
|||
call Create on the handle: |
|||
|
|||
if err := bkt.Create(ctx, projectID, nil); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
Note that although buckets are associated with projects, bucket names are |
|||
global across all projects. |
|||
|
|||
Each bucket has associated metadata, represented in this package by |
|||
BucketAttrs. The third argument to BucketHandle.Create allows you to set |
|||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use |
|||
Attrs: |
|||
|
|||
attrs, err := bkt.Attrs(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", |
|||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) |
|||
|
|||
Objects |
|||
|
|||
An object holds arbitrary data as a sequence of bytes, like a file. You |
|||
refer to objects using a handle, just as with buckets, but unlike buckets |
|||
you don't explicitly create an object. Instead, the first time you write |
|||
to an object it will be created. You can use the standard Go io.Reader |
|||
and io.Writer interfaces to read and write object data: |
|||
|
|||
obj := bkt.Object("data") |
|||
// Write something to obj.
|
|||
// w implements io.Writer.
|
|||
w := obj.NewWriter(ctx) |
|||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
|||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
// Close, just like writing a file.
|
|||
if err := w.Close(); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
// Read it back.
|
|||
r, err := obj.NewReader(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
defer r.Close() |
|||
if _, err := io.Copy(os.Stdout, r); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
// Prints "This object contains text."
|
|||
|
|||
Objects also have attributes, which you can fetch with Attrs: |
|||
|
|||
objAttrs, err := obj.Attrs(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Printf("object %s has size %d and can be read using %s\n", |
|||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) |
|||
|
|||
ACLs |
|||
|
|||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of |
|||
ACLRules, each of which specifies the role of a user, group or project. ACLs |
|||
are suitable for fine-grained control, but you may prefer using IAM to control |
|||
access at the project level (see |
|||
https://cloud.google.com/storage/docs/access-control/iam).
|
|||
|
|||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: |
|||
|
|||
acls, err := obj.ACL().List(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
for _, rule := range acls { |
|||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) |
|||
} |
|||
|
|||
You can also set and delete ACLs. |
|||
|
|||
Conditions |
|||
|
|||
Every object has a generation and a metageneration. The generation changes |
|||
whenever the content changes, and the metageneration changes whenever the |
|||
metadata changes. Conditions let you check these values before an operation; |
|||
the operation only executes if the conditions match. You can use conditions to |
|||
prevent race conditions in read-modify-write operations. |
|||
|
|||
For example, say you've read an object's metadata into objAttrs. Now |
|||
you want to write to that object, but only if its contents haven't changed |
|||
since you read it. Here is how to express that: |
|||
|
|||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) |
|||
// Proceed with writing as above.
|
|||
|
|||
Signed URLs |
|||
|
|||
You can obtain a URL that lets anyone read or write an object for a limited time. |
|||
You don't need to create a client to do this. See the documentation of |
|||
SignedURL for details. |
|||
|
|||
url, err := storage.SignedURL(bucketName, "shared-object", opts) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Println(url) |
|||
|
|||
Errors |
|||
|
|||
Errors returned by this client are often of the type googleapi.Error
(https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer googleapi.Error type. For example:
|||
|
|||
if e, ok := err.(*googleapi.Error); ok { |
|||
if e.Code == 409 { ... } |
|||
} |
|||
*/ |
|||
package storage // import "cloud.google.com/go/storage"
|
@ -0,0 +1,32 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// +build go1.10
|
|||
|
|||
package storage |
|||
|
|||
import "google.golang.org/api/googleapi" |
|||
|
|||
func shouldRetry(err error) bool { |
|||
switch e := err.(type) { |
|||
case *googleapi.Error: |
|||
// Retry on 429 and 5xx, according to
|
|||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
|||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600) |
|||
case interface{ Temporary() bool }: |
|||
return e.Temporary() |
|||
default: |
|||
return false |
|||
} |
|||
} |
@ -0,0 +1,130 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
|
|||
"cloud.google.com/go/iam" |
|||
"cloud.google.com/go/internal/trace" |
|||
raw "google.golang.org/api/storage/v1" |
|||
iampb "google.golang.org/genproto/googleapis/iam/v1" |
|||
) |
|||
|
|||
// IAM provides access to IAM access control for the bucket.
|
|||
func (b *BucketHandle) IAM() *iam.Handle { |
|||
return iam.InternalNewHandleClient(&iamClient{ |
|||
raw: b.c.raw, |
|||
userProject: b.userProject, |
|||
}, b.name) |
|||
} |
|||
|
|||
// iamClient implements the iam.client interface.
|
|||
type iamClient struct { |
|||
raw *raw.Service |
|||
userProject string |
|||
} |
|||
|
|||
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
call := c.raw.Buckets.GetIamPolicy(resource) |
|||
setClientHeader(call.Header()) |
|||
if c.userProject != "" { |
|||
call.UserProject(c.userProject) |
|||
} |
|||
var rp *raw.Policy |
|||
err = runWithRetry(ctx, func() error { |
|||
rp, err = call.Context(ctx).Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return iamFromStoragePolicy(rp), nil |
|||
} |
|||
|
|||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
rp := iamToStoragePolicy(p) |
|||
call := c.raw.Buckets.SetIamPolicy(resource, rp) |
|||
setClientHeader(call.Header()) |
|||
if c.userProject != "" { |
|||
call.UserProject(c.userProject) |
|||
} |
|||
return runWithRetry(ctx, func() error { |
|||
_, err := call.Context(ctx).Do() |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
call := c.raw.Buckets.TestIamPermissions(resource, perms) |
|||
setClientHeader(call.Header()) |
|||
if c.userProject != "" { |
|||
call.UserProject(c.userProject) |
|||
} |
|||
var res *raw.TestIamPermissionsResponse |
|||
err = runWithRetry(ctx, func() error { |
|||
res, err = call.Context(ctx).Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return res.Permissions, nil |
|||
} |
|||
|
|||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { |
|||
return &raw.Policy{ |
|||
Bindings: iamToStorageBindings(ip.Bindings), |
|||
Etag: string(ip.Etag), |
|||
} |
|||
} |
|||
|
|||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { |
|||
var rbs []*raw.PolicyBindings |
|||
for _, ib := range ibs { |
|||
rbs = append(rbs, &raw.PolicyBindings{ |
|||
Role: ib.Role, |
|||
Members: ib.Members, |
|||
}) |
|||
} |
|||
return rbs |
|||
} |
|||
|
|||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { |
|||
return &iampb.Policy{ |
|||
Bindings: iamFromStorageBindings(rp.Bindings), |
|||
Etag: []byte(rp.Etag), |
|||
} |
|||
} |
|||
|
|||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { |
|||
var ibs []*iampb.Binding |
|||
for _, rb := range rbs { |
|||
ibs = append(ibs, &iampb.Binding{ |
|||
Role: rb.Role, |
|||
Members: rb.Members, |
|||
}) |
|||
} |
|||
return ibs |
|||
} |
@ -0,0 +1,37 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
|
|||
"cloud.google.com/go/internal" |
|||
gax "github.com/googleapis/gax-go/v2" |
|||
) |
|||
|
|||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
|
|||
// the context is done.
|
|||
func runWithRetry(ctx context.Context, call func() error) error { |
|||
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { |
|||
err = call() |
|||
if err == nil { |
|||
return true, nil |
|||
} |
|||
if shouldRetry(err) { |
|||
return false, nil |
|||
} |
|||
return true, err |
|||
}) |
|||
} |
@ -0,0 +1,42 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// +build !go1.10
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"net/url" |
|||
"strings" |
|||
|
|||
"google.golang.org/api/googleapi" |
|||
) |
|||
|
|||
func shouldRetry(err error) bool { |
|||
switch e := err.(type) { |
|||
case *googleapi.Error: |
|||
// Retry on 429 and 5xx, according to
|
|||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
|||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600) |
|||
case *url.Error: |
|||
// Retry on REFUSED_STREAM.
|
|||
// Unfortunately the error type is unexported, so we resort to string
|
|||
// matching.
|
|||
return strings.Contains(e.Error(), "REFUSED_STREAM") |
|||
case interface{ Temporary() bool }: |
|||
return e.Temporary() |
|||
default: |
|||
return false |
|||
} |
|||
} |
@ -0,0 +1,188 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"regexp" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}
|||
|
|||
// Values for Notification.PayloadFormat.
const (
	// NoPayload sends no payload with notification messages.
	NoPayload = "NONE"

	// JSONPayload sends object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)

// Values for Notification.EventTypes.
const (
	// ObjectFinalizeEvent occurs when an object is successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// ObjectMetadataUpdateEvent occurs when the metadata of an existing
	// object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// ObjectDeleteEvent occurs when an object is permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// ObjectArchiveEvent occurs when the live version of an object becomes
	// an archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
|||
|
|||
func toNotification(rn *raw.Notification) *Notification { |
|||
n := &Notification{ |
|||
ID: rn.Id, |
|||
EventTypes: rn.EventTypes, |
|||
ObjectNamePrefix: rn.ObjectNamePrefix, |
|||
CustomAttributes: rn.CustomAttributes, |
|||
PayloadFormat: rn.PayloadFormat, |
|||
} |
|||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) |
|||
return n |
|||
} |
|||
|
|||
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it
// returns "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	m := topicRE.FindStringSubmatch(nt)
	if m == nil {
		return "?", "?"
	}
	return m[1], m[2]
}
|||
|
|||
func toRawNotification(n *Notification) *raw.Notification { |
|||
return &raw.Notification{ |
|||
Id: n.ID, |
|||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", |
|||
n.TopicProjectID, n.TopicID), |
|||
EventTypes: n.EventTypes, |
|||
ObjectNamePrefix: n.ObjectNamePrefix, |
|||
CustomAttributes: n.CustomAttributes, |
|||
PayloadFormat: string(n.PayloadFormat), |
|||
} |
|||
} |
|||
|
|||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
|||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
|||
// returned Notification's ID can be used to refer to it.
|
|||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if n.ID != "" { |
|||
return nil, errors.New("storage: AddNotification: ID must not be set") |
|||
} |
|||
if n.TopicProjectID == "" { |
|||
return nil, errors.New("storage: AddNotification: missing TopicProjectID") |
|||
} |
|||
if n.TopicID == "" { |
|||
return nil, errors.New("storage: AddNotification: missing TopicID") |
|||
} |
|||
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) |
|||
setClientHeader(call.Header()) |
|||
if b.userProject != "" { |
|||
call.UserProject(b.userProject) |
|||
} |
|||
rn, err := call.Context(ctx).Do() |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return toNotification(rn), nil |
|||
} |
|||
|
|||
// Notifications returns all the Notifications configured for this bucket, as a map
|
|||
// indexed by notification ID.
|
|||
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
call := b.c.raw.Notifications.List(b.name) |
|||
setClientHeader(call.Header()) |
|||
if b.userProject != "" { |
|||
call.UserProject(b.userProject) |
|||
} |
|||
var res *raw.Notifications |
|||
err = runWithRetry(ctx, func() error { |
|||
res, err = call.Context(ctx).Do() |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return notificationsToMap(res.Items), nil |
|||
} |
|||
|
|||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification { |
|||
m := map[string]*Notification{} |
|||
for _, rn := range rns { |
|||
m[rn.Id] = toNotification(rn) |
|||
} |
|||
return m |
|||
} |
|||
|
|||
// DeleteNotification deletes the notification with the given ID.
|
|||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
call := b.c.raw.Notifications.Delete(b.name, id) |
|||
setClientHeader(call.Header()) |
|||
if b.userProject != "" { |
|||
call.UserProject(b.userProject) |
|||
} |
|||
return call.Context(ctx).Do() |
|||
} |
@ -0,0 +1,385 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"hash/crc32" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
"google.golang.org/api/googleapi" |
|||
) |
|||
|
|||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli) |
|||
|
|||
// ReaderObjectAttrs are attributes about the object being read. These are populated
|
|||
// during the New call. This struct only holds a subset of object attributes: to
|
|||
// get the full set of attributes, use ObjectHandle.Attrs.
|
|||
//
|
|||
// Each field is read-only.
|
|||
type ReaderObjectAttrs struct { |
|||
// Size is the length of the object's content.
|
|||
Size int64 |
|||
|
|||
// ContentType is the MIME type of the object's content.
|
|||
ContentType string |
|||
|
|||
// ContentEncoding is the encoding of the object's content.
|
|||
ContentEncoding string |
|||
|
|||
// CacheControl specifies whether and for how long browser and Internet
|
|||
// caches are allowed to cache your objects.
|
|||
CacheControl string |
|||
|
|||
// LastModified is the time that the object was last modified.
|
|||
LastModified time.Time |
|||
|
|||
// Generation is the generation number of the object's content.
|
|||
Generation int64 |
|||
|
|||
// Metageneration is the version of the metadata for this object at
|
|||
// this generation. This field is used for preconditions and for
|
|||
// detecting changes in metadata. A metageneration number is only
|
|||
// meaningful in the context of a particular generation of a
|
|||
// particular object.
|
|||
Metageneration int64 |
|||
} |
|||
|
|||
// NewReader creates a new Reader to read the contents of the
|
|||
// object.
|
|||
// ErrObjectNotExist will be returned if the object is not found.
|
|||
//
|
|||
// The caller must call Close on the returned Reader when done reading.
|
|||
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { |
|||
return o.NewRangeReader(ctx, 0, -1) |
|||
} |
|||
|
|||
// NewRangeReader reads part of an object, reading at most length bytes
|
|||
// starting at the given offset. If length is negative, the object is read
|
|||
// until the end.
|
|||
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if err := o.validate(); err != nil { |
|||
return nil, err |
|||
} |
|||
if offset < 0 { |
|||
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) |
|||
} |
|||
if o.conds != nil { |
|||
if err := o.conds.validate("NewRangeReader"); err != nil { |
|||
return nil, err |
|||
} |
|||
} |
|||
u := &url.URL{ |
|||
Scheme: "https", |
|||
Host: "storage.googleapis.com", |
|||
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), |
|||
} |
|||
verb := "GET" |
|||
if length == 0 { |
|||
verb = "HEAD" |
|||
} |
|||
req, err := http.NewRequest(verb, u.String(), nil) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
req = req.WithContext(ctx) |
|||
if o.userProject != "" { |
|||
req.Header.Set("X-Goog-User-Project", o.userProject) |
|||
} |
|||
if o.readCompressed { |
|||
req.Header.Set("Accept-Encoding", "gzip") |
|||
} |
|||
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
gen := o.gen |
|||
|
|||
// Define a function that initiates a Read with offset and length, assuming we
|
|||
// have already read seen bytes.
|
|||
reopen := func(seen int64) (*http.Response, error) { |
|||
start := offset + seen |
|||
if length < 0 && start > 0 { |
|||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) |
|||
} else if length > 0 { |
|||
// The end character isn't affected by how many bytes we've seen.
|
|||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) |
|||
} |
|||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
|||
req.URL.RawQuery = conditionsQuery(gen, o.conds) |
|||
var res *http.Response |
|||
err = runWithRetry(ctx, func() error { |
|||
res, err = o.c.hc.Do(req) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if res.StatusCode == http.StatusNotFound { |
|||
res.Body.Close() |
|||
return ErrObjectNotExist |
|||
} |
|||
if res.StatusCode < 200 || res.StatusCode > 299 { |
|||
body, _ := ioutil.ReadAll(res.Body) |
|||
res.Body.Close() |
|||
return &googleapi.Error{ |
|||
Code: res.StatusCode, |
|||
Header: res.Header, |
|||
Body: string(body), |
|||
} |
|||
} |
|||
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { |
|||
res.Body.Close() |
|||
return errors.New("storage: partial request not satisfied") |
|||
} |
|||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
|||
// generation. In future requests we'll use this generation as a precondition to avoid data races.
|
|||
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { |
|||
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
gen = gen64 |
|||
} |
|||
return nil |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return res, nil |
|||
} |
|||
|
|||
res, err := reopen(0) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
var ( |
|||
size int64 // total size of object, even if a range was requested.
|
|||
checkCRC bool |
|||
crc uint32 |
|||
) |
|||
if res.StatusCode == http.StatusPartialContent { |
|||
cr := strings.TrimSpace(res.Header.Get("Content-Range")) |
|||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { |
|||
|
|||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) |
|||
} |
|||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) |
|||
} |
|||
} else { |
|||
size = res.ContentLength |
|||
// Check the CRC iff all of the following hold:
|
|||
// - We asked for content (length != 0).
|
|||
// - We got all the content (status != PartialContent).
|
|||
// - The server sent a CRC header.
|
|||
// - The Go http stack did not uncompress the file.
|
|||
// - We were not served compressed data that was uncompressed on download.
|
|||
// The problem with the last two cases is that the CRC will not match -- GCS
|
|||
// computes it on the compressed contents, but we compute it on the
|
|||
// uncompressed contents.
|
|||
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { |
|||
crc, checkCRC = parseCRC32c(res) |
|||
} |
|||
} |
|||
|
|||
remain := res.ContentLength |
|||
body := res.Body |
|||
if length == 0 { |
|||
remain = 0 |
|||
body.Close() |
|||
body = emptyBody |
|||
} |
|||
var metaGen int64 |
|||
if res.Header.Get("X-Goog-Generation") != "" { |
|||
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
} |
|||
|
|||
var lm time.Time |
|||
if res.Header.Get("Last-Modified") != "" { |
|||
lm, err = http.ParseTime(res.Header.Get("Last-Modified")) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
} |
|||
|
|||
attrs := ReaderObjectAttrs{ |
|||
Size: size, |
|||
ContentType: res.Header.Get("Content-Type"), |
|||
ContentEncoding: res.Header.Get("Content-Encoding"), |
|||
CacheControl: res.Header.Get("Cache-Control"), |
|||
LastModified: lm, |
|||
Generation: gen, |
|||
Metageneration: metaGen, |
|||
} |
|||
return &Reader{ |
|||
Attrs: attrs, |
|||
body: body, |
|||
size: size, |
|||
remain: remain, |
|||
wantCRC: crc, |
|||
checkCRC: checkCRC, |
|||
reopen: reopen, |
|||
}, nil |
|||
} |
|||
|
|||
func uncompressedByServer(res *http.Response) bool { |
|||
// If the data is stored as gzip but is not encoded as gzip, then it
|
|||
// was uncompressed by the server.
|
|||
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && |
|||
res.Header.Get("Content-Encoding") != "gzip" |
|||
} |
|||
|
|||
func parseCRC32c(res *http.Response) (uint32, bool) { |
|||
const prefix = "crc32c=" |
|||
for _, spec := range res.Header["X-Goog-Hash"] { |
|||
if strings.HasPrefix(spec, prefix) { |
|||
c, err := decodeUint32(spec[len(prefix):]) |
|||
if err == nil { |
|||
return c, true |
|||
} |
|||
} |
|||
} |
|||
return 0, false |
|||
} |
|||
|
|||
var emptyBody = ioutil.NopCloser(strings.NewReader("")) |
|||
|
|||
// Reader reads a Cloud Storage object.
|
|||
// It implements io.Reader.
|
|||
//
|
|||
// Typically, a Reader computes the CRC of the downloaded content and compares it to
|
|||
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
|
|||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
|||
type Reader struct { |
|||
Attrs ReaderObjectAttrs |
|||
body io.ReadCloser |
|||
seen, remain, size int64 |
|||
checkCRC bool // should we check the CRC?
|
|||
wantCRC uint32 // the CRC32c value the server sent in the header
|
|||
gotCRC uint32 // running crc
|
|||
reopen func(seen int64) (*http.Response, error) |
|||
} |
|||
|
|||
// Close closes the Reader. It must be called when done reading.
|
|||
func (r *Reader) Close() error { |
|||
return r.body.Close() |
|||
} |
|||
|
|||
func (r *Reader) Read(p []byte) (int, error) { |
|||
n, err := r.readWithRetry(p) |
|||
if r.remain != -1 { |
|||
r.remain -= int64(n) |
|||
} |
|||
if r.checkCRC { |
|||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) |
|||
// Check CRC here. It would be natural to check it in Close, but
|
|||
// everybody defers Close on the assumption that it doesn't return
|
|||
// anything worth looking at.
|
|||
if err == io.EOF { |
|||
if r.gotCRC != r.wantCRC { |
|||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", |
|||
r.gotCRC, r.wantCRC) |
|||
} |
|||
} |
|||
} |
|||
return n, err |
|||
} |
|||
|
|||
func (r *Reader) readWithRetry(p []byte) (int, error) { |
|||
n := 0 |
|||
for len(p[n:]) > 0 { |
|||
m, err := r.body.Read(p[n:]) |
|||
n += m |
|||
r.seen += int64(m) |
|||
if !shouldRetryRead(err) { |
|||
return n, err |
|||
} |
|||
// Read failed, but we will try again. Send a ranged read request that takes
|
|||
// into account the number of bytes we've already seen.
|
|||
res, err := r.reopen(r.seen) |
|||
if err != nil { |
|||
// reopen already retries
|
|||
return n, err |
|||
} |
|||
r.body.Close() |
|||
r.body = res.Body |
|||
} |
|||
return n, nil |
|||
} |
|||
|
|||
// shouldRetryRead reports whether a body-read error is the http2
// INTERNAL_ERROR stream failure that a ranged re-open can recover from.
// The http2 error type is unexported, so we match on its name and message.
func shouldRetryRead(err error) bool {
	if err == nil {
		return false
	}
	fromHTTP2 := strings.Contains(reflect.TypeOf(err).String(), "http2")
	return fromHTTP2 && strings.HasSuffix(err.Error(), "INTERNAL_ERROR")
}
|||
|
|||
// Size returns the size of the object in bytes.
|
|||
// The returned value is always the same and is not affected by
|
|||
// calls to Read or Close.
|
|||
//
|
|||
// Deprecated: use Reader.Attrs.Size.
|
|||
func (r *Reader) Size() int64 { |
|||
return r.Attrs.Size |
|||
} |
|||
|
|||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
|||
func (r *Reader) Remain() int64 { |
|||
return r.remain |
|||
} |
|||
|
|||
// ContentType returns the content type of the object.
|
|||
//
|
|||
// Deprecated: use Reader.Attrs.ContentType.
|
|||
func (r *Reader) ContentType() string { |
|||
return r.Attrs.ContentType |
|||
} |
|||
|
|||
// ContentEncoding returns the content encoding of the object.
|
|||
//
|
|||
// Deprecated: use Reader.Attrs.ContentEncoding.
|
|||
func (r *Reader) ContentEncoding() string { |
|||
return r.Attrs.ContentEncoding |
|||
} |
|||
|
|||
// CacheControl returns the cache control of the object.
|
|||
//
|
|||
// Deprecated: use Reader.Attrs.CacheControl.
|
|||
func (r *Reader) CacheControl() string { |
|||
return r.Attrs.CacheControl |
|||
} |
|||
|
|||
// LastModified returns the value of the Last-Modified header.
|
|||
//
|
|||
// Deprecated: use Reader.Attrs.LastModified.
|
|||
func (r *Reader) LastModified() (time.Time, error) { |
|||
return r.Attrs.LastModified, nil |
|||
} |
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -0,0 +1,261 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/base64" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"sync" |
|||
"unicode/utf8" |
|||
|
|||
"google.golang.org/api/googleapi" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// A Writer writes a Cloud Storage object.
|
|||
type Writer struct { |
|||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
|||
// must be initialized before the first Write call. Nil or zero-valued
|
|||
// attributes are ignored.
|
|||
ObjectAttrs |
|||
|
|||
// SendCRC specifies whether to transmit a CRC32C field. It should be set
|
|||
// to true in addition to setting the Writer's CRC32C field, because zero
|
|||
// is a valid CRC and normally a zero would not be transmitted.
|
|||
// If a CRC32C is sent, and the data written does not match the checksum,
|
|||
// the write will be rejected.
|
|||
SendCRC32C bool |
|||
|
|||
// ChunkSize controls the maximum number of bytes of the object that the
|
|||
// Writer will attempt to send to the server in a single request. Objects
|
|||
// smaller than the size will be sent in a single request, while larger
|
|||
// objects will be split over multiple requests. The size will be rounded up
|
|||
// to the nearest multiple of 256K. If zero, chunking will be disabled and
|
|||
// the object will be uploaded in a single request.
|
|||
//
|
|||
// ChunkSize will default to a reasonable value. If you perform many concurrent
|
|||
// writes of small objects, you may wish set ChunkSize to a value that matches
|
|||
// your objects' sizes to avoid consuming large amounts of memory.
|
|||
//
|
|||
// ChunkSize must be set before the first Write call.
|
|||
ChunkSize int |
|||
|
|||
// ProgressFunc can be used to monitor the progress of a large write.
|
|||
// operation. If ProgressFunc is not nil and writing requires multiple
|
|||
// calls to the underlying service (see
|
|||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
|
|||
// then ProgressFunc will be invoked after each call with the number of bytes of
|
|||
// content copied so far.
|
|||
//
|
|||
// ProgressFunc should return quickly without blocking.
|
|||
ProgressFunc func(int64) |
|||
|
|||
ctx context.Context |
|||
o *ObjectHandle |
|||
|
|||
opened bool |
|||
pw *io.PipeWriter |
|||
|
|||
donec chan struct{} // closed after err and obj are set.
|
|||
obj *ObjectAttrs |
|||
|
|||
mu sync.Mutex |
|||
err error |
|||
} |
|||
|
|||
// open validates the object attributes, wires up the pipe that Write feeds,
// and starts a background goroutine that streams the pipe's read side to the
// service as an Objects.Insert call. It returns an error only for validation
// failures; transport errors are recorded in w.err and surfaced by Close.
func (w *Writer) open() error {
	attrs := w.ObjectAttrs
	// Check the developer didn't change the object Name (this is unfortunate, but
	// we don't want to store an object under the wrong name).
	if attrs.Name != w.o.object {
		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
	}
	if !utf8.ValidString(attrs.Name) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
	}
	if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
		return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
	}
	pr, pw := io.Pipe()
	w.pw = pw
	w.opened = true

	// Watch for context cancellation so a canceled ctx unblocks writers on
	// the pipe (see monitorCancel).
	go w.monitorCancel()

	// NOTE(review): this validation runs after the pipe is created and the
	// cancel monitor started, so a negative ChunkSize leaves w.opened set.
	if w.ChunkSize < 0 {
		return errors.New("storage: Writer.ChunkSize must be non-negative")
	}
	mediaOpts := []googleapi.MediaOption{
		googleapi.ChunkSize(w.ChunkSize),
	}
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}

	go func() {
		// Signals Close and monitorCancel that the upload has finished; w.err
		// and w.obj are set before this close.
		defer close(w.donec)

		rawObj := attrs.toRawObject(w.o.bucket)
		if w.SendCRC32C {
			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
		}
		if w.MD5 != nil {
			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
		}
		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
			Media(pr, mediaOpts...).
			Projection("full").
			Context(w.ctx)
		if w.ProgressFunc != nil {
			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
		}
		if attrs.KMSKeyName != "" {
			call.KmsKeyName(attrs.KMSKeyName)
		}
		if attrs.PredefinedACL != "" {
			call.PredefinedAcl(attrs.PredefinedACL)
		}
		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
			// Record the error for Write/Close, then unblock any writer
			// currently blocked on the pipe.
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		var resp *raw.Object
		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
		if err == nil {
			if w.o.userProject != "" {
				call.UserProject(w.o.userProject)
			}
			setClientHeader(call.Header())
			// If the chunk size is zero, then no chunking is done on the Reader,
			// which means we cannot retry: the first call will read the data, and if
			// it fails, there is no way to re-read.
			if w.ChunkSize == 0 {
				resp, err = call.Do()
			} else {
				// We will only retry here if the initial POST, which obtains a URI for
				// the resumable upload, fails with a retryable error. The upload itself
				// has its own retry logic.
				err = runWithRetry(w.ctx, func() error {
					var err2 error
					resp, err2 = call.Do()
					return err2
				})
			}
		}
		if err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		w.obj = newObject(resp)
	}()
	return nil
}
|||
|
|||
// Write appends to w. It implements the io.Writer interface.
|
|||
//
|
|||
// Since writes happen asynchronously, Write may return a nil
|
|||
// error even though the write failed (or will fail). Always
|
|||
// use the error returned from Writer.Close to determine if
|
|||
// the upload was successful.
|
|||
func (w *Writer) Write(p []byte) (n int, err error) { |
|||
w.mu.Lock() |
|||
werr := w.err |
|||
w.mu.Unlock() |
|||
if werr != nil { |
|||
return 0, werr |
|||
} |
|||
if !w.opened { |
|||
if err := w.open(); err != nil { |
|||
return 0, err |
|||
} |
|||
} |
|||
n, err = w.pw.Write(p) |
|||
if err != nil { |
|||
w.mu.Lock() |
|||
werr := w.err |
|||
w.mu.Unlock() |
|||
// Preserve existing functionality that when context is canceled, Write will return
|
|||
// context.Canceled instead of "io: read/write on closed pipe". This hides the
|
|||
// pipe implementation detail from users and makes Write seem as though it's an RPC.
|
|||
if werr == context.Canceled || werr == context.DeadlineExceeded { |
|||
return n, werr |
|||
} |
|||
} |
|||
return n, err |
|||
} |
|||
|
|||
// Close completes the write operation and flushes any buffered data.
|
|||
// If Close doesn't return an error, metadata about the written object
|
|||
// can be retrieved by calling Attrs.
|
|||
func (w *Writer) Close() error { |
|||
if !w.opened { |
|||
if err := w.open(); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
// Closing either the read or write causes the entire pipe to close.
|
|||
if err := w.pw.Close(); err != nil { |
|||
return err |
|||
} |
|||
|
|||
<-w.donec |
|||
w.mu.Lock() |
|||
defer w.mu.Unlock() |
|||
return w.err |
|||
} |
|||
|
|||
// monitorCancel is intended to be used as a background goroutine. It monitors
// the context, and when it observes that the context has been canceled, it
// manually closes things that do not take a context.
func (w *Writer) monitorCancel() {
	select {
	case <-w.ctx.Done():
		// Record the context error first so Write/Close report it instead of
		// the generic closed-pipe error.
		w.mu.Lock()
		werr := w.ctx.Err()
		w.err = werr
		w.mu.Unlock()

		// Closing either the read or write causes the entire pipe to close.
		w.CloseWithError(werr)
	case <-w.donec:
		// Upload finished on its own; nothing to clean up.
	}
}
|||
|
|||
// CloseWithError aborts the write operation with the provided error.
|
|||
// CloseWithError always returns nil.
|
|||
//
|
|||
// Deprecated: cancel the context passed to NewWriter instead.
|
|||
func (w *Writer) CloseWithError(err error) error { |
|||
if !w.opened { |
|||
return nil |
|||
} |
|||
return w.pw.CloseWithError(err) |
|||
} |
|||
|
|||
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
	// w.obj is set by the upload goroutine before donec is closed, and Close
	// waits on donec, so this unsynchronized read is safe after a clean Close.
	return w.obj
}
@ -0,0 +1,2 @@ |
|||
[url "ssh://git@git.lowcodeplatform.net/"] |
|||
insteadOf = https://git.lowcodeplatform.net/ |
@ -0,0 +1,9 @@ |
|||
.history |
|||
.idea |
|||
.vscode |
|||
.DS_Store |
|||
*~merged* |
|||
*~merged |
|||
/public |
|||
.env |
|||
local |
@ -0,0 +1,3 @@ |
|||
# models |
|||
|
|||
Модели общих сущностей проекта Buildbox Fabric |
@ -0,0 +1,98 @@ |
|||
package models |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
) |
|||
|
|||
// StatusCode is the registry of all REST statuses the services answer with,
// keyed by symbolic name. Each entry is a positional RestStatus literal in
// the field order {Description, Status, Code, Error}.
//
// NOTE(review): "ErrorShema" looks like a misspelling of "ErrorSchema", the
// "ErrorNullFields" entry reuses the "ErrorScanRows" code, and
// "errorOpenConfigDir" breaks the CamelCase convention of the other keys.
// All are worth confirming before cleaning up, since clients may match on
// these exact strings.
var StatusCode = RStatus{
	"OK":                       {"Запрос выполнен", 200, "", nil},
	"OKLicenseActivation":      {"Лицензия была активирована", 200, "", nil},
	"Unauthorized":             {"Ошибка авторизации", 401, "", nil},
	"NotCache":                 {"Доступно только в Турбо-режиме", 200, "", nil},
	"NotStatus":                {"Ответ сервера не содержит статус выполнения запроса", 501, "", nil},
	"NotExtended":              {"На сервере отсутствует расширение, которое желает использовать клиент", 501, "", nil},
	"ErrorFormatJson":          {"Ошибка формата JSON-запроса", 500, "ErrorFormatJson", nil},
	"ErrorTransactionFalse":    {"Ошибка выполнения тразакции SQL", 500, "ErrorTransactionFalse", nil},
	"ErrorBeginDB":             {"Ошибка подключения к БД", 500, "ErrorBeginDB", nil},
	"ErrorPrepareSQL":          {"Ошибка подготовки запроса SQL", 500, "ErrorPrepareSQL", nil},
	"ErrorNullParameter":       {"Ошибка! Не передан параметр", 503, "ErrorNullParameter", nil},
	"ErrorQuery":               {"Ошибка запроса на выборку данных", 500, "ErrorQuery", nil},
	"ErrorScanRows":            {"Ошибка переноса данных из запроса в объект", 500, "ErrorScanRows", nil},
	"ErrorNullFields":          {"Не все поля заполнены", 500, "ErrorScanRows", nil},
	"ErrorAccessType":          {"Ошибка доступа к элементу типа", 500, "ErrorAccessType", nil},
	"ErrorGetData":             {"Ошибка доступа данным объекта", 500, "ErrorGetData", nil},
	"ErrorRevElement":          {"Значение было изменено ранее.", 409, "ErrorRevElement", nil},
	"ErrorForbiddenElement":    {"Значение занято другим пользователем.", 403, "ErrorForbiddenElement", nil},
	"ErrorUnprocessableEntity": {"Необрабатываемый экземпляр", 422, "ErrorUnprocessableEntity", nil},
	"ErrorNotFound":            {"Значение не найдено", 404, "ErrorNotFound", nil},
	"ErrorReadDir":             {"Ошибка чтения директории", 403, "ErrorReadDir", nil},
	"ErrorReadConfigDir":       {"Ошибка чтения директории конфигураций", 403, "ErrorReadConfigDir", nil},
	"errorOpenConfigDir":       {"Ошибка открытия директории конфигураций", 403, "errorOpenConfigDir", nil},
	"ErrorReadConfigFile":      {"Ошибка чтения файла конфигураций", 403, "ErrorReadConfigFile", nil},
	"ErrorReadLogFile":         {"Ошибка чтения файла логирования", 403, "ErrorReadLogFile", nil},
	"ErrorScanLogFile":         {"Ошибка построчного чтения файла логирования", 403, "ErrorScanLogFile", nil},
	"ErrorPortBusy":            {"Указанный порт занят", 403, "ErrorPortBusy", nil},
	"ErrorGone":                {"Объект был удален ранее", 410, "ErrorGone", nil},
	"ErrorShema":               {"Ошибка формата заданной схемы формирования запроса", 410, "ErrorShema", nil},
	"ErrorInitBase":            {"Ошибка инициализации новой базы данных", 410, "ErrorInitBase", nil},
	"ErrorCreateCacheRecord":   {"Ошибка создания объекта в кеше", 410, "ErrorCreateCacheRecord", nil},
	"ErrorUpdateParams":        {"Не переданы параметры для обновления серверов (сервер источник, сервер получатель)", 410, "ErrorUpdateParams", nil},
	"ErrorIntervalProxy":       {"Ошибка переданного интервала (формат: 1000:2000)", 410, "ErrorIntervalProxy", nil},
	"ErrorReservPortProxy":     {"Ошибка выделения порта proxy-сервером", 410, "ErrorReservPortProxy", nil},
}
|||
|
|||
// RStatus maps a symbolic status name (e.g. "OK", "ErrorQuery") to its
// REST status description.
type RStatus map[string]RestStatus

// RestStatus describes the outcome of a REST request: a human-readable
// description, an HTTP-style status code, a symbolic machine-readable code,
// and an optional underlying error.
type RestStatus struct {
	Description string `json:"description"` // human-readable message
	Status      int    `json:"status"`      // HTTP-style numeric status
	Code        string `json:"code"`        // symbolic code, "" for success statuses
	Error       error  `json:"error"`       // underlying error; nil when none
}

// MarshalJSON implements json.Marshaler. The Error field is serialized as a
// plain string — "" when the error is nil — so the JSON form round-trips
// through UnmarshalJSON.
func (r RestStatus) MarshalJSON() ([]byte, error) {
	type restStatusJSON struct {
		Description string `json:"description"`
		Status      int    `json:"status"`
		Code        string `json:"code"`
		Error       string `json:"error"`
	}

	out := restStatusJSON{
		Description: r.Description,
		Status:      r.Status,
		Code:        r.Code,
	}
	if r.Error != nil {
		out.Error = fmt.Sprint(r.Error)
	}
	return json.Marshal(out)
}

// UnmarshalJSON implements json.Unmarshaler. A non-empty "error" string is
// reconstructed as an error value; an empty string decodes to a nil Error.
//
// Fixes two defects in the original implementation: it used a value receiver,
// so every decoded field was assigned to a copy and discarded (json.Unmarshal
// appeared to succeed but left the target zero-valued), and the error check
// was inverted, setting Error to nil exactly when the payload carried a
// non-empty error string.
func (r *RestStatus) UnmarshalJSON(b []byte) error {
	type restStatusJSON struct {
		Description string `json:"description"`
		Status      int    `json:"status"`
		Code        string `json:"code"`
		Error       string `json:"error"`
	}
	var t restStatusJSON
	if err := json.Unmarshal(b, &t); err != nil {
		return err
	}

	r.Description = t.Description
	r.Code = t.Code
	r.Status = t.Status
	if t.Error != "" {
		r.Error = fmt.Errorf("%s", t.Error)
	} else {
		r.Error = nil
	}
	return nil
}
@ -0,0 +1,151 @@ |
|||
package models |
|||
|
|||
// Data is a generic platform object: an identified, typed, versioned record
// carrying a set of named attributes (see Attr/AttrSet).
type Data struct {
	Uid    string `json:"uid"`    // unique object identifier
	Id     string `json:"id"`     // object id
	Source string `json:"source"` // data source the object belongs to
	Parent string `json:"parent"` // parent object reference, if any
	Type   string `json:"type"`   // object type name
	Title  string `json:"title"`  // human-readable title
	Rev    string `json:"rev"`    // revision marker
	// NOTE(review): the field name below begins with the CYRILLIC capital
	// "С" (U+0421), not the Latin "C". The JSON tag is unaffected, but Go
	// code must type the Cyrillic letter to reference the field — consider
	// renaming (API-breaking for external users of this exported field).
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"` // attributes keyed by name
}

// Attribute is a single named attribute of a Data object. The exact
// semantics of Value vs. Src vs. Tpls are defined by the callers; Attr and
// AttrSet expose each field by its lowercase name.
type Attribute struct {
	Value  string `json:"value"`
	Src    string `json:"src"`
	Tpls   string `json:"tpls"`
	Status string `json:"status"`
	Rev    string `json:"rev"`
	Editor string `json:"editor"` // presumably the last editor's id — confirm with callers
}
|||
|
|||
// Response is the generic service reply envelope: an arbitrary payload plus
// the request status and paging/timing metrics.
type Response struct {
	Data    interface{} `json:"data"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// ResponseData is a reply envelope specialized for lists of Data objects,
// with an extra free-form Res payload.
type ResponseData struct {
	Data    []Data      `json:"data"`
	Res     interface{} `json:"res"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// Metrics carries result-set size, timing, and pagination details for a
// list response.
type Metrics struct {
	ResultSize    int    `json:"result_size"`   // payload size (units defined by the producer — confirm)
	ResultCount   int    `json:"result_count"`  // number of items in the result
	ResultOffset  int    `json:"result_offset"` // offset of the first returned item
	ResultLimit   int    `json:"result_limit"`  // requested page size
	ResultPage    int    `json:"result_page"`   // requested page number
	TimeExecution string `json:"time_execution"`
	TimeQuery     string `json:"time_query"`

	PageLast    int   `json:"page_last"`    // last page number
	PageCurrent int   `json:"page_current"` // current page number
	PageList    []int `json:"page_list"`    // page numbers to render in a pager
	PageFrom    int   `json:"page_from"`    // first page shown in the pager
	PageTo      int   `json:"page_to"`      // last page shown in the pager
}
|||
|
|||
// возвращаем необходимый значение атрибута для объекта если он есть, инае пусто
|
|||
// а также из заголовка объекта
|
|||
func (p *Data) Attr(name, element string) (result string, found bool) { |
|||
|
|||
|
|||
if _, found := p.Attributes[name]; found { |
|||
|
|||
// фикс для тех объектов, на которых добавлено скрытое поле Uid
|
|||
if name == "uid" { |
|||
return p.Uid, true |
|||
} |
|||
|
|||
switch element { |
|||
case "src": |
|||
return p.Attributes[name].Src, true |
|||
case "value": |
|||
return p.Attributes[name].Value, true |
|||
case "tpls": |
|||
return p.Attributes[name].Tpls, true |
|||
case "rev": |
|||
return p.Attributes[name].Rev, true |
|||
case "status": |
|||
return p.Attributes[name].Status, true |
|||
case "uid": |
|||
return p.Uid, true |
|||
case "source": |
|||
return p.Source, true |
|||
case "id": |
|||
return p.Id, true |
|||
case "title": |
|||
return p.Title, true |
|||
case "type": |
|||
return p.Type, true |
|||
} |
|||
} else { |
|||
switch name { |
|||
case "uid": |
|||
return p.Uid, true |
|||
case "source": |
|||
return p.Source, true |
|||
case "id": |
|||
return p.Id, true |
|||
case "title": |
|||
return p.Title, true |
|||
case "type": |
|||
return p.Type, true |
|||
} |
|||
} |
|||
return "", false |
|||
} |
|||
|
|||
// заменяем значение аттрибутов в объекте профиля
|
|||
func (p *Data) AttrSet(name, element, value string) bool { |
|||
g := Attribute{} |
|||
|
|||
for k, v := range p.Attributes { |
|||
if k == name { |
|||
g = v |
|||
} |
|||
} |
|||
|
|||
switch element { |
|||
case "src": |
|||
g.Src = value |
|||
case "value": |
|||
g.Value = value |
|||
case "tpls": |
|||
g.Tpls = value |
|||
case "rev": |
|||
g.Rev = value |
|||
case "status": |
|||
g.Status = value |
|||
} |
|||
|
|||
f := p.Attributes |
|||
|
|||
for k, _ := range f { |
|||
if k == name { |
|||
f[k] = g |
|||
return true |
|||
} |
|||
} |
|||
|
|||
|
|||
return false |
|||
} |
|||
|
|||
// удаляем элемент из слайса
|
|||
func (p *ResponseData) RemoveData(i int) bool { |
|||
|
|||
if (i < len(p.Data)){ |
|||
p.Data = append(p.Data[:i], p.Data[i+1:]...) |
|||
} else { |
|||
//log.Warning("Error! Position invalid (", i, ")")
|
|||
return false |
|||
} |
|||
|
|||
return true |
|||
} |
@ -0,0 +1,23 @@ |
|||
package models |
|||
|
|||
// Pong is the reply a service returns to the proxy's periodic health
// poll (ping).
type Pong struct {
	Uid      string `json:"uid"`      // service instance identifier
	Name     string `json:"name"`     // service name
	Version  string `json:"version"`  // service version
	Status   string `json:"status"`   // health/run status string
	Port     int    `json:"port"`     // port the instance listens on
	Pid      string `json:"pid"`      // OS process id (as a string)
	State    string `json:"state"`    // lifecycle state — semantics defined by the proxy, confirm
	Replicas int    `json:"replicas"` // desired/actual replica count — confirm which
	Https    bool   `json:"https"`    // whether the instance serves TLS
	DeadTime int64  `json:"dead_time"` // presumably a unix-time deadline after which the instance is considered dead — confirm
	Follower string `json:"follower"`
}

// Hosts describes a host and the port range/protocol it exposes.
type Hosts struct {
	Host     string `json:"host"`
	PortFrom int    `json:"portfrom"` // first port of the allowed range
	PortTo   int    `json:"portto"`   // last port of the allowed range
	Protocol string `json:"protocol"`
}
@ -0,0 +1,46 @@ |
|||
package models |
|||
|
|||
// ProfileData is the session profile of an authenticated user: identity
// fields, role/permission data, and the navigation tree built for the user.
type ProfileData struct {
	Revision       string `json:"revision"` // revision of the current session (on a session refresh — e.g. a profile change — the session ID is kept but the revision changes)
	Hash           string `json:"hash"`
	Email          string `json:"email"`
	Uid            string `json:"uid"`
	ObjUid         string `json:"obj_uid"`
	FirstName      string `json:"first_name"`
	LastName       string `json:"last_name"`
	Photo          string `json:"photo"`
	Age            string `json:"age"`
	City           string `json:"city"`
	Country        string `json:"country"`
	Oauth_identity string `json:"oauth_identity"` // NOTE(review): underscore name breaks Go naming convention (OauthIdentity); renaming the exported field is API-breaking
	Status         string `json:"status"`         // src of the Status field in the profile (sometimes needed for extra filtering)
	Raw            []Data `json:"raw"`            // raw user object (needed when assembling the project for this user while granting database permissions)
	Tables         []Data `json:"tables"`
	Roles          map[string]string `json:"roles"`
	Homepage       string            `json:"homepage"`
	Maket          string            `json:"maket"`
	UpdateFlag     bool              `json:"update_flag"`
	UpdateData     []Data            `json:"update_data"`
	CurrentRole    Data              `json:"current_role"`
	CurrentProfile Data              `json:"current_profile"`
	Navigator      []*Items          `json:"navigator"` // user navigation tree (see Items)
}

// Items is one node of the user navigation tree: Sub lists child
// identifiers and Incl holds the nested child nodes.
type Items struct {
	Title        string   `json:"title"`
	ExtentedLink string   `json:"extentedLink"` // NOTE(review): "Extented" is likely a typo for "Extended"; the JSON tag is part of the wire format, so confirm before fixing
	Uid          string   `json:"uid"`
	Source       string   `json:"source"`
	Icon         string   `json:"icon"`
	Leader       string   `json:"leader"`
	Order        string   `json:"order"`
	Type         string   `json:"type"`
	Preview      string   `json:"preview"`
	Url          string   `json:"url"`
	Sub          []string `json:"sub"`  // identifiers of child nodes
	Incl         []*Items `json:"incl"` // materialized child nodes
	Class        string   `json:"class"`
}
|||
|
|||
|
@ -0,0 +1,20 @@ |
|||
package models |
|||
|
|||
import "github.com/golang-jwt/jwt" |
|||
|
|||
// Token is the JWT claim set the platform embeds in issued tokens,
// extending the standard claims with user/session identity.
type Token struct {
	Uid        string // user identifier
	Role       string
	Profile    string
	Groups     string
	Local      string
	Type       string
	Session    string // current session identifier
	SessionRev string // revision of the current session (on a session refresh — e.g. a profile change — the session ID is kept but the revision changes)
	jwt.StandardClaims
}

// Roles pairs a role's display title with its identifier.
type Roles struct {
	Title string
	Uid   string
}
@ -0,0 +1,50 @@ |
|||
package models |
|||
|
|||
// DataTree is a Data object extended with tree links: Sub lists the uids of
// child objects and Incl holds the materialized child subtrees (populated by
// ScanSub). Children are linked by pointer.
type DataTree struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): the field name below begins with the CYRILLIC capital
	// "С" (U+0421), not the Latin "C" — same issue as models.Data.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`  // uids of child nodes
	Incl       []*DataTree          `json:"incl"` // materialized child subtrees
}

// DataTreeOut mirrors DataTree for output, holding child subtrees by value
// instead of by pointer.
type DataTreeOut struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): Cyrillic "С" here as well — see DataTree.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`
	Incl       []DataTree           `json:"incl"`
}
|||
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
|
|||
// метод типа Items (перемещаем структуры в карте, исходя из заявленной вложенности элементов)
|
|||
// (переделать дубль фукнции)
|
|||
func (p *DataTree) ScanSub(maps *map[string]*DataTree) { |
|||
if p.Sub != nil && len(p.Sub) != 0 { |
|||
for _, c := range p.Sub { |
|||
gg := *maps |
|||
fromP := gg[c] |
|||
if fromP != nil { |
|||
copyPolygon := *fromP |
|||
p.Incl = append(p.Incl, ©Polygon) |
|||
delete(*maps, c) |
|||
copyPolygon.ScanSub(maps) |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright 2016 Microsoft Corporation |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,5 @@ |
|||
Microsoft Azure-SDK-for-Go |
|||
Copyright 2014-2017 Microsoft |
|||
|
|||
This product includes software developed at |
|||
the Microsoft Corporation (https://www.microsoft.com). |
@ -0,0 +1,22 @@ |
|||
# Azure Storage SDK for Go (Preview) |
|||
|
|||
:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the |
|||
future. Please use one of the following packages instead. |
|||
|
|||
| Service | Import Path/Repo | |
|||
|---------|------------------| |
|||
| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) | |
|||
| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) | |
|||
| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) | |
|||
|
|||
The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage |
|||
[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane |
|||
resources: containers, blobs, tables, and queues. |
|||
|
|||
To manage storage *accounts* use Azure Resource Manager (ARM) via the packages |
|||
at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage). |
|||
|
|||
This package also supports the [Azure Storage |
|||
Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) |
|||
(Windows only). |
|||
|
@ -0,0 +1,91 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/md5" |
|||
"encoding/base64" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"time" |
|||
) |
|||
|
|||
// PutAppendBlob initializes an empty append blob with specified name. An
|
|||
// append blob must be created using this method before appending blocks.
|
|||
//
|
|||
// See CreateBlockBlobFromReader for more info on creating blobs.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
|||
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { |
|||
params := url.Values{} |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers["x-ms-blob-type"] = string(BlobTypeAppend) |
|||
headers = mergeHeaders(headers, headersFromStruct(b.Properties)) |
|||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
|
|||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return b.respondCreation(resp, BlobTypeAppend) |
|||
} |
|||
|
|||
// AppendBlockOptions includes the options for an append block operation.
// Fields with a `header` tag are sent verbatim as request headers (via
// headersFromStruct); Timeout becomes the "timeout" query parameter and
// ContentMD5 controls client-side checksumming (see AppendBlock).
type AppendBlockOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	MaxSize           *uint      `header:"x-ms-blob-condition-maxsize"`
	AppendPosition    *uint      `header:"x-ms-blob-condition-appendpos"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
	ContentMD5        bool
}

// AppendBlock appends a block to an append blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
	params := url.Values{"comp": {"appendblock"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeAppend)
	headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
		if options.ContentMD5 {
			// Compute the MD5 locally so the service can verify the payload
			// was not corrupted in transit.
			md5sum := md5.Sum(chunk)
			headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeAppend)
}
@ -0,0 +1,246 @@ |
|||
// Package storage provides clients for Microsoft Azure Storage Services.
|
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"fmt" |
|||
"net/url" |
|||
"sort" |
|||
"strings" |
|||
) |
|||
|
|||
// authentication names the request-signing scheme used against the Azure
// Storage services. See:
// https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services

type authentication string

const (
	// Signing schemes. The "Lite" variants sign a reduced header set and the
	// "Table" variants use the string-to-sign layout required by the Table
	// service (see buildCanonicalizedString).
	sharedKey             authentication = "sharedKey"
	sharedKeyForTable     authentication = "sharedKeyTable"
	sharedKeyLite         authentication = "sharedKeyLite"
	sharedKeyLiteForTable authentication = "sharedKeyLiteTable"

	// headers
	headerAcceptCharset           = "Accept-Charset"
	headerAuthorization           = "Authorization"
	headerContentLength           = "Content-Length"
	headerDate                    = "Date"
	headerXmsDate                 = "x-ms-date"
	headerXmsVersion              = "x-ms-version"
	headerContentEncoding         = "Content-Encoding"
	headerContentLanguage         = "Content-Language"
	headerContentType             = "Content-Type"
	headerContentMD5              = "Content-MD5"
	headerIfModifiedSince         = "If-Modified-Since"
	headerIfMatch                 = "If-Match"
	headerIfNoneMatch             = "If-None-Match"
	headerIfUnmodifiedSince       = "If-Unmodified-Since"
	headerRange                   = "Range"
	headerDataServiceVersion      = "DataServiceVersion"
	headerMaxDataServiceVersion   = "MaxDataServiceVersion"
	headerContentTransferEncoding = "Content-Transfer-Encoding"
)
|||
|
|||
// addAuthorizationHeader computes and attaches the Authorization header for
// the given verb/URL/headers, unless this client authenticates with a SAS
// token (in which case credentials travel in the query string and no header
// is added). The headers map is mutated in place and also returned.
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
	if !c.sasClient {
		authHeader, err := c.getSharedKey(verb, url, headers, auth)
		if err != nil {
			return nil, err
		}
		headers[headerAuthorization] = authHeader
	}
	return headers, nil
}

// getSharedKey builds the canonicalized resource and the string-to-sign for
// the request, then returns the complete shared-key Authorization value.
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
	canRes, err := c.buildCanonicalizedResource(url, auth, false)
	if err != nil {
		return "", err
	}

	canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
	if err != nil {
		return "", err
	}
	return c.createAuthorizationHeader(canString, auth), nil
}
|||
|
|||
// buildCanonicalizedResource produces the CanonicalizedResource component of
// the string-to-sign from the request URI. For the sharedKey scheme every
// query parameter is appended (keys sorted, multi-values sorted and
// comma-joined); for the other schemes only the "comp" parameter is kept.
// The sas flag suppresses the leading account-name segment for storage
// emulator SAS URIs.
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
	errMsg := "buildCanonicalizedResource error: %s"
	u, err := url.Parse(uri)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}

	cr := bytes.NewBufferString("")
	if c.accountName != StorageEmulatorAccountName || !sas {
		cr.WriteString("/")
		cr.WriteString(c.getCanonicalizedAccountName())
	}

	if len(u.Path) > 0 {
		// Any portion of the CanonicalizedResource string that is derived from
		// the resource's URI should be encoded exactly as it is in the URI.
		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
		cr.WriteString(u.EscapedPath())
	}

	params, err := url.ParseQuery(u.RawQuery)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}

	// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
	if auth == sharedKey {
		if len(params) > 0 {
			cr.WriteString("\n")

			// Deterministic ordering is required: sort keys, and sort
			// multi-valued parameters before joining with commas.
			keys := []string{}
			for key := range params {
				keys = append(keys, key)
			}
			sort.Strings(keys)

			completeParams := []string{}
			for _, key := range keys {
				if len(params[key]) > 1 {
					sort.Strings(params[key])
				}

				completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
			}
			cr.WriteString(strings.Join(completeParams, "\n"))
		}
	} else {
		// search for "comp" parameter, if exists then add it to canonicalizedresource
		if v, ok := params["comp"]; ok {
			cr.WriteString("?comp=" + v[0])
		}
	}

	return string(cr.Bytes()), nil
}
|||
|
|||
func (c *Client) getCanonicalizedAccountName() string { |
|||
// since we may be trying to access a secondary storage account, we need to
|
|||
// remove the -secondary part of the storage name
|
|||
return strings.TrimSuffix(c.accountName, "-secondary") |
|||
} |
|||
|
|||
// buildCanonicalizedString assembles the string-to-sign for the given
// authentication scheme. Each scheme concatenates a different set of request
// header values, newline-separated, ending with the canonicalized resource.
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
	contentLength := headers[headerContentLength]
	// A zero Content-Length must be signed as the empty string, not "0".
	if contentLength == "0" {
		contentLength = ""
	}
	date := headers[headerDate]
	if v, ok := headers[headerXmsDate]; ok {
		if auth == sharedKey || auth == sharedKeyLite {
			// For the blob/queue schemes, x-ms-date is signed as part of the
			// canonicalized headers instead, so the Date slot stays empty.
			date = ""
		} else {
			date = v
		}
	}
	var canString string
	switch auth {
	case sharedKey:
		canString = strings.Join([]string{
			verb,
			headers[headerContentEncoding],
			headers[headerContentLanguage],
			contentLength,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			headers[headerIfModifiedSince],
			headers[headerIfMatch],
			headers[headerIfNoneMatch],
			headers[headerIfUnmodifiedSince],
			headers[headerRange],
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyForTable:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			canonicalizedResource,
		}, "\n")
	case sharedKeyLite:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyLiteForTable:
		canString = strings.Join([]string{
			date,
			canonicalizedResource,
		}, "\n")
	default:
		return "", fmt.Errorf("%s authentication is not supported yet", auth)
	}
	return canString, nil
}
|||
|
|||
// buildCanonicalizedHeader assembles the CanonicalizedHeaders portion of a
// shared-key signature: all "x-ms-" headers, with names lower-cased and
// trimmed, sorted lexicographically and joined as "name:value" lines.
// Returns "" when no such headers are present.
func buildCanonicalizedHeader(headers map[string]string) string {
	msHeaders := make(map[string]string)
	for name, value := range headers {
		normalized := strings.TrimSpace(strings.ToLower(name))
		if strings.HasPrefix(normalized, "x-ms-") {
			msHeaders[normalized] = value
		}
	}
	if len(msHeaders) == 0 {
		return ""
	}

	names := make([]string, 0, len(msHeaders))
	for name := range msHeaders {
		names = append(names, name)
	}
	sort.Strings(names)

	lines := make([]string, 0, len(names))
	for _, name := range names {
		lines = append(lines, name+":"+msHeaders[name])
	}
	return strings.Join(lines, "\n")
}
|||
|
|||
// createAuthorizationHeader signs the canonicalized string with the account
// key (via computeHmac256) and formats the Authorization header value as
// "<scheme> <account>:<signature>".
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
	signature := c.computeHmac256(canonicalizedString)
	var key string
	switch auth {
	case sharedKey, sharedKeyForTable:
		key = "SharedKey"
	case sharedKeyLite, sharedKeyLiteForTable:
		key = "SharedKeyLite"
	}
	return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
}
@ -0,0 +1,632 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// A Blob is an entry in BlobListResponse.
type Blob struct {
	Container  *Container // parent container; carries the client used for all requests
	Name       string         `xml:"Name"`
	Snapshot   time.Time      `xml:"Snapshot"`
	Properties BlobProperties `xml:"Properties"`
	Metadata   BlobMetadata   `xml:"Metadata"`
}

// PutBlobOptions includes the options any put blob operation
// (page, block, append). Fields tagged `header` are sent verbatim as request
// headers; Timeout becomes the "timeout" query parameter.
type PutBlobOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string

// blobMetadataEntries is the wire representation of blob metadata used by
// the custom XML (un)marshallers on BlobMetadata.
type blobMetadataEntries struct {
	Entries []blobMetadataEntry `xml:",any"`
}

// blobMetadataEntry is one metadata pair: the element name is the key, the
// character data is the value.
type blobMetadataEntry struct {
	XMLName xml.Name
	Value   string `xml:",chardata"`
}
|||
|
|||
// UnmarshalXML converts the xml:Metadata into Metadata map
|
|||
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { |
|||
var entries blobMetadataEntries |
|||
if err := d.DecodeElement(&entries, &start); err != nil { |
|||
return err |
|||
} |
|||
for _, entry := range entries.Entries { |
|||
if *bm == nil { |
|||
*bm = make(BlobMetadata) |
|||
} |
|||
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response: keys are canonicalized header-style
// (e.g. "foo-bar" -> "Foo-Bar"). Note that map iteration order is
// random, so element order in the output is not deterministic.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	entries := make([]blobMetadataEntry, 0, len(bm))
	for k, v := range bm {
		entries = append(entries, blobMetadataEntry{
			XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
			Value:   v,
		})
	}
	return enc.EncodeElement(blobMetadataEntries{
		Entries: entries,
	}, start)
}
|||
|
|||
// BlobProperties contains various properties of a blob
|
|||
// returned in various endpoints like ListBlobs or GetBlobProperties.
|
|||
type BlobProperties struct { |
|||
LastModified TimeRFC1123 `xml:"Last-Modified"` |
|||
Etag string `xml:"Etag"` |
|||
ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` |
|||
ContentLength int64 `xml:"Content-Length"` |
|||
ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` |
|||
ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` |
|||
CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` |
|||
ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` |
|||
ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` |
|||
BlobType BlobType `xml:"BlobType"` |
|||
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` |
|||
CopyID string `xml:"CopyId"` |
|||
CopyStatus string `xml:"CopyStatus"` |
|||
CopySource string `xml:"CopySource"` |
|||
CopyProgress string `xml:"CopyProgress"` |
|||
CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` |
|||
CopyStatusDescription string `xml:"CopyStatusDescription"` |
|||
LeaseStatus string `xml:"LeaseStatus"` |
|||
LeaseState string `xml:"LeaseState"` |
|||
LeaseDuration string `xml:"LeaseDuration"` |
|||
ServerEncrypted bool `xml:"ServerEncrypted"` |
|||
IncrementalCopy bool `xml:"IncrementalCopy"` |
|||
} |
|||
|
|||
// BlobType defines the type of the Azure Blob.
type BlobType string

// The blob types supported by the service.
const (
	BlobTypeBlock  BlobType = "BlockBlob"
	BlobTypePage   BlobType = "PageBlob"
	BlobTypeAppend BlobType = "AppendBlob"
)

// buildPath returns the container-relative resource path for this blob,
// e.g. "/container/name".
func (b *Blob) buildPath() string {
	return b.Container.buildPath() + "/" + b.Name
}
|||
|
|||
// Exists returns true if a blob with given name exists on the specified
|
|||
// container of the storage account.
|
|||
func (b *Blob) Exists() (bool, error) { |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) |
|||
if resp != nil { |
|||
defer drainRespBody(resp) |
|||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { |
|||
return resp.StatusCode == http.StatusOK, nil |
|||
} |
|||
} |
|||
return false, err |
|||
} |
|||
|
|||
// GetURL gets the canonical URL to the blob with the specified name in the
|
|||
// specified container.
|
|||
// This method does not create a publicly accessible URL if the blob or container
|
|||
// is private and this method does not check if the blob exists.
|
|||
func (b *Blob) GetURL() string { |
|||
container := b.Container.Name |
|||
if container == "" { |
|||
container = "$root" |
|||
} |
|||
return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) |
|||
} |
|||
|
|||
// GetBlobRangeOptions includes the options for a get blob range operation.
// Range selects the byte span to fetch; GetRangeContentMD5 asks the service
// to return an MD5 of the range contents. The embedded *GetBlobOptions
// supplies the remaining headers/query parameters.
type GetBlobRangeOptions struct {
	Range              *BlobRange
	GetRangeContentMD5 bool
	*GetBlobOptions
}

// GetBlobOptions includes the options for a get blob operation.
// Fields tagged `header` are sent verbatim as request headers; Timeout and
// Snapshot become query parameters.
type GetBlobOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// BlobRange represents the byte range to be fetched: Start through End
// inclusive. An End of 0 denotes an open-ended range ("from Start to the
// end of the blob").
type BlobRange struct {
	Start uint64
	End   uint64
}

// String renders the range as an HTTP Range header value: "bytes=start-end",
// or "bytes=start-" when End is zero.
func (br BlobRange) String() string {
	s := "bytes=" + strconv.FormatUint(br.Start, 10) + "-"
	if br.End != 0 {
		s += strconv.FormatUint(br.End, 10)
	}
	return s
}
|||
|
|||
// Get returns a stream to read the blob. Caller must call both Read and
// Close() to correctly close the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
	rangeOptions := GetBlobRangeOptions{
		GetBlobOptions: options,
	}
	resp, err := b.getRange(&rangeOptions)
	if err != nil {
		return nil, err
	}

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	// Refresh this blob's cached properties (including Content-Length) from
	// the response headers before handing the body to the caller.
	if err := b.writeProperties(resp.Header, true); err != nil {
		return resp.Body, err
	}
	return resp.Body, nil
}
|||
|
|||
// GetRange reads the specified range of a blob into a stream. The bytesRange
// string must be in a format like "0-", "10-100", as defined in the HTTP 1.1
// spec. Caller must call both Read and Close() to correctly close the
// underlying connection.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(options)
	if err != nil {
		return nil, err
	}

	if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
		return nil, err
	}
	// Content-Length header should not be updated, as the service returns the
	// range length (which is not always the full blob length).
	if err := b.writeProperties(resp.Header, false); err != nil {
		return resp.Body, err
	}
	return resp.Body, nil
}

// getRange issues the GET request shared by Get and GetRange, attaching the
// optional Range header and query parameters. The caller owns the response
// and must close its body.
func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		if options.Range != nil {
			headers["Range"] = options.Range.String()
			if options.GetRangeContentMD5 {
				// Ask the service to return the MD5 of the range contents.
				headers["x-ms-range-get-content-md5"] = "true"
			}
		}
		if options.GetBlobOptions != nil {
			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
			params = addTimeout(params, options.Timeout)
			params = addSnapshot(params, options.Snapshot)
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	return resp, err
}
|||
|
|||
// SnapshotOptions includes the options for a snapshot blob operation.
// Fields tagged `header` are sent verbatim as request headers; Timeout
// becomes the "timeout" query parameter.
type SnapshotOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// CreateSnapshot creates a snapshot for a blob and returns the timestamp
// the service assigned to the snapshot (from the x-ms-snapshot header).
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
	params := url.Values{"comp": {"snapshot"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil || resp == nil {
		return nil, err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return nil, err
	}

	snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
	if snapshotResponse != "" {
		// NOTE: this local deliberately shadows the named return value; the
		// parsed timestamp is returned explicitly below.
		snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
		if err != nil {
			return nil, err
		}
		return &snapshotTimestamp, nil
	}

	return nil, errors.New("Snapshot not created")
}
|||
|
|||
// GetBlobPropertiesOptions includes the options for a get blob properties
// operation. Fields tagged `header` are sent verbatim as request headers;
// Timeout and Snapshot become query parameters.
type GetBlobPropertiesOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// GetProperties provides various information about the specified blob,
// refreshing b.Properties and b.Metadata from the response headers of a
// HEAD request.
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}
	return b.writeProperties(resp.Header, true)
}
|||
|
|||
// writeProperties re-populates b.Properties (and metadata) from the given
// response headers. When includeContentLen is false the previously cached
// ContentLength is preserved — used for range responses, where the
// Content-Length header is only the range size, not the blob size.
func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
	var err error

	contentLength := b.Properties.ContentLength
	if includeContentLen {
		contentLengthStr := h.Get("Content-Length")
		if contentLengthStr != "" {
			contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
			if err != nil {
				return err
			}
		}
	}

	var sequenceNum int64
	sequenceNumStr := h.Get("x-ms-blob-sequence-number")
	if sequenceNumStr != "" {
		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
		if err != nil {
			return err
		}
	}

	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
	if err != nil {
		return err
	}

	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
	if err != nil {
		return err
	}

	// NOTE(review): the dereferences below assume getTimeFromHeaders returns
	// a non-nil *time.Time whenever err is nil, even if the header is absent
	// — confirm, otherwise this panics on responses missing those headers.
	b.Properties = BlobProperties{
		LastModified:          TimeRFC1123(*lastModified),
		Etag:                  h.Get("Etag"),
		ContentMD5:            h.Get("Content-MD5"),
		ContentLength:         contentLength,
		ContentEncoding:       h.Get("Content-Encoding"),
		ContentType:           h.Get("Content-Type"),
		ContentDisposition:    h.Get("Content-Disposition"),
		CacheControl:          h.Get("Cache-Control"),
		ContentLanguage:       h.Get("Content-Language"),
		SequenceNumber:        sequenceNum,
		CopyCompletionTime:    TimeRFC1123(*copyCompletionTime),
		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
		CopyID:                h.Get("x-ms-copy-id"),
		CopyProgress:          h.Get("x-ms-copy-progress"),
		CopySource:            h.Get("x-ms-copy-source"),
		CopyStatus:            h.Get("x-ms-copy-status"),
		BlobType:              BlobType(h.Get("x-ms-blob-type")),
		LeaseStatus:           h.Get("x-ms-lease-status"),
		LeaseState:            h.Get("x-ms-lease-state"),
	}
	b.writeMetadata(h)
	return nil
}
|||
|
|||
// SetBlobPropertiesOptions contains various properties of a blob and is an
// entry in SetProperties. Fields tagged `header` are sent verbatim as
// request headers; SequenceNumberAction only applies to page blobs.
type SetBlobPropertiesOptions struct {
	Timeout              uint
	LeaseID              string     `header:"x-ms-lease-id"`
	Origin               string     `header:"Origin"`
	IfModifiedSince      *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince    *time.Time `header:"If-Unmodified-Since"`
	IfMatch              string     `header:"If-Match"`
	IfNoneMatch          string     `header:"If-None-Match"`
	SequenceNumberAction *SequenceNumberAction
	RequestID            string     `header:"x-ms-client-request-id"`
}

// SequenceNumberAction defines how the blob's sequence number should be modified
type SequenceNumberAction string

// Options for sequence number action
const (
	SequenceNumberActionMax       SequenceNumberAction = "max"
	SequenceNumberActionUpdate    SequenceNumberAction = "update"
	SequenceNumberActionIncrement SequenceNumberAction = "increment"
)
|||
|
|||
// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
	params := url.Values{"comp": {"properties"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	// Page-blob-only headers: content length is mandatory, and the sequence
	// number is sent only when the action is not "increment" (increment has
	// the service compute the new number itself).
	if b.Properties.BlobType == BlobTypePage {
		headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
		if options != nil && options.SequenceNumberAction != nil {
			headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
			if *options.SequenceNumberAction != SequenceNumberActionIncrement {
				headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
			}
		}
	}

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
|||
|
|||
// SetBlobMetadataOptions includes the options for a set blob metadata
// operation. Fields tagged `header` are sent verbatim as request headers.
type SetBlobMetadataOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// SetMetadata replaces the metadata for the specified blob with the
// contents of b.Metadata.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
|||
|
|||
// GetBlobMetadataOptions includes the options for a get blob metadata
// operation. Fields tagged `header` are sent verbatim as request headers;
// Timeout and Snapshot become query parameters.
type GetBlobMetadataOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// GetMetadata returns all user-defined metadata for the specified blob,
// replacing b.Metadata with what the service reports.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	b.writeMetadata(resp.Header)
	return nil
}
|||
|
|||
// writeMetadata refreshes b.Metadata from the x-ms-meta-* headers in h,
// replacing any previously cached metadata.
func (b *Blob) writeMetadata(h http.Header) {
	b.Metadata = BlobMetadata(writeMetadata(h))
}
|||
|
|||
// DeleteBlobOptions includes the options for a delete blob operation.
type DeleteBlobOptions struct {
	Timeout  uint       // server-side timeout in seconds; 0 omits the parameter
	Snapshot *time.Time // when set, only this snapshot is targeted
	LeaseID  string     `header:"x-ms-lease-id"`
	// DeleteSnapshots is tri-state (see Blob.delete): nil sends no
	// x-ms-delete-snapshots header, true sends "include" (base blob plus
	// snapshots), false sends "only" (snapshots only).
	DeleteSnapshots   *bool
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// Delete deletes the given blob from the specified container.
|
|||
// If the blob does not exists at the time of the Delete Blob operation, it
|
|||
// returns error.
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
|
|||
func (b *Blob) Delete(options *DeleteBlobOptions) error { |
|||
resp, err := b.delete(options) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusAccepted}) |
|||
} |
|||
|
|||
// DeleteIfExists deletes the given blob from the specified container If the
|
|||
// blob is deleted with this call, returns true. Otherwise returns false.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
|
|||
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { |
|||
resp, err := b.delete(options) |
|||
if resp != nil { |
|||
defer drainRespBody(resp) |
|||
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { |
|||
return resp.StatusCode == http.StatusAccepted, nil |
|||
} |
|||
} |
|||
return false, err |
|||
} |
|||
|
|||
func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) { |
|||
params := url.Values{} |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
params = addSnapshot(params, options.Snapshot) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
if options.DeleteSnapshots != nil { |
|||
if *options.DeleteSnapshots { |
|||
headers["x-ms-delete-snapshots"] = "include" |
|||
} else { |
|||
headers["x-ms-delete-snapshots"] = "only" |
|||
} |
|||
} |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) |
|||
} |
|||
|
|||
// pathForResource is a helper to construct the URL path to either a blob
// ("/container/name") or, when name is empty, the container itself
// ("/container").
func pathForResource(container, name string) string {
	if name == "" {
		return fmt.Sprintf("/%s", container)
	}
	return fmt.Sprintf("/%s/%s", container, name)
}
|||
|
|||
func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error { |
|||
defer drainRespBody(resp) |
|||
err := checkRespCode(resp, []int{http.StatusCreated}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
b.Properties.BlobType = bt |
|||
return nil |
|||
} |
@ -0,0 +1,179 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// OverrideHeaders defines overridable response headers in
// a request using a SAS URI.
// Each field is forwarded as the SAS query parameter noted beside it
// (see blobAndFileSASURI).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type OverrideHeaders struct {
	CacheControl       string // rscc
	ContentDisposition string // rscd
	ContentEncoding    string // rsce
	ContentLanguage    string // rscl
	ContentType        string // rsct
}
|||
|
|||
// BlobSASOptions are options to construct a blob SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type BlobSASOptions struct {
	BlobServiceSASPermissions // which operations the SAS grants ("sp")
	OverrideHeaders           // optional response-header overrides
	SASOptions                // validity window, protocol, IP range, identifier
}
|||
|
|||
// BlobServiceSASPermissions includes the available permissions for
// blob service SAS URI.
type BlobServiceSASPermissions struct {
	Read   bool
	Add    bool
	Create bool
	Write  bool
	Delete bool
}

// buildString renders the enabled permissions in the fixed service-defined
// order ("racwd") expected by the "sp" SAS query parameter.
func (p BlobServiceSASPermissions) buildString() string {
	out := ""
	flags := []struct {
		enabled bool
		code    string
	}{
		{p.Read, "r"},
		{p.Add, "a"},
		{p.Create, "c"},
		{p.Write, "w"},
		{p.Delete, "d"},
	}
	for _, f := range flags {
		if f.enabled {
			out += f.code
		}
	}
	return out
}
|||
|
|||
// GetSASURI creates an URL to the blob which contains the Shared
|
|||
// Access Signature with the specified options.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
|||
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { |
|||
uri := b.GetURL() |
|||
signedResource := "b" |
|||
canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
permissions := options.BlobServiceSASPermissions.buildString() |
|||
return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) |
|||
} |
|||
|
|||
// blobAndFileSASURI assembles a service SAS URI for a blob or file resource:
// it builds the string-to-sign, signs it with the account key, and appends
// the resulting SAS query parameters to uri.
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
	// An unset start time is omitted from both the signature and the URI.
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.UTC().Format(time.RFC3339)
	}

	expiry := options.Expiry.UTC().Format(time.RFC3339)

	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	// "https" restricts the SAS to HTTPS; empty string leaves the service default.
	protocols := ""
	if options.UseHTTPS {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers)
	if err != nil {
		return "", err
	}

	sig := c.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {c.apiVersion},
		"se":  {expiry},
		"sr":  {signedResource},
		"sp":  {permissions},
		"sig": {sig},
	}

	if start != "" {
		sasParams.Add("st", start)
	}

	// Protocol and IP restrictions are only emitted for API 2015-04-05+
	// (string comparison works because versions are zero-padded dates).
	if c.apiVersion >= "2015-04-05" {
		if protocols != "" {
			sasParams.Add("spr", protocols)
		}
		if options.IP != "" {
			sasParams.Add("sip", options.IP)
		}
	}

	// Add override response headers
	addQueryParameter(sasParams, "rscc", headers.CacheControl)
	addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
	addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
	addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
	addQueryParameter(sasParams, "rsct", headers.ContentType)

	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
|||
|
|||
// blobSASStringToSign builds the newline-separated string-to-sign for a
// blob/file service SAS in the exact field order mandated by the target
// API version. The field order and count are part of the signature
// contract — do not reorder. Versions earlier than 2013-08-15 are not
// supported and yield an error.
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) {
	rscc := headers.CacheControl
	rscd := headers.ContentDisposition
	rsce := headers.ContentEncoding
	rscl := headers.ContentLanguage
	rsct := headers.ContentType

	// Version strings are zero-padded dates, so plain string comparison
	// orders them chronologically.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}

	// 15 fields, including signedResource and signedSnapshotTime:
	// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
	if signedVersion >= "2018-11-09" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil
	}

	// 13 fields: https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
	if signedVersion >= "2015-04-05" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}

	// 11 fields; reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	if signedVersion >= "2013-08-15" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
@ -0,0 +1,186 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
) |
|||
|
|||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
	client Client         // underlying account-level client used for all requests
	auth   authentication // authentication scheme passed to client.exec
}
|||
|
|||
// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	// Delegates to the shared service-properties implementation on Client.
	return b.client.getServiceProperties(blobServiceName, b.auth)
}
|||
|
|||
// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	// Delegates to the shared service-properties implementation on Client.
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
|||
|
|||
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call.
//
// Zero values are omitted from the request (see getParameters), letting the
// service apply its defaults.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix     string // only containers whose names begin with Prefix
	Marker     string // continuation marker from a previous listing (NextMarker)
	Include    string // additional datasets to include in the response
	MaxResults uint   // page size; 0 uses the service default
	Timeout    uint   // server-side timeout in seconds
}
|||
|
|||
// GetContainerReference returns a Container object for the specified container name.
|
|||
func (b *BlobStorageClient) GetContainerReference(name string) *Container { |
|||
return &Container{ |
|||
bsc: b, |
|||
Name: name, |
|||
} |
|||
} |
|||
|
|||
// GetContainerReferenceFromSASURI returns a Container object for the specified
|
|||
// container SASURI
|
|||
func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { |
|||
path := strings.Split(sasuri.Path, "/") |
|||
if len(path) <= 1 { |
|||
return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) |
|||
} |
|||
c, err := newSASClientFromURL(&sasuri) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
cli := c.GetBlobService() |
|||
return &Container{ |
|||
bsc: &cli, |
|||
Name: path[1], |
|||
sasuri: sasuri, |
|||
}, nil |
|||
} |
|||
|
|||
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()

	// NOTE(review): the response is decoded into these local shadow types and
	// then copied into the public ones; presumably so decoding never touches
	// Container's unexported fields (bsc, sasuri) — confirm before changing.
	type ContainerAlias struct {
		bsc        *BlobStorageClient
		Name       string              `xml:"Name"`
		Properties ContainerProperties `xml:"Properties"`
		Metadata   BlobMetadata
		sasuri     url.URL
	}
	type ContainerListResponseAlias struct {
		XMLName    xml.Name         `xml:"EnumerationResults"`
		Xmlns      string           `xml:"xmlns,attr"`
		Prefix     string           `xml:"Prefix"`
		Marker     string           `xml:"Marker"`
		NextMarker string           `xml:"NextMarker"`
		MaxResults int64            `xml:"MaxResults"`
		Containers []ContainerAlias `xml:"Containers>Container"`
	}

	var outAlias ContainerListResponseAlias
	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &outAlias)
	if err != nil {
		return nil, err
	}

	// Copy the decoded aliases into the public response type, wiring each
	// container back to this client so its methods work.
	out := ContainerListResponse{
		XMLName:    outAlias.XMLName,
		Xmlns:      outAlias.Xmlns,
		Prefix:     outAlias.Prefix,
		Marker:     outAlias.Marker,
		NextMarker: outAlias.NextMarker,
		MaxResults: outAlias.MaxResults,
		Containers: make([]Container, len(outAlias.Containers)),
	}
	for i, cnt := range outAlias.Containers {
		out.Containers[i] = Container{
			bsc:        &b,
			Name:       cnt.Name,
			Properties: cnt.Properties,
			Metadata:   map[string]string(cnt.Metadata),
			sasuri:     cnt.sasuri,
		}
	}

	return &out, err
}
|||
|
|||
func (p ListContainersParameters) getParameters() url.Values { |
|||
out := url.Values{} |
|||
|
|||
if p.Prefix != "" { |
|||
out.Set("prefix", p.Prefix) |
|||
} |
|||
if p.Marker != "" { |
|||
out.Set("marker", p.Marker) |
|||
} |
|||
if p.Include != "" { |
|||
out.Set("include", p.Include) |
|||
} |
|||
if p.MaxResults != 0 { |
|||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) |
|||
} |
|||
if p.Timeout != 0 { |
|||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) |
|||
} |
|||
|
|||
return out |
|||
} |
|||
|
|||
func writeMetadata(h http.Header) map[string]string { |
|||
metadata := make(map[string]string) |
|||
for k, v := range h { |
|||
// Can't trust CanonicalHeaderKey() to munge case
|
|||
// reliably. "_" is allowed in identifiers:
|
|||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
|||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
|||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
|||
// ...but "_" is considered invalid by
|
|||
// CanonicalMIMEHeaderKey in
|
|||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
|||
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
|
|||
k = strings.ToLower(k) |
|||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { |
|||
continue |
|||
} |
|||
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
|
|||
k = k[len(userDefinedMetadataHeaderPrefix):] |
|||
metadata[k] = v[len(v)-1] |
|||
} |
|||
return metadata |
|||
} |
@ -0,0 +1,270 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/xml" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob. The value is sent as the "blocklisttype" query parameter
// (see GetBlockList).
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string

// Filters for listing blocks in block blobs
const (
	BlockListTypeAll         BlockListType = "all"
	BlockListTypeCommitted   BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)
|||
|
|||
// Maximum sizes (per REST API) for various concepts
const (
	MaxBlobBlockSize = 100 * 1024 * 1024 // 100 MiB per block in a block blob
	MaxBlobPageSize  = 4 * 1024 * 1024   // 4 MiB per page-blob write
)
|||
|
|||
// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string

// List of statuses that can be used to refer to a block in a block list
// submitted via PutBlockList.
const (
	BlockStatusUncommitted BlockStatus = "Uncommitted"
	BlockStatusCommitted   BlockStatus = "Committed"
	BlockStatusLatest      BlockStatus = "Latest"
)
|||
|
|||
// Block is used to create Block entities for Put Block List
// call.
type Block struct {
	ID     string      // block ID, as passed to PutBlock/PutBlockWithLength
	Status BlockStatus // Uncommitted, Committed or Latest
}
|||
|
|||
// BlockListResponse contains the response fields from Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
	XMLName           xml.Name        `xml:"BlockList"`
	CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`   // blocks already part of the blob
	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` // uploaded but not yet committed
}
|||
|
|||
// BlockResponse contains the block information returned
// in the GetBlockListCall.
type BlockResponse struct {
	Name string `xml:"Name"` // block ID
	Size int64  `xml:"Size"` // block size in bytes
}
|||
|
|||
// CreateBlockBlob initializes an empty block blob with no blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
	// A nil reader yields a zero-length request body, i.e. an empty blob.
	return b.CreateBlockBlobFromReader(nil, options)
}
|||
|
|||
// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)

	headers["Content-Length"] = "0"
	var n int64
	var err error
	if blob != nil {
		// The request needs an exact Content-Length: use Len() when the
		// reader exposes one, otherwise buffer the whole stream to count it.
		type lener interface {
			Len() int
		}
		// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
		if l, ok := blob.(lener); ok {
			n = int64(l.Len())
		} else {
			var buf bytes.Buffer
			n, err = io.Copy(&buf, blob)
			if err != nil {
				return err
			}
			blob = &buf
		}

		headers["Content-Length"] = strconv.FormatInt(n, 10)
	}
	b.Properties.ContentLength = n

	// Properties first, then metadata, then caller-supplied options: later
	// merges win on key collisions.
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}
|||
|
|||
// PutBlockOptions includes the options for a put block operation.
// Header-tagged fields are forwarded verbatim; Timeout becomes a query
// parameter.
type PutBlockOptions struct {
	Timeout    uint   // server-side timeout in seconds; 0 omits the parameter
	LeaseID    string `header:"x-ms-lease-id"`
	ContentMD5 string `header:"Content-MD5"`
	RequestID  string `header:"x-ms-client-request-id"`
}
|||
|
|||
// PutBlock saves the given data chunk to the specified block blob with
// given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
	// Thin wrapper: the chunk length is known up front, so delegate to
	// PutBlockWithLength with an in-memory reader.
	return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}
|||
|
|||
// PutBlockWithLength saves the given data stream of exactly specified size to
|
|||
// the block blob with given ID. It is an alternative to PutBlocks where data
|
|||
// comes as stream but the length is known in advance.
|
|||
//
|
|||
// The API rejects requests with size > 100 MiB (but this limit is not
|
|||
// checked by the SDK).
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
|
|||
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { |
|||
query := url.Values{ |
|||
"comp": {"block"}, |
|||
"blockid": {blockID}, |
|||
} |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers["Content-Length"] = fmt.Sprintf("%v", size) |
|||
|
|||
if options != nil { |
|||
query = addTimeout(query, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) |
|||
|
|||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return b.respondCreation(resp, BlobTypeBlock) |
|||
} |
|||
|
|||
// PutBlockListOptions includes the options for a put block list operation.
// Header-tagged fields are forwarded verbatim; Timeout becomes a query
// parameter.
type PutBlockListOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// PutBlockList saves list of blocks to the specified block blob.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
|
|||
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { |
|||
params := url.Values{"comp": {"blocklist"}} |
|||
blockListXML := prepareBlockListRequest(blocks) |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) |
|||
headers = mergeHeaders(headers, headersFromStruct(b.Properties)) |
|||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
|
|||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusCreated}) |
|||
} |
|||
|
|||
// GetBlockListOptions includes the options for a get block list operation.
type GetBlockListOptions struct {
	Timeout   uint       // server-side timeout in seconds; 0 omits the parameter
	Snapshot  *time.Time // when set, the block list of this snapshot is read
	LeaseID   string     `header:"x-ms-lease-id"`
	RequestID string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// GetBlockList retrieves list of blocks in the specified block blob.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
|
|||
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { |
|||
params := url.Values{ |
|||
"comp": {"blocklist"}, |
|||
"blocklisttype": {string(blockType)}, |
|||
} |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
params = addSnapshot(params, options.Snapshot) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
|
|||
var out BlockListResponse |
|||
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) |
|||
if err != nil { |
|||
return out, err |
|||
} |
|||
defer resp.Body.Close() |
|||
|
|||
err = xmlUnmarshal(resp.Body, &out) |
|||
return out, err |
|||
} |
@ -0,0 +1,991 @@ |
|||
// Package storage provides clients for Microsoft Azure Storage Services.
|
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bufio" |
|||
"encoding/base64" |
|||
"encoding/json" |
|||
"encoding/xml" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"mime" |
|||
"mime/multipart" |
|||
"net/http" |
|||
"net/url" |
|||
"regexp" |
|||
"runtime" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/Azure/azure-sdk-for-go/version" |
|||
"github.com/Azure/go-autorest/autorest" |
|||
"github.com/Azure/go-autorest/autorest/azure" |
|||
) |
|||
|
|||
const (
	// DefaultBaseURL is the domain name used for storage requests in the
	// public cloud when a default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2018-03-28"

	// Defaults applied when constructing a client.
	defaultUseHTTPS      = true
	defaultRetryAttempts = 5
	defaultRetryDuration = time.Second * 5

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	// Service name fragments used when building endpoints.
	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	// Fixed host:port endpoints of the local Azure Storage Emulator.
	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"

	// Prefix shared by all user-defined metadata headers.
	userDefinedMetadataHeaderPrefix = "x-ms-meta-"

	// Keys recognized in a storage connection string.
	connectionStringAccountName      = "accountname"
	connectionStringAccountKey       = "accountkey"
	connectionStringEndpointSuffix   = "endpointsuffix"
	connectionStringEndpointProtocol = "defaultendpointsprotocol"

	connectionStringBlobEndpoint  = "blobendpoint"
	connectionStringFileEndpoint  = "fileendpoint"
	connectionStringQueueEndpoint = "queueendpoint"
	connectionStringTableEndpoint = "tableendpoint"
	connectionStringSAS           = "sharedaccesssignature"
)
|||
|
|||
var (
	// validStorageAccount matches the legal account-name shape:
	// 3-24 lowercase alphanumeric characters.
	validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
	// defaultValidStatusCodes are the HTTP statuses the DefaultSender
	// treats as retryable.
	defaultValidStatusCodes = []int{
		http.StatusRequestTimeout,      // 408
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
)
|||
|
|||
// Sender sends a request
type Sender interface {
	// Send issues the request on behalf of the given client and returns
	// the raw response.
	Send(*Client, *http.Request) (*http.Response, error)
}
|||
|
|||
// DefaultSender is the default sender for the client. It implements
// an automatic retry strategy.
type DefaultSender struct {
	RetryAttempts    int           // maximum number of attempts
	RetryDuration    time.Duration // base delay passed to the backoff between attempts
	ValidStatusCodes []int         // response status codes considered retryable
	attempts         int           // used for testing
}
|||
|
|||
// Send is the default retry strategy in the client.
//
// Each attempt re-prepares the request through the RetriableRequest wrapper
// (so the body can be replayed), sends it, and returns immediately on a
// transport error or on any status code NOT listed in ValidStatusCodes.
// Retryable responses are drained and retried after a backoff delay.
//
// NOTE(review): ds.attempts records the index of the last retried attempt
// and is incremented once more after the loop; per the field comment it is
// only used for testing — confirm before relying on its exact value.
// NOTE(review): req.Cancel is deprecated in net/http; presumably kept here
// for compatibility with the vendored autorest API.
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
	rr := autorest.NewRetriableRequest(req)
	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
		err = rr.Prepare()
		if err != nil {
			return resp, err
		}
		resp, err = c.HTTPClient.Do(rr.Request())
		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
			return resp, err
		}
		// Retryable status: release the connection, wait, and go again.
		drainRespBody(resp)
		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
		ds.attempts = attempts
	}
	ds.attempts++
	return resp, err
}
|||
|
|||
// Client is the object that needs to be constructed to perform
|
|||
// operations on the storage account.
|
|||
type Client struct { |
|||
// HTTPClient is the http.Client used to initiate API
|
|||
// requests. http.DefaultClient is used when creating a
|
|||
// client.
|
|||
HTTPClient *http.Client |
|||
|
|||
// Sender is an interface that sends the request. Clients are
|
|||
// created with a DefaultSender. The DefaultSender has an
|
|||
// automatic retry strategy built in. The Sender can be customized.
|
|||
Sender Sender |
|||
|
|||
accountName string |
|||
accountKey []byte |
|||
useHTTPS bool |
|||
UseSharedKeyLite bool |
|||
baseURL string |
|||
apiVersion string |
|||
userAgent string |
|||
sasClient bool |
|||
accountSASToken url.Values |
|||
} |
|||
|
|||
// odataResponse couples a raw HTTP response with the decoded OData
// error payload (if any).
type odataResponse struct {
	resp  *http.Response
	odata odataErrorWrapper
}

// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	Lang                      string
	StatusCode                int
	RequestID                 string
	Date                      string
	APIVersion                string
}

// odataErrorMessage is the localized message inside an OData error.
type odataErrorMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

// odataError is the code/message pair of an OData error.
type odataError struct {
	Code    string            `json:"code"`
	Message odataErrorMessage `json:"message"`
}

// odataErrorWrapper is the top-level envelope of an OData error body.
type odataErrorWrapper struct {
	Err odataError `json:"odata.error"`
}

// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int
	got     int
	inner   error
}
|||
|
|||
func (e UnexpectedStatusCodeError) Error() string { |
|||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } |
|||
|
|||
got := s(e.got) |
|||
expected := []string{} |
|||
for _, v := range e.allowed { |
|||
expected = append(expected, s(v)) |
|||
} |
|||
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner) |
|||
} |
|||
|
|||
// Got is the actual status code returned by Azure.
|
|||
func (e UnexpectedStatusCodeError) Got() int { |
|||
return e.got |
|||
} |
|||
|
|||
// Inner returns any inner error info.
|
|||
func (e UnexpectedStatusCodeError) Inner() error { |
|||
return e.inner |
|||
} |
|||
|
|||
// NewClientFromConnectionString creates a Client from the connection string.
|
|||
func NewClientFromConnectionString(input string) (Client, error) { |
|||
// build a map of connection string key/value pairs
|
|||
parts := map[string]string{} |
|||
for _, pair := range strings.Split(input, ";") { |
|||
if pair == "" { |
|||
continue |
|||
} |
|||
|
|||
equalDex := strings.IndexByte(pair, '=') |
|||
if equalDex <= 0 { |
|||
return Client{}, fmt.Errorf("Invalid connection segment %q", pair) |
|||
} |
|||
|
|||
value := strings.TrimSpace(pair[equalDex+1:]) |
|||
key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) |
|||
parts[key] = value |
|||
} |
|||
|
|||
// TODO: validate parameter sets?
|
|||
|
|||
if parts[connectionStringAccountName] == StorageEmulatorAccountName { |
|||
return NewEmulatorClient() |
|||
} |
|||
|
|||
if parts[connectionStringSAS] != "" { |
|||
endpoint := "" |
|||
if parts[connectionStringBlobEndpoint] != "" { |
|||
endpoint = parts[connectionStringBlobEndpoint] |
|||
} else if parts[connectionStringFileEndpoint] != "" { |
|||
endpoint = parts[connectionStringFileEndpoint] |
|||
} else if parts[connectionStringQueueEndpoint] != "" { |
|||
endpoint = parts[connectionStringQueueEndpoint] |
|||
} else { |
|||
endpoint = parts[connectionStringTableEndpoint] |
|||
} |
|||
|
|||
return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS]) |
|||
} |
|||
|
|||
useHTTPS := defaultUseHTTPS |
|||
if parts[connectionStringEndpointProtocol] != "" { |
|||
useHTTPS = parts[connectionStringEndpointProtocol] == "https" |
|||
} |
|||
|
|||
return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey], |
|||
parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS) |
|||
} |
|||
|
|||
// NewBasicClient constructs a Client with given storage service name and
|
|||
// key.
|
|||
func NewBasicClient(accountName, accountKey string) (Client, error) { |
|||
if accountName == StorageEmulatorAccountName { |
|||
return NewEmulatorClient() |
|||
} |
|||
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) |
|||
} |
|||
|
|||
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
|
|||
// key in the referenced cloud.
|
|||
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { |
|||
if accountName == StorageEmulatorAccountName { |
|||
return NewEmulatorClient() |
|||
} |
|||
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) |
|||
} |
|||
|
|||
//NewEmulatorClient contructs a Client intended to only work with Azure
|
|||
//Storage Emulator
|
|||
func NewEmulatorClient() (Client, error) { |
|||
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) |
|||
} |
|||
|
|||
// NewClient constructs a Client. This should be used if the caller wants
|
|||
// to specify whether to use HTTPS, a specific REST API version or a custom
|
|||
// storage endpoint than Azure Public Cloud.
|
|||
func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { |
|||
var c Client |
|||
if !IsValidStorageAccount(accountName) { |
|||
return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) |
|||
} else if accountKey == "" { |
|||
return c, fmt.Errorf("azure: account key required") |
|||
} else if serviceBaseURL == "" { |
|||
return c, fmt.Errorf("azure: base storage service url required") |
|||
} |
|||
|
|||
key, err := base64.StdEncoding.DecodeString(accountKey) |
|||
if err != nil { |
|||
return c, fmt.Errorf("azure: malformed storage account key: %v", err) |
|||
} |
|||
|
|||
c = Client{ |
|||
HTTPClient: http.DefaultClient, |
|||
accountName: accountName, |
|||
accountKey: key, |
|||
useHTTPS: useHTTPS, |
|||
baseURL: serviceBaseURL, |
|||
apiVersion: apiVersion, |
|||
sasClient: false, |
|||
UseSharedKeyLite: false, |
|||
Sender: &DefaultSender{ |
|||
RetryAttempts: defaultRetryAttempts, |
|||
ValidStatusCodes: defaultValidStatusCodes, |
|||
RetryDuration: defaultRetryDuration, |
|||
}, |
|||
} |
|||
c.userAgent = c.getDefaultUserAgent() |
|||
return c, nil |
|||
} |
|||
|
|||
// IsValidStorageAccount checks if the storage account name is valid.
|
|||
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
|
|||
func IsValidStorageAccount(account string) bool { |
|||
return validStorageAccount.MatchString(account) |
|||
} |
|||
|
|||
// NewAccountSASClient contructs a client that uses accountSAS authorization
|
|||
// for its operations.
|
|||
func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client { |
|||
return newSASClient(account, env.StorageEndpointSuffix, token) |
|||
} |
|||
|
|||
// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
|
|||
// for its operations using the specified endpoint and SAS token.
|
|||
func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) { |
|||
u, err := url.Parse(endpoint) |
|||
if err != nil { |
|||
return Client{}, err |
|||
} |
|||
_, err = url.ParseQuery(sasToken) |
|||
if err != nil { |
|||
return Client{}, err |
|||
} |
|||
u.RawQuery = sasToken |
|||
return newSASClientFromURL(u) |
|||
} |
|||
|
|||
func newSASClient(accountName, baseURL string, sasToken url.Values) Client { |
|||
c := Client{ |
|||
HTTPClient: http.DefaultClient, |
|||
apiVersion: DefaultAPIVersion, |
|||
sasClient: true, |
|||
Sender: &DefaultSender{ |
|||
RetryAttempts: defaultRetryAttempts, |
|||
ValidStatusCodes: defaultValidStatusCodes, |
|||
RetryDuration: defaultRetryDuration, |
|||
}, |
|||
accountName: accountName, |
|||
baseURL: baseURL, |
|||
accountSASToken: sasToken, |
|||
useHTTPS: defaultUseHTTPS, |
|||
} |
|||
c.userAgent = c.getDefaultUserAgent() |
|||
// Get API version and protocol from token
|
|||
c.apiVersion = sasToken.Get("sv") |
|||
if spr := sasToken.Get("spr"); spr != "" { |
|||
c.useHTTPS = spr == "https" |
|||
} |
|||
return c |
|||
} |
|||
|
|||
func newSASClientFromURL(u *url.URL) (Client, error) { |
|||
// the host name will look something like this
|
|||
// - foo.blob.core.windows.net
|
|||
// "foo" is the account name
|
|||
// "core.windows.net" is the baseURL
|
|||
|
|||
// find the first dot to get account name
|
|||
i1 := strings.IndexByte(u.Host, '.') |
|||
if i1 < 0 { |
|||
return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host) |
|||
} |
|||
|
|||
// now find the second dot to get the base URL
|
|||
i2 := strings.IndexByte(u.Host[i1+1:], '.') |
|||
if i2 < 0 { |
|||
return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:]) |
|||
} |
|||
|
|||
sasToken := u.Query() |
|||
c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken) |
|||
if spr := sasToken.Get("spr"); spr == "" { |
|||
// infer from URL if not in the query params set
|
|||
c.useHTTPS = u.Scheme == "https" |
|||
} |
|||
return c, nil |
|||
} |
|||
|
|||
func (c Client) isServiceSASClient() bool { |
|||
return c.sasClient && c.accountSASToken == nil |
|||
} |
|||
|
|||
func (c Client) isAccountSASClient() bool { |
|||
return c.sasClient && c.accountSASToken != nil |
|||
} |
|||
|
|||
func (c Client) getDefaultUserAgent() string { |
|||
return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", |
|||
runtime.Version(), |
|||
runtime.GOARCH, |
|||
runtime.GOOS, |
|||
version.Number, |
|||
c.apiVersion, |
|||
) |
|||
} |
|||
|
|||
// AddToUserAgent adds an extension to the current user agent
|
|||
func (c *Client) AddToUserAgent(extension string) error { |
|||
if extension != "" { |
|||
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) |
|||
return nil |
|||
} |
|||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) |
|||
} |
|||
|
|||
// protectUserAgent is used in funcs that include extraheaders as a parameter.
|
|||
// It prevents the User-Agent header to be overwritten, instead if it happens to
|
|||
// be present, it gets added to the current User-Agent. Use it before getStandardHeaders
|
|||
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { |
|||
if v, ok := extraheaders[userAgentHeader]; ok { |
|||
c.AddToUserAgent(v) |
|||
delete(extraheaders, userAgentHeader) |
|||
} |
|||
return extraheaders |
|||
} |
|||
|
|||
func (c Client) getBaseURL(service string) *url.URL { |
|||
scheme := "http" |
|||
if c.useHTTPS { |
|||
scheme = "https" |
|||
} |
|||
host := "" |
|||
if c.accountName == StorageEmulatorAccountName { |
|||
switch service { |
|||
case blobServiceName: |
|||
host = storageEmulatorBlob |
|||
case tableServiceName: |
|||
host = storageEmulatorTable |
|||
case queueServiceName: |
|||
host = storageEmulatorQueue |
|||
} |
|||
} else { |
|||
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) |
|||
} |
|||
|
|||
return &url.URL{ |
|||
Scheme: scheme, |
|||
Host: host, |
|||
} |
|||
} |
|||
|
|||
func (c Client) getEndpoint(service, path string, params url.Values) string { |
|||
u := c.getBaseURL(service) |
|||
|
|||
// API doesn't accept path segments not starting with '/'
|
|||
if !strings.HasPrefix(path, "/") { |
|||
path = fmt.Sprintf("/%v", path) |
|||
} |
|||
|
|||
if c.accountName == StorageEmulatorAccountName { |
|||
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) |
|||
} |
|||
|
|||
u.Path = path |
|||
u.RawQuery = params.Encode() |
|||
return u.String() |
|||
} |
|||
|
|||
// AccountSASTokenOptions includes options for constructing
// an account SAS token.
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
type AccountSASTokenOptions struct {
	APIVersion    string
	Services      Services
	ResourceTypes ResourceTypes
	Permissions   Permissions
	Start         time.Time
	Expiry        time.Time
	IP            string
	UseHTTPS      bool
}

// Services specify services accessible with an account SAS.
type Services struct {
	Blob  bool
	Queue bool
	Table bool
	File  bool
}

// ResourceTypes specify the resources accessible with an
// account SAS.
type ResourceTypes struct {
	Service   bool
	Container bool
	Object    bool
}

// Permissions specifies permissions for an accountSAS.
type Permissions struct {
	Read    bool
	Write   bool
	Delete  bool
	List    bool
	Add     bool
	Create  bool
	Update  bool
	Process bool
}
|||
|
|||
// GetAccountSASToken creates an account SAS token
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
|
|||
func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { |
|||
if options.APIVersion == "" { |
|||
options.APIVersion = c.apiVersion |
|||
} |
|||
|
|||
if options.APIVersion < "2015-04-05" { |
|||
return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion) |
|||
} |
|||
|
|||
// build services string
|
|||
services := "" |
|||
if options.Services.Blob { |
|||
services += "b" |
|||
} |
|||
if options.Services.Queue { |
|||
services += "q" |
|||
} |
|||
if options.Services.Table { |
|||
services += "t" |
|||
} |
|||
if options.Services.File { |
|||
services += "f" |
|||
} |
|||
|
|||
// build resources string
|
|||
resources := "" |
|||
if options.ResourceTypes.Service { |
|||
resources += "s" |
|||
} |
|||
if options.ResourceTypes.Container { |
|||
resources += "c" |
|||
} |
|||
if options.ResourceTypes.Object { |
|||
resources += "o" |
|||
} |
|||
|
|||
// build permissions string
|
|||
permissions := "" |
|||
if options.Permissions.Read { |
|||
permissions += "r" |
|||
} |
|||
if options.Permissions.Write { |
|||
permissions += "w" |
|||
} |
|||
if options.Permissions.Delete { |
|||
permissions += "d" |
|||
} |
|||
if options.Permissions.List { |
|||
permissions += "l" |
|||
} |
|||
if options.Permissions.Add { |
|||
permissions += "a" |
|||
} |
|||
if options.Permissions.Create { |
|||
permissions += "c" |
|||
} |
|||
if options.Permissions.Update { |
|||
permissions += "u" |
|||
} |
|||
if options.Permissions.Process { |
|||
permissions += "p" |
|||
} |
|||
|
|||
// build start time, if exists
|
|||
start := "" |
|||
if options.Start != (time.Time{}) { |
|||
start = options.Start.UTC().Format(time.RFC3339) |
|||
} |
|||
|
|||
// build expiry time
|
|||
expiry := options.Expiry.UTC().Format(time.RFC3339) |
|||
|
|||
protocol := "https,http" |
|||
if options.UseHTTPS { |
|||
protocol = "https" |
|||
} |
|||
|
|||
stringToSign := strings.Join([]string{ |
|||
c.accountName, |
|||
permissions, |
|||
services, |
|||
resources, |
|||
start, |
|||
expiry, |
|||
options.IP, |
|||
protocol, |
|||
options.APIVersion, |
|||
"", |
|||
}, "\n") |
|||
signature := c.computeHmac256(stringToSign) |
|||
|
|||
sasParams := url.Values{ |
|||
"sv": {options.APIVersion}, |
|||
"ss": {services}, |
|||
"srt": {resources}, |
|||
"sp": {permissions}, |
|||
"se": {expiry}, |
|||
"spr": {protocol}, |
|||
"sig": {signature}, |
|||
} |
|||
if start != "" { |
|||
sasParams.Add("st", start) |
|||
} |
|||
if options.IP != "" { |
|||
sasParams.Add("sip", options.IP) |
|||
} |
|||
|
|||
return sasParams, nil |
|||
} |
|||
|
|||
// GetBlobService returns a BlobStorageClient which can operate on the blob
|
|||
// service of the storage account.
|
|||
func (c Client) GetBlobService() BlobStorageClient { |
|||
b := BlobStorageClient{ |
|||
client: c, |
|||
} |
|||
b.client.AddToUserAgent(blobServiceName) |
|||
b.auth = sharedKey |
|||
if c.UseSharedKeyLite { |
|||
b.auth = sharedKeyLite |
|||
} |
|||
return b |
|||
} |
|||
|
|||
// GetQueueService returns a QueueServiceClient which can operate on the queue
|
|||
// service of the storage account.
|
|||
func (c Client) GetQueueService() QueueServiceClient { |
|||
q := QueueServiceClient{ |
|||
client: c, |
|||
} |
|||
q.client.AddToUserAgent(queueServiceName) |
|||
q.auth = sharedKey |
|||
if c.UseSharedKeyLite { |
|||
q.auth = sharedKeyLite |
|||
} |
|||
return q |
|||
} |
|||
|
|||
// GetTableService returns a TableServiceClient which can operate on the table
|
|||
// service of the storage account.
|
|||
func (c Client) GetTableService() TableServiceClient { |
|||
t := TableServiceClient{ |
|||
client: c, |
|||
} |
|||
t.client.AddToUserAgent(tableServiceName) |
|||
t.auth = sharedKeyForTable |
|||
if c.UseSharedKeyLite { |
|||
t.auth = sharedKeyLiteForTable |
|||
} |
|||
return t |
|||
} |
|||
|
|||
// GetFileService returns a FileServiceClient which can operate on the file
|
|||
// service of the storage account.
|
|||
func (c Client) GetFileService() FileServiceClient { |
|||
f := FileServiceClient{ |
|||
client: c, |
|||
} |
|||
f.client.AddToUserAgent(fileServiceName) |
|||
f.auth = sharedKey |
|||
if c.UseSharedKeyLite { |
|||
f.auth = sharedKeyLite |
|||
} |
|||
return f |
|||
} |
|||
|
|||
func (c Client) getStandardHeaders() map[string]string { |
|||
return map[string]string{ |
|||
userAgentHeader: c.userAgent, |
|||
"x-ms-version": c.apiVersion, |
|||
"x-ms-date": currentTimeRfc1123Formatted(), |
|||
} |
|||
} |
|||
|
|||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) { |
|||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
req, err := http.NewRequest(verb, url, body) |
|||
if err != nil { |
|||
return nil, errors.New("azure/storage: error creating request: " + err.Error()) |
|||
} |
|||
|
|||
// http.NewRequest() will automatically set req.ContentLength for a handful of types
|
|||
// otherwise we will handle here.
|
|||
if req.ContentLength < 1 { |
|||
if clstr, ok := headers["Content-Length"]; ok { |
|||
if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { |
|||
req.ContentLength = cl |
|||
} |
|||
} |
|||
} |
|||
|
|||
for k, v := range headers { |
|||
req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
|
|||
} |
|||
|
|||
if c.isAccountSASClient() { |
|||
// append the SAS token to the query params
|
|||
v := req.URL.Query() |
|||
v = mergeParams(v, c.accountSASToken) |
|||
req.URL.RawQuery = v.Encode() |
|||
} |
|||
|
|||
resp, err := c.Sender.Send(&c, req) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
if resp.StatusCode >= 400 && resp.StatusCode <= 505 { |
|||
return resp, getErrorFromResponse(resp) |
|||
} |
|||
|
|||
return resp, nil |
|||
} |
|||
|
|||
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { |
|||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth) |
|||
if err != nil { |
|||
return nil, nil, nil, err |
|||
} |
|||
|
|||
req, err := http.NewRequest(verb, url, body) |
|||
for k, v := range headers { |
|||
req.Header.Add(k, v) |
|||
} |
|||
|
|||
resp, err := c.Sender.Send(&c, req) |
|||
if err != nil { |
|||
return nil, nil, nil, err |
|||
} |
|||
|
|||
respToRet := &odataResponse{resp: resp} |
|||
|
|||
statusCode := resp.StatusCode |
|||
if statusCode >= 400 && statusCode <= 505 { |
|||
var respBody []byte |
|||
respBody, err = readAndCloseBody(resp.Body) |
|||
if err != nil { |
|||
return nil, nil, nil, err |
|||
} |
|||
|
|||
requestID, date, version := getDebugHeaders(resp.Header) |
|||
if len(respBody) == 0 { |
|||
// no error in response body, might happen in HEAD requests
|
|||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) |
|||
return respToRet, req, resp, err |
|||
} |
|||
// try unmarshal as odata.error json
|
|||
err = json.Unmarshal(respBody, &respToRet.odata) |
|||
} |
|||
|
|||
return respToRet, req, resp, err |
|||
} |
|||
|
|||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { |
|||
respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) |
|||
return respToRet, err |
|||
} |
|||
|
|||
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { |
|||
// execute common query, get back generated request, response etc... for more processing.
|
|||
respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// return the OData in the case of executing batch commands.
|
|||
// In this case we need to read the outer batch boundary and contents.
|
|||
// Then we read the changeset information within the batch
|
|||
var respBody []byte |
|||
respBody, err = readAndCloseBody(resp.Body) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// outer multipart body
|
|||
_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// batch details.
|
|||
batchBoundary := batchHeader["boundary"] |
|||
batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// changeset details.
|
|||
err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return respToRet, nil |
|||
} |
|||
|
|||
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { |
|||
changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) |
|||
changesetPart, err := changesetMultiReader.NextPart() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
changesetPartBufioReader := bufio.NewReader(changesetPart) |
|||
changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if changesetResp.StatusCode != http.StatusNoContent { |
|||
changesetBody, err := readAndCloseBody(changesetResp.Body) |
|||
err = json.Unmarshal(changesetBody, &respToRet.odata) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
respToRet.resp = changesetResp |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// genBatchReader opens the outer batch part of a multipart batch
// response body and returns a reader over that part together with the
// boundary of the nested changeset.
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
	// reading batchresponse
	batchMultiReader := multipart.NewReader(strings.NewReader(string(respBody)), batchBoundary)
	batchPart, err := batchMultiReader.NextPart()
	if err != nil {
		return nil, "", err
	}

	_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
	if err != nil {
		return nil, "", err
	}

	return bufio.NewReader(batchPart), changesetHeader["boundary"], nil
}
|||
|
|||
func readAndCloseBody(body io.ReadCloser) ([]byte, error) { |
|||
defer body.Close() |
|||
out, err := ioutil.ReadAll(body) |
|||
if err == io.EOF { |
|||
err = nil |
|||
} |
|||
return out, err |
|||
} |
|||
|
|||
// reads the response body then closes it
|
|||
func drainRespBody(resp *http.Response) { |
|||
io.Copy(ioutil.Discard, resp.Body) |
|||
resp.Body.Close() |
|||
} |
|||
|
|||
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { |
|||
if err := xml.Unmarshal(body, storageErr); err != nil { |
|||
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body)) |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { |
|||
odataError := odataErrorWrapper{} |
|||
if err := json.Unmarshal(body, &odataError); err != nil { |
|||
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body)) |
|||
return err |
|||
} |
|||
storageErr.Code = odataError.Err.Code |
|||
storageErr.Message = odataError.Err.Message.Value |
|||
storageErr.Lang = odataError.Err.Message.Lang |
|||
return nil |
|||
} |
|||
|
|||
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { |
|||
return AzureStorageServiceError{ |
|||
StatusCode: code, |
|||
Code: status, |
|||
RequestID: requestID, |
|||
Date: date, |
|||
APIVersion: version, |
|||
Message: "no response body was available for error status code", |
|||
} |
|||
} |
|||
|
|||
func (e AzureStorageServiceError) Error() string { |
|||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", |
|||
e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) |
|||
} |
|||
|
|||
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
|||
// one of the allowed status codes; otherwise nil.
|
|||
func checkRespCode(resp *http.Response, allowed []int) error { |
|||
for _, v := range allowed { |
|||
if resp.StatusCode == v { |
|||
return nil |
|||
} |
|||
} |
|||
err := getErrorFromResponse(resp) |
|||
return UnexpectedStatusCodeError{ |
|||
allowed: allowed, |
|||
got: resp.StatusCode, |
|||
inner: err, |
|||
} |
|||
} |
|||
|
|||
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { |
|||
metadata = c.protectUserAgent(metadata) |
|||
for k, v := range metadata { |
|||
h[userDefinedMetadataHeaderPrefix+k] = v |
|||
} |
|||
return h |
|||
} |
|||
|
|||
func getDebugHeaders(h http.Header) (requestID, date, version string) { |
|||
requestID = h.Get("x-ms-request-id") |
|||
version = h.Get("x-ms-version") |
|||
date = h.Get("Date") |
|||
return |
|||
} |
|||
|
|||
func getErrorFromResponse(resp *http.Response) error { |
|||
respBody, err := readAndCloseBody(resp.Body) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
requestID, date, version := getDebugHeaders(resp.Header) |
|||
if len(respBody) == 0 { |
|||
// no error in response body, might happen in HEAD requests
|
|||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) |
|||
} else { |
|||
storageErr := AzureStorageServiceError{ |
|||
StatusCode: resp.StatusCode, |
|||
RequestID: requestID, |
|||
Date: date, |
|||
APIVersion: version, |
|||
} |
|||
// response contains storage service error object, unmarshal
|
|||
if resp.Header.Get("Content-Type") == "application/xml" { |
|||
errIn := serviceErrFromXML(respBody, &storageErr) |
|||
if err != nil { // error unmarshaling the error response
|
|||
err = errIn |
|||
} |
|||
} else { |
|||
errIn := serviceErrFromJSON(respBody, &storageErr) |
|||
if err != nil { // error unmarshaling the error response
|
|||
err = errIn |
|||
} |
|||
} |
|||
err = storageErr |
|||
} |
|||
return err |
|||
} |
@ -0,0 +1,38 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"net/url" |
|||
"time" |
|||
) |
|||
|
|||
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
	APIVersion string
	Start      time.Time
	Expiry     time.Time
	IP         string
	UseHTTPS   bool
	Identifier string
}
|||
|
|||
func addQueryParameter(query url.Values, key, value string) url.Values { |
|||
if value != "" { |
|||
query.Add(key, value) |
|||
} |
|||
return query |
|||
} |
@ -0,0 +1,640 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// Container represents an Azure container.
|
|||
type Container struct { |
|||
bsc *BlobStorageClient |
|||
Name string `xml:"Name"` |
|||
Properties ContainerProperties `xml:"Properties"` |
|||
Metadata map[string]string |
|||
sasuri url.URL |
|||
} |
|||
|
|||
// Client returns the HTTP client used by the Container reference.
|
|||
func (c *Container) Client() *Client { |
|||
return &c.bsc.client |
|||
} |
|||
|
|||
func (c *Container) buildPath() string { |
|||
return fmt.Sprintf("/%s", c.Name) |
|||
} |
|||
|
|||
// GetURL gets the canonical URL to the container.
|
|||
// This method does not create a publicly accessible URL if the container
|
|||
// is private and this method does not check if the blob exists.
|
|||
func (c *Container) GetURL() string { |
|||
container := c.Name |
|||
if container == "" { |
|||
container = "$root" |
|||
} |
|||
return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) |
|||
} |
|||
|
|||
// ContainerSASOptions are options to construct a container SAS URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type ContainerSASOptions struct {
	ContainerSASPermissions
	OverrideHeaders
	SASOptions
}

// ContainerSASPermissions includes the available permissions for
// a container SAS URI.
type ContainerSASPermissions struct {
	BlobServiceSASPermissions
	List bool // allow listing blobs in the container (adds the "l" permission)
}

// GetSASURI creates an URL to the container which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
	uri := c.GetURL()
	signedResource := "c" // "c" identifies the signed resource as a container
	canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
	if err != nil {
		return "", err
	}

	// build permissions string from the blob-level flags plus List
	permissions := options.BlobServiceSASPermissions.buildString()
	if options.List {
		permissions += "l"
	}

	return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
|||
|
|||
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
	LastModified  string              `xml:"Last-Modified"`
	Etag          string              `xml:"Etag"`
	LeaseStatus   string              `xml:"LeaseStatus"`
	LeaseState    string              `xml:"LeaseState"`
	LeaseDuration string              `xml:"LeaseDuration"`
	PublicAccess  ContainerAccessType `xml:"PublicAccess"`
}

// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
	XMLName    xml.Name    `xml:"EnumerationResults"`
	Xmlns      string      `xml:"xmlns,attr"`
	Prefix     string      `xml:"Prefix"`
	Marker     string      `xml:"Marker"`
	NextMarker string      `xml:"NextMarker"` // continuation token; pass as Marker on the next call
	MaxResults int64       `xml:"MaxResults"`
	Containers []Container `xml:"Containers>Container"`
}
|||
|
|||
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"` // continuation token for paging
	MaxResults int64    `xml:"MaxResults"`
	Blobs      []Blob   `xml:"Blobs>Blob"`

	// BlobPrefix is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	// The list here can be thought of as "folders" that may contain
	// other folders or blobs.
	BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`

	// Delimiter is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	Delimiter string `xml:"Delimiter"`
}
|||
|
|||
// IncludeBlobDataset has options to include in a list blobs operation
type IncludeBlobDataset struct {
	Snapshots        bool
	Metadata         bool
	UncommittedBlobs bool
	Copy             bool
}

// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
	Prefix     string
	Delimiter  string
	Marker     string
	Include    *IncludeBlobDataset
	MaxResults uint
	Timeout    uint
	RequestID  string
}

// getParameters translates the populated fields into the query parameters of
// a List Blobs request; zero-valued fields are omitted.
func (p ListBlobsParameters) getParameters() url.Values {
	out := url.Values{}

	// Non-empty string parameters map one-to-one onto query keys.
	for _, sp := range []struct{ name, value string }{
		{"prefix", p.Prefix},
		{"delimiter", p.Delimiter},
		{"marker", p.Marker},
	} {
		if sp.value != "" {
			out.Set(sp.name, sp.value)
		}
	}

	if p.Include != nil {
		// Build the comma-separated "include" dataset list in wire order.
		include := []string{}
		include = addString(include, p.Include.Snapshots, "snapshots")
		include = addString(include, p.Include.Metadata, "metadata")
		include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
		include = addString(include, p.Include.Copy, "copy")
		out.Set("include", strings.Join(include, ","))
	}

	// Positive numeric parameters are rendered in decimal.
	for _, np := range []struct {
		name  string
		value uint
	}{
		{"maxresults", p.MaxResults},
		{"timeout", p.Timeout},
	} {
		if np.value != 0 {
			out.Set(np.name, strconv.FormatUint(uint64(np.value), 10))
		}
	}

	return out
}

// addString appends text to datasets when include is set; otherwise the slice
// is returned untouched.
func addString(datasets []string, include bool, text string) []string {
	if !include {
		return datasets
	}
	return append(datasets, text)
}
|||
|
|||
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string

// Access options for containers
const (
	// ContainerAccessTypePrivate disables anonymous access (the default).
	ContainerAccessTypePrivate ContainerAccessType = ""
	// ContainerAccessTypeBlob allows anonymous read access to blob data only.
	ContainerAccessTypeBlob ContainerAccessType = "blob"
	// ContainerAccessTypeContainer allows anonymous read and list access to
	// the entire container.
	ContainerAccessTypeContainer ContainerAccessType = "container"
)

// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
	ID         string    // signed identifier of the policy
	StartTime  time.Time // when the policy becomes valid
	ExpiryTime time.Time // when the policy expires
	CanRead    bool
	CanWrite   bool
	CanDelete  bool
}

// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
	AccessType     ContainerAccessType
	AccessPolicies []ContainerAccessPolicy
}

// ContainerAccessHeader references header used when setting/getting container ACL
const (
	ContainerAccessHeader string = "x-ms-blob-public-access"
)
|||
|
|||
// GetBlobReference returns a Blob object for the specified blob name.
// No request is issued; the reference is purely local until used.
func (c *Container) GetBlobReference(name string) *Blob {
	return &Blob{
		Container: c,
		Name:      name,
	}
}

// CreateContainerOptions includes the options for a create container operation
type CreateContainerOptions struct {
	Timeout   uint
	Access    ContainerAccessType `header:"x-ms-blob-public-access"`
	RequestID string              `header:"x-ms-client-request-id"`
}
|||
|
|||
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
func (c *Container) Create(options *CreateContainerOptions) error {
	resp, err := c.create(options)
	if err != nil {
		return err
	}
	// Drain and close the body so the connection can be reused.
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}

// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
	resp, err := c.create(options)
	if resp != nil {
		defer drainRespBody(resp)
		// 201 Created => newly created; 409 Conflict => already existed.
		// Both count as success for this call.
		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
			return resp.StatusCode == http.StatusCreated, nil
		}
	}
	return false, err
}

// create issues the PUT request shared by Create and CreateIfNotExists and
// returns the raw response for the caller to interpret.
func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()
	headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)

	return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
}
|||
|
|||
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
	q := url.Values{"restype": {"container"}}
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// For a service-SAS client the SAS query string must be preserved and
		// merged with the request's own parameters.
		q = mergeParams(q, c.sasuri.Query())
		newURI := c.sasuri
		newURI.RawQuery = q.Encode()
		uri = newURI.String()

	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
	}
	headers := c.bsc.client.getStandardHeaders()

	resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
	if resp != nil {
		defer drainRespBody(resp)
		// 200 OK => exists; 404 Not Found => does not exist. Both are
		// conclusive answers, so err is not propagated in these cases.
		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusOK, nil
		}
	}
	return false, err
}
|||
|
|||
// SetContainerPermissionOptions includes options for a set container permissions operation
type SetContainerPermissionOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// SetPermissions sets up container permissions (public-access level plus
// stored access policies).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
	// Serialize the access policies into the signed-identifiers XML payload.
	body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}
	params := url.Values{
		"restype": {"container"},
		"comp":    {"acl"},
	}
	headers := c.bsc.client.getStandardHeaders()
	// The public-access level travels in the x-ms-blob-public-access header.
	headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
	headers["Content-Length"] = strconv.Itoa(length)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
|||
|
|||
// GetContainerPermissionOptions includes options for a get container permissions operation
type GetContainerPermissionOptions struct {
	Timeout   uint
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}

// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
	params := url.Values{
		"restype": {"container"},
		"comp":    {"acl"},
	}
	headers := c.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// The ACL comes back as signed-identifier XML in the body; the
	// public-access level is delivered separately via a response header.
	var ap AccessPolicy
	err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return buildAccessPolicy(ap, &resp.Header), nil
}
|||
|
|||
// buildAccessPolicy combines the parsed ACL XML with the public-access
// response header into the exported ContainerPermissions shape.
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
	// containerAccess. Blob, Container, empty
	containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
	permissions := ContainerPermissions{
		AccessType:     ContainerAccessType(containerAccess),
		AccessPolicies: []ContainerAccessPolicy{},
	}

	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		capd := ContainerAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
		}
		// Decode the compact permission string ("rwd") back into bool flags.
		capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
		capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
		capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")

		permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
	}
	return &permissions
}
|||
|
|||
// DeleteContainerOptions includes options for a delete container operation
type DeleteContainerOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}

// Delete deletes the container with given name on the storage
// account. If the container does not exist returns error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) Delete(options *DeleteContainerOptions) error {
	resp, err := c.delete(options)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	// Delete Container returns 202 Accepted; the container is then garbage
	// collected by the service.
	return checkRespCode(resp, []int{http.StatusAccepted})
}

// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
	resp, err := c.delete(options)
	if resp != nil {
		defer drainRespBody(resp)
		// 202 Accepted => deleted now; 404 Not Found => nothing to delete.
		if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// delete issues the DELETE request shared by Delete and DeleteIfExists.
func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)

	return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
}
|||
|
|||
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{
		"restype": {"container"},
		"comp":    {"list"},
	})
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// Preserve the SAS token query parameters alongside the list params.
		q = mergeParams(q, c.sasuri.Query())
		newURI := c.sasuri
		newURI.RawQuery = q.Encode()
		uri = newURI.String()
	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
	}

	headers := c.bsc.client.getStandardHeaders()
	headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)

	var out BlobListResponse
	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	// Wire each returned blob back to this container so blob-level methods
	// work on the results. (Runs even on unmarshal error; err is still
	// returned to the caller below.)
	for i := range out.Blobs {
		out.Blobs[i].Container = c
	}
	return out, err
}
|||
|
|||
// ContainerMetadataOptions includes options for container metadata operations
type ContainerMetadataOptions struct {
	Timeout   uint
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}

// SetMetadata replaces the metadata for the specified container.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
	params := url.Values{
		"comp":    {"metadata"},
		"restype": {"container"},
	}
	headers := c.bsc.client.getStandardHeaders()
	// The metadata to write is taken from c.Metadata on this reference.
	headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusOK})
}
|||
|
|||
// GetMetadata returns all user-defined metadata for the specified container.
// On success the result is stored into c.Metadata (the method itself returns
// only an error).
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
	params := url.Values{
		"comp":    {"metadata"},
		"restype": {"container"},
	}
	headers := c.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	// Metadata arrives as x-ms-meta-* response headers.
	c.writeMetadata(resp.Header)
	return nil
}

// writeMetadata extracts the metadata headers into c.Metadata.
func (c *Container) writeMetadata(h http.Header) {
	c.Metadata = writeMetadata(h)
}
|||
|
|||
// generateContainerACLpayload serializes access policies into the
// signed-identifiers XML body expected by Set Container ACL, returning the
// payload reader and its byte length (for the Content-Length header).
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{
		SignedIdentifiers: []SignedIdentifier{},
	}
	for _, capd := range policies {
		permission := capd.generateContainerPermissions()
		signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
	}
	return xmlMarshal(sil)
}
|||
|
|||
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { |
|||
// generate the permissions string (rwd).
|
|||
// still want the end user API to have bool flags.
|
|||
permissions = "" |
|||
|
|||
if capd.CanRead { |
|||
permissions += "r" |
|||
} |
|||
|
|||
if capd.CanWrite { |
|||
permissions += "w" |
|||
} |
|||
|
|||
if capd.CanDelete { |
|||
permissions += "d" |
|||
} |
|||
|
|||
return permissions |
|||
} |
|||
|
|||
// GetProperties updates the properties of the container reference in place
// from a Get Container Properties call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
func (c *Container) GetProperties() error {
	params := url.Values{
		"restype": {"container"},
	}
	headers := c.bsc.client.getStandardHeaders()

	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	// update properties from the response headers
	c.Properties.Etag = resp.Header.Get(headerEtag)
	c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
	c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
	c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
	c.Properties.LastModified = resp.Header.Get("Last-Modified")
	c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))

	return nil
}
@ -0,0 +1,237 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
const (
	// Wire values of the copy status reported by the service.
	blobCopyStatusPending = "pending"
	blobCopyStatusSuccess = "success"
	blobCopyStatusAborted = "aborted"
	blobCopyStatusFailed  = "failed"
)

// CopyOptions includes the options for a copy blob operation
type CopyOptions struct {
	Timeout uint
	Source  CopyOptionsConditions // conditions evaluated against the source blob
	// Destiny holds the conditions evaluated against the destination blob.
	// NOTE(review): "Destination" would be the conventional name, but this
	// field is exported API and cannot be renamed compatibly.
	Destiny   CopyOptionsConditions
	RequestID string
}

// IncrementalCopyOptions includes the options for an incremental copy blob operation
type IncrementalCopyOptions struct {
	Timeout     uint
	Destination IncrementalCopyOptionsConditions
	RequestID   string
}

// CopyOptionsConditions includes some conditional options in a copy blob operation
type CopyOptionsConditions struct {
	LeaseID           string
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}

// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
type IncrementalCopyOptionsConditions struct {
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}
|||
|
|||
// Copy starts a blob copy operation and waits for the operation to
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
// this helper method works faster on smaller files.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
	copyID, err := b.StartCopy(sourceBlob, options)
	if err != nil {
		return err
	}

	// Block until the service reports a terminal copy status.
	return b.WaitForCopy(copyID)
}
|||
|
|||
// StartCopy starts a blob copy operation and returns the copy ID that
// identifies it (for WaitForCopy / AbortCopy).
// sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-source"] = sourceBlob
	// Metadata set on this reference is applied to the destination blob.
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
		// Conditional headers for the source blob.
		headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
		headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
		// Conditional headers for the destination blob.
		headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}

	// The copy ID identifies this operation for status polling and abort.
	copyID := resp.Header.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}
|||
|
|||
// AbortCopyOptions includes the options for an abort blob operation
type AbortCopyOptions struct {
	Timeout   uint
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}

// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
// copyID is generated from StartBlobCopy function.
// currentLeaseID is required IF the destination blob has an active lease on it.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
	params := url.Values{
		"comp":   {"copy"},
		"copyid": {copyID},
	}
	headers := b.Container.bsc.client.getStandardHeaders()
	// The abort action is selected via the x-ms-copy-action header.
	headers["x-ms-copy-action"] = "abort"

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
|
|||
func (b *Blob) WaitForCopy(copyID string) error { |
|||
for { |
|||
err := b.GetProperties(nil) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if b.Properties.CopyID != copyID { |
|||
return errBlobCopyIDMismatch |
|||
} |
|||
|
|||
switch b.Properties.CopyStatus { |
|||
case blobCopyStatusSuccess: |
|||
return nil |
|||
case blobCopyStatusPending: |
|||
continue |
|||
case blobCopyStatusAborted: |
|||
return errBlobCopyAborted |
|||
case blobCopyStatusFailed: |
|||
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription) |
|||
default: |
|||
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
|
|||
// sourceBlob parameter must be a valid snapshot URL of the original blob.
|
|||
// THe original blob mut be public, or use a Shared Access Signature.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
|
|||
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { |
|||
params := url.Values{"comp": {"incrementalcopy"}} |
|||
|
|||
// need formatting to 7 decimal places so it's friendly to Windows and *nix
|
|||
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") |
|||
u, err := url.Parse(sourceBlobURL) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
query := u.Query() |
|||
query.Add("snapshot", snapshotTimeFormatted) |
|||
encodedQuery := query.Encode() |
|||
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) |
|||
u.RawQuery = encodedQuery |
|||
snapshotURL := u.String() |
|||
|
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers["x-ms-copy-source"] = snapshotURL |
|||
|
|||
if options != nil { |
|||
addTimeout(params, options.Timeout) |
|||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) |
|||
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) |
|||
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) |
|||
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) |
|||
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) |
|||
} |
|||
|
|||
// get URI of destination blob
|
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
|
|||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
defer drainRespBody(resp) |
|||
|
|||
if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
copyID := resp.Header.Get("x-ms-copy-id") |
|||
if copyID == "" { |
|||
return "", errors.New("Got empty copy id header") |
|||
} |
|||
return copyID, nil |
|||
} |
@ -0,0 +1,238 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"net/http" |
|||
"net/url" |
|||
"sync" |
|||
) |
|||
|
|||
// Directory represents a directory on a share.
type Directory struct {
	fsc        *FileServiceClient // service client used for all REST calls
	Metadata   map[string]string  // user-defined metadata key/value pairs
	Name       string             `xml:"Name"`
	parent     *Directory // nil when this is the share's root directory
	Properties DirectoryProperties
	share      *Share // share this directory belongs to
}

// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
}

// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type ListDirsAndFilesParameters struct {
	Prefix     string
	Marker     string
	MaxResults uint
	Timeout    uint
}

// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type DirsAndFilesListResponse struct {
	XMLName     xml.Name    `xml:"EnumerationResults"`
	Xmlns       string      `xml:"xmlns,attr"`
	Marker      string      `xml:"Marker"`
	MaxResults  int64       `xml:"MaxResults"`
	Directories []Directory `xml:"Entries>Directory"`
	Files       []File      `xml:"Entries>File"`
	NextMarker  string      `xml:"NextMarker"` // continuation token for the next page, if any
}
|||
|
|||
// builds the complete directory path for this directory object.
|
|||
func (d *Directory) buildPath() string { |
|||
path := "" |
|||
current := d |
|||
for current.Name != "" { |
|||
path = "/" + current.Name + path |
|||
current = current.parent |
|||
} |
|||
return d.share.buildPath() + path |
|||
} |
|||
|
|||
// Create this directory in the associated share.
// If a directory with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) Create(options *FileRequestOptions) error {
	// if this is the root directory exit early
	if d.parent == nil {
		return nil
	}

	params := prepareOptions(options)
	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	// record the Etag/Last-Modified returned by the service
	d.updateEtagAndLastModified(headers)
	return nil
}

// CreateIfNotExists creates this directory under the associated share if the
// directory does not exists. Returns true if the directory is newly created or
// false if the directory already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	// if this is the root directory exit early
	if d.parent == nil {
		return false, nil
	}

	params := prepareOptions(options)
	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
	if resp != nil {
		defer drainRespBody(resp)
		// Conflict means the directory already existed; Created means it is new.
		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
			if resp.StatusCode == http.StatusCreated {
				d.updateEtagAndLastModified(resp.Header)
				return true, nil
			}

			// already exists: refresh local state from the service instead
			return false, d.FetchAttributes(nil)
		}
	}

	return false, err
}

// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) Delete(options *FileRequestOptions) error {
	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
}

// DeleteIfExists removes this directory if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
	if resp != nil {
		defer drainRespBody(resp)
		// Accepted means deleted; NotFound means it was already absent.
		if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// Exists returns true if this directory exists.
func (d *Directory) Exists() (bool, error) {
	exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
	if exists {
		d.updateEtagAndLastModified(headers)
	}
	return exists, err
}

// FetchAttributes retrieves metadata for this directory.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
	params := prepareOptions(options)
	headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
	if err != nil {
		return err
	}

	// replace local properties/metadata with what the service returned
	d.updateEtagAndLastModified(headers)
	d.Metadata = getMetadataFromHeaders(headers)

	return nil
}
|||
|
|||
// GetDirectoryReference returns a child Directory object for this directory.
|
|||
func (d *Directory) GetDirectoryReference(name string) *Directory { |
|||
return &Directory{ |
|||
fsc: d.fsc, |
|||
Name: name, |
|||
parent: d, |
|||
share: d.share, |
|||
} |
|||
} |
|||
|
|||
// GetFileReference returns a child File object for this directory.
|
|||
func (d *Directory) GetFileReference(name string) *File { |
|||
return &File{ |
|||
fsc: d.fsc, |
|||
Name: name, |
|||
parent: d, |
|||
share: d.share, |
|||
mutex: &sync.Mutex{}, |
|||
} |
|||
} |
|||
|
|||
// ListDirsAndFiles returns a list of files and directories under this directory.
// It also contains a pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
	q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))

	resp, err := d.fsc.listContent(d.buildPath(), q, nil)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()
	// decode the XML listing into the typed response structure
	var out DirsAndFilesListResponse
	err = xmlUnmarshal(resp.Body, &out)
	return &out, err
}

// SetMetadata replaces the metadata for this directory.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetDirectoryMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
	headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
	if err != nil {
		return err
	}

	d.updateEtagAndLastModified(headers)
	return nil
}

// updates Etag and last modified date from the given response headers
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
	d.Properties.Etag = headers.Get("Etag")
	d.Properties.LastModified = headers.Get("Last-Modified")
}

// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private and this method does not check if the directory exists.
func (d *Directory) URL() string {
	return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}
@ -0,0 +1,466 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/base64" |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
uuid "github.com/satori/go.uuid" |
|||
) |
|||
|
|||
// Annotating as secure for gas scanning
/* #nosec */
const (
	// JSON property names used for the table entity identifiers.
	partitionKeyNode  = "PartitionKey"
	rowKeyNode        = "RowKey"
	etagErrorTemplate = "Etag didn't match: %v"
)

var (
	errEmptyPayload      = errors.New("Empty payload is not a valid metadata level for this operation")
	errNilPreviousResult = errors.New("The previous results page is nil")
	errNilNextLink       = errors.New("There are no more pages in this query results")
)

// Entity represents an entity inside an Azure table.
type Entity struct {
	Table        *Table // table this entity belongs to
	PartitionKey string
	RowKey       string
	TimeStamp    time.Time
	// OData metadata fields populated from service responses.
	OdataMetadata string
	OdataType     string
	OdataID       string
	OdataEtag     string
	OdataEditLink string
	// Properties holds the user-defined fields of the entity.
	Properties map[string]interface{}
}
|||
|
|||
// GetEntityReference returns an Entity object with the specified
|
|||
// partition key and row key.
|
|||
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { |
|||
return &Entity{ |
|||
PartitionKey: partitionKey, |
|||
RowKey: rowKey, |
|||
Table: t, |
|||
} |
|||
} |
|||
|
|||
// EntityOptions includes options for entity operations.
type EntityOptions struct {
	Timeout   uint   // server-side timeout in seconds
	RequestID string `header:"x-ms-client-request-id"`
}

// GetEntityOptions includes options for a get entity operation
type GetEntityOptions struct {
	Select    []string // property names to retrieve; empty retrieves all
	RequestID string   `header:"x-ms-client-request-id"`
}
|||
|
|||
// Get gets the referenced entity. Which properties to get can be
// specified using the select option.
// See:
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
	if ml == EmptyPayload {
		return errEmptyPayload
	}
	// RowKey and PartitionKey could be lost if not included in the query
	// As those are the entity identifiers, it is best if they are not lost
	rk := e.RowKey
	pk := e.PartitionKey

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	headers := e.Table.tsc.client.getStandardHeaders()
	// the metadata level is negotiated through the Accept header
	headers[headerAccept] = string(ml)

	if options != nil {
		if len(options.Select) > 0 {
			query.Add("$select", strings.Join(options.Select, ","))
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	err = json.Unmarshal(respBody, e)
	if err != nil {
		return err
	}
	// restore the identifiers saved above in case the response omitted them
	e.PartitionKey = pk
	e.RowKey = rk

	return nil
}
|||
|
|||
// Insert inserts the referenced entity in its table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, ml)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if ml != EmptyPayload {
		// with metadata requested the service echoes the entity back:
		// expect 201 Created and refresh local state from the body
		if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
			return err
		}
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if err = e.UnmarshalJSON(data); err != nil {
			return err
		}
	} else {
		// no content requested: expect 204 No Content
		if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
			return err
		}
	}

	return nil
}
|||
|
|||
// Update updates the contents of an entity. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or if the ETag is different
// than the one in Azure.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
func (e *Entity) Update(force bool, options *EntityOptions) error {
	// PUT replaces the entity entirely
	return e.updateMerge(force, http.MethodPut, options)
}

// Merge merges the contents of entity specified with PartitionKey and RowKey
// with the content specified in Properties.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
func (e *Entity) Merge(force bool, options *EntityOptions) error {
	// MERGE is a non-standard verb used by the Table service
	return e.updateMerge(force, "MERGE", options)
}
|||
|
|||
// Delete deletes the entity.
|
|||
// The function fails if there is no entity with the same PartitionKey and
|
|||
// RowKey in the table or if the ETag is different than the one in Azure.
|
|||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
|
|||
func (e *Entity) Delete(force bool, options *EntityOptions) error { |
|||
query, headers := options.getParameters() |
|||
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) |
|||
|
|||
headers = addIfMatchHeader(headers, force, e.OdataEtag) |
|||
headers = addReturnContentHeaders(headers, EmptyPayload) |
|||
|
|||
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) |
|||
resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) |
|||
if err != nil { |
|||
if resp.StatusCode == http.StatusPreconditionFailed { |
|||
return fmt.Errorf(etagErrorTemplate, err) |
|||
} |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
|
|||
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { |
|||
return err |
|||
} |
|||
|
|||
return e.updateTimestamp(resp.Header) |
|||
} |
|||
|
|||
// InsertOrReplace inserts an entity or replaces the existing one.
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
	return e.insertOr(http.MethodPut, options)
}

// InsertOrMerge inserts an entity or merges the existing one.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
	// MERGE is a non-standard verb used by the Table service
	return e.insertOr("MERGE", options)
}

// buildPath returns the resource path addressing this single entity,
// e.g. tablename(PartitionKey='pk', RowKey='rk').
func (e *Entity) buildPath() string {
	return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
}
|||
|
|||
// MarshalJSON is a custom marshaller for entity.
// It flattens PartitionKey, RowKey and all user Properties into a single
// JSON object, adding the "<name>@odata.type" annotations the Table service
// requires for binary, datetime, GUID, int64 and double values.
func (e *Entity) MarshalJSON() ([]byte, error) {
	completeMap := map[string]interface{}{}
	completeMap[partitionKeyNode] = e.PartitionKey
	completeMap[rowKeyNode] = e.RowKey
	for k, v := range e.Properties {
		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
		switch t := v.(type) {
		case []byte:
			completeMap[typeKey] = OdataBinary
			completeMap[k] = t
		case time.Time:
			completeMap[typeKey] = OdataDateTime
			completeMap[k] = t.Format(time.RFC3339Nano)
		case uuid.UUID:
			completeMap[typeKey] = OdataGUID
			completeMap[k] = t.String()
		case int64:
			completeMap[typeKey] = OdataInt64
			completeMap[k] = fmt.Sprintf("%v", v)
		case float32, float64:
			completeMap[typeKey] = OdataDouble
			completeMap[k] = fmt.Sprintf("%v", v)
		default:
			completeMap[k] = v
		}
		// if the caller supplied an explicit @odata.type annotation as a
		// property, validate it and make sure its value property exists
		if strings.HasSuffix(k, OdataTypeSuffix) {
			if !(completeMap[k] == OdataBinary ||
				completeMap[k] == OdataDateTime ||
				completeMap[k] == OdataGUID ||
				completeMap[k] == OdataInt64 ||
				completeMap[k] == OdataDouble) {
				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
			}
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			if _, ok := completeMap[valueKey]; !ok {
				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
			}
		}
	}
	return json.Marshal(completeMap)
}
|||
|
|||
// UnmarshalJSON is a custom unmarshaller for entities.
// It splits the flat JSON object back into the odata metadata fields, the
// identifiers, the Timestamp, and the typed user Properties, converting
// values according to their "@odata.type" annotations.
func (e *Entity) UnmarshalJSON(data []byte) error {
	errorTemplate := "Deserializing error: %v"

	props := map[string]interface{}{}
	err := json.Unmarshal(data, &props)
	if err != nil {
		return err
	}

	// deselialize metadata
	e.OdataMetadata = stringFromMap(props, "odata.metadata")
	e.OdataType = stringFromMap(props, "odata.type")
	e.OdataID = stringFromMap(props, "odata.id")
	e.OdataEtag = stringFromMap(props, "odata.etag")
	e.OdataEditLink = stringFromMap(props, "odata.editLink")
	e.PartitionKey = stringFromMap(props, partitionKeyNode)
	e.RowKey = stringFromMap(props, rowKeyNode)

	// deserialize timestamp
	timeStamp, ok := props["Timestamp"]
	if ok {
		str, ok := timeStamp.(string)
		if !ok {
			return fmt.Errorf(errorTemplate, "Timestamp casting error")
		}
		t, err := time.Parse(time.RFC3339Nano, str)
		if err != nil {
			return fmt.Errorf(errorTemplate, err)
		}
		e.TimeStamp = t
	}
	// the timestamp is stored in e.TimeStamp, not in Properties
	delete(props, "Timestamp")
	delete(props, "Timestamp@odata.type")

	// deserialize entity (user defined fields)
	for k, v := range props {
		if strings.HasSuffix(k, OdataTypeSuffix) {
			// k is an annotation ("<name>@odata.type"); convert the matching
			// value property according to the annotated type, then drop the
			// annotation itself from the result map
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			str, ok := props[valueKey].(string)
			if !ok {
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
			}
			switch v {
			case OdataBinary:
				props[valueKey], err = base64.StdEncoding.DecodeString(str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
			case OdataDateTime:
				t, err := time.Parse("2006-01-02T15:04:05Z", str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = t
			case OdataGUID:
				props[valueKey] = uuid.FromStringOrNil(str)
			case OdataInt64:
				i, err := strconv.ParseInt(str, 10, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = i
			case OdataDouble:
				f, err := strconv.ParseFloat(str, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = f
			default:
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
			}
			delete(props, k)
		}
	}

	e.Properties = props
	return nil
}
|||
|
|||
// getAndDelete removes key from props and returns the removed value,
// or nil when the key is not present.
func getAndDelete(props map[string]interface{}, key string) interface{} {
	value, found := props[key]
	if !found {
		return nil
	}
	delete(props, key)
	return value
}
|||
|
|||
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { |
|||
if force { |
|||
h[headerIfMatch] = "*" |
|||
} else { |
|||
h[headerIfMatch] = etag |
|||
} |
|||
return h |
|||
} |
|||
|
|||
// updates Etag and timestamp from the given response headers
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
	e.OdataEtag = headers.Get(headerEtag)
	return e.updateTimestamp(headers)
}

// updateTimestamp sets e.TimeStamp from the response Date header
// (RFC 1123 format); fails if the header cannot be parsed.
func (e *Entity) updateTimestamp(headers http.Header) error {
	str := headers.Get(headerDate)
	t, err := time.Parse(time.RFC1123, str)
	if err != nil {
		return fmt.Errorf("Update timestamp error: %v", err)
	}
	e.TimeStamp = t
	return nil
}
|||
|
|||
// insertOr performs an unconditional upsert of the entity using the given
// verb (PUT for replace, MERGE for merge); shared by InsertOrReplace and
// InsertOrMerge.
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}

	// refresh the local Etag/timestamp from the response headers
	return e.updateEtagAndTimestamp(resp.Header)
}
|||
|
|||
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { |
|||
query, headers := options.getParameters() |
|||
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) |
|||
|
|||
body, err := json.Marshal(e) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
headers = addBodyRelatedHeaders(headers, len(body)) |
|||
headers = addIfMatchHeader(headers, force, e.OdataEtag) |
|||
headers = addReturnContentHeaders(headers, EmptyPayload) |
|||
|
|||
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) |
|||
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) |
|||
if err != nil { |
|||
if resp.StatusCode == http.StatusPreconditionFailed { |
|||
return fmt.Errorf(etagErrorTemplate, err) |
|||
} |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
|
|||
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { |
|||
return err |
|||
} |
|||
|
|||
return e.updateEtagAndTimestamp(resp.Header) |
|||
} |
|||
|
|||
func stringFromMap(props map[string]interface{}, key string) string { |
|||
value := getAndDelete(props, key) |
|||
if value != nil { |
|||
return value.(string) |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (options *EntityOptions) getParameters() (url.Values, map[string]string) { |
|||
query := url.Values{} |
|||
headers := map[string]string{} |
|||
if options != nil { |
|||
query = addTimeout(query, options.Timeout) |
|||
headers = headersFromStruct(*options) |
|||
} |
|||
return query, headers |
|||
} |
@ -0,0 +1,484 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"sync" |
|||
) |
|||
|
|||
const fourMB = uint64(4194304)       // 4 MiB
const oneTB = uint64(1099511627776)  // 1 TiB

// Export maximum range and file sizes

// MaxRangeSize defines the maximum size in bytes for a file range.
const MaxRangeSize = fourMB

// MaxFileSize defines the maximum size in bytes for a file.
const MaxFileSize = oneTB

// File represents a file on a share.
type File struct {
	fsc                *FileServiceClient // service client used for all REST calls
	Metadata           map[string]string  // user-defined metadata key/value pairs
	Name               string             `xml:"Name"`
	parent             *Directory // directory containing this file
	Properties         FileProperties `xml:"Properties"`
	share              *Share
	FileCopyProperties FileCopyState
	mutex              *sync.Mutex
}

// FileProperties contains various properties of a file.
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string
	Language     string `header:"x-ms-content-language"`
	LastModified string
	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}

// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string `header:"x-ms-copy-id"`
	Progress       string
	Source         string
	Status         string `header:"x-ms-copy-status"`
	StatusDesc     string
}

// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser // caller is responsible for closing the body
	ContentMD5 string
}

// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but could expand.
type FileRequestOptions struct {
	Timeout uint // timeout duration in seconds.
}
|||
|
|||
func prepareOptions(options *FileRequestOptions) url.Values { |
|||
params := url.Values{} |
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
} |
|||
return params |
|||
} |
|||
|
|||
// FileRanges contains a list of file range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRanges struct {
	ContentLength uint64
	LastModified  string
	ETag          string
	FileRanges    []FileRange `xml:"Range"` // the allocated ranges of the file
}
|||
|
|||
// FileRange contains range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}

// String renders the range as an HTTP Range header value, e.g. "bytes=0-1023".
func (r FileRange) String() string {
	return fmt.Sprintf("bytes=%v-%v", r.Start, r.End)
}
|||
|
|||
// builds the complete file path for this file object
|
|||
func (f *File) buildPath() string { |
|||
return f.parent.buildPath() + "/" + f.Name |
|||
} |
|||
|
|||
// ClearRange releases the specified range of space in a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
	var timeout *uint
	if options != nil {
		timeout = &options.Timeout
	}
	// a nil body signals a "clear" (as opposed to an update) range operation
	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}

// Create creates a new file or replaces an existing one.
// maxSize reserves the file's total size and may not exceed 1 TB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
	if maxSize > oneTB {
		return fmt.Errorf("max file size is 1TB")
	}
	params := prepareOptions(options)
	headers := headersFromStruct(f.Properties)
	headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
	headers["x-ms-type"] = "file"

	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	// record the reserved length and the service-assigned Etag/Last-Modified
	f.Properties.Length = maxSize
	f.updateEtagAndLastModified(outputHeaders)
	return nil
}
|||
|
|||
// CopyFile operation copied a file/blob from the sourceURL to the path provided.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
	extraHeaders := map[string]string{
		"x-ms-type":        "file",
		"x-ms-copy-source": sourceURL,
	}
	params := prepareOptions(options)

	// the copy is asynchronous: the service returns 202 Accepted
	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	// record the copy operation's id and current status for later polling
	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
	return nil
}

// Delete immediately removes this file from the storage account.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) Delete(options *FileRequestOptions) error {
	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
}

// DeleteIfExists removes this file if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
	if resp != nil {
		defer drainRespBody(resp)
		// Accepted means deleted; NotFound means it was already absent.
		if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}
|||
|
|||
// GetFileOptions includes options for a get file operation
type GetFileOptions struct {
	Timeout       uint
	GetContentMD5 bool // request an MD5 of the returned range (4MB max range)
}

// DownloadToStream operation downloads the file.
// The caller is responsible for closing the returned ReadCloser.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
	params := prepareOptions(options)
	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		// drain only on failure; on success the body is handed to the caller
		drainRespBody(resp)
		return nil, err
	}
	return resp.Body, nil
}
|||
|
|||
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
//
// The caller owns and must close fs.Body on success.
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
	extraHeaders := map[string]string{
		"Range": fileRange.String(),
	}
	params := url.Values{}
	if options != nil {
		if options.GetContentMD5 {
			// The service only computes a per-range MD5 for ranges of at
			// most 4MB, so reject larger ranges up front.
			if isRangeTooBig(fileRange) {
				return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
			}
			extraHeaders["x-ms-range-get-content-md5"] = "true"
		}
		params = addTimeout(params, options.Timeout)
	}

	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
	if err != nil {
		return fs, err
	}

	// Both 200 (entire file) and 206 (partial content) are acceptable;
	// on any other status drain the body so the connection can be reused.
	if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
		drainRespBody(resp)
		return fs, err
	}

	fs.Body = resp.Body
	if options != nil && options.GetContentMD5 {
		fs.ContentMD5 = resp.Header.Get("Content-MD5")
	}
	return fs, nil
}
|||
|
|||
// Exists returns true if this file exists.
|
|||
func (f *File) Exists() (bool, error) { |
|||
exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) |
|||
if exists { |
|||
f.updateEtagAndLastModified(headers) |
|||
f.updateProperties(headers) |
|||
} |
|||
return exists, err |
|||
} |
|||
|
|||
// FetchAttributes updates metadata and properties for this file.
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
|
|||
func (f *File) FetchAttributes(options *FileRequestOptions) error { |
|||
params := prepareOptions(options) |
|||
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
f.updateEtagAndLastModified(headers) |
|||
f.updateProperties(headers) |
|||
f.Metadata = getMetadataFromHeaders(headers) |
|||
return nil |
|||
} |
|||
|
|||
// returns true if the range is larger than 4MB
|
|||
func isRangeTooBig(fileRange FileRange) bool { |
|||
if fileRange.End-fileRange.Start > fourMB { |
|||
return true |
|||
} |
|||
|
|||
return false |
|||
} |
|||
|
|||
// ListRangesOptions includes options for a list file ranges operation
type ListRangesOptions struct {
	Timeout   uint       // server-side timeout for the request, in seconds
	ListRange *FileRange // optional sub-range to restrict the listing to; nil lists the whole file
}
|||
|
|||
// ListRanges returns the list of valid ranges for this file.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
|
|||
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { |
|||
params := url.Values{"comp": {"rangelist"}} |
|||
|
|||
// add optional range to list
|
|||
var headers map[string]string |
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
if options.ListRange != nil { |
|||
headers = make(map[string]string) |
|||
headers["Range"] = options.ListRange.String() |
|||
} |
|||
} |
|||
|
|||
resp, err := f.fsc.listContent(f.buildPath(), params, headers) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
defer resp.Body.Close() |
|||
var cl uint64 |
|||
cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64) |
|||
if err != nil { |
|||
ioutil.ReadAll(resp.Body) |
|||
return nil, err |
|||
} |
|||
|
|||
var out FileRanges |
|||
out.ContentLength = cl |
|||
out.ETag = resp.Header.Get("ETag") |
|||
out.LastModified = resp.Header.Get("Last-Modified") |
|||
|
|||
err = xmlUnmarshal(resp.Body, &out) |
|||
return &out, err |
|||
} |
|||
|
|||
// modifies a range of bytes in this file
//
// A nil bytes reader clears the range; a non-nil reader updates it with the
// supplied data. Returns the response headers on success.
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	// Only update payloads are size-limited; a clear (bytes == nil) may
	// span any range.
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	params := url.Values{"comp": {"range"}}
	if timeout != nil {
		params = addTimeout(params, *timeout)
	}

	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)

	// default to clear
	write := "clear"
	cl := uint64(0)

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		cl = (fileRange.End - fileRange.Start) + 1
	}

	// x-ms-write selects between "update" and "clear"; Content-Length must
	// match the range size for updates and be 0 for clears.
	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}

	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
}
|||
|
|||
// SetMetadata replaces the metadata for this file.
|
|||
//
|
|||
// Some keys may be converted to Camel-Case before sending. All keys
|
|||
// are returned in lower case by GetFileMetadata. HTTP header names
|
|||
// are case-insensitive so case munging should not matter to other
|
|||
// applications either.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
|
|||
func (f *File) SetMetadata(options *FileRequestOptions) error { |
|||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
f.updateEtagAndLastModified(headers) |
|||
return nil |
|||
} |
|||
|
|||
// SetProperties sets system properties on this file.
|
|||
//
|
|||
// Some keys may be converted to Camel-Case before sending. All keys
|
|||
// are returned in lower case by SetFileProperties. HTTP header names
|
|||
// are case-insensitive so case munging should not matter to other
|
|||
// applications either.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
|
|||
func (f *File) SetProperties(options *FileRequestOptions) error { |
|||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
f.updateEtagAndLastModified(headers) |
|||
return nil |
|||
} |
|||
|
|||
// updates Etag and last modified date
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}
|||
|
|||
// updates file properties from the specified HTTP header
|
|||
func (f *File) updateProperties(header http.Header) { |
|||
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) |
|||
if err == nil { |
|||
f.Properties.Length = size |
|||
} |
|||
|
|||
f.updateEtagAndLastModified(header) |
|||
f.Properties.CacheControl = header.Get("Cache-Control") |
|||
f.Properties.Disposition = header.Get("Content-Disposition") |
|||
f.Properties.Encoding = header.Get("Content-Encoding") |
|||
f.Properties.Language = header.Get("Content-Language") |
|||
f.Properties.MD5 = header.Get("Content-MD5") |
|||
f.Properties.Type = header.Get("Content-Type") |
|||
} |
|||
|
|||
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}
|||
|
|||
// WriteRangeOptions includes options for a write file range operation
type WriteRangeOptions struct {
	Timeout    uint   // server-side timeout for the request, in seconds
	ContentMD5 string // optional MD5 of the uploaded range, verified by the service
}
|||
|
|||
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
|
|||
// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
|
|||
// a maximum size of 4MB.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
|
|||
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { |
|||
if bytes == nil { |
|||
return errors.New("bytes cannot be nil") |
|||
} |
|||
var timeout *uint |
|||
var md5 *string |
|||
if options != nil { |
|||
timeout = &options.Timeout |
|||
md5 = &options.ContentMD5 |
|||
} |
|||
|
|||
headers, err := f.modifyRange(bytes, fileRange, timeout, md5) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
// it's perfectly legal for multiple go routines to call WriteRange
|
|||
// on the same *File (e.g. concurrently writing non-overlapping ranges)
|
|||
// so we must take the file mutex before updating our properties.
|
|||
f.mutex.Lock() |
|||
f.updateEtagAndLastModified(headers) |
|||
f.mutex.Unlock() |
|||
return nil |
|||
} |
@ -0,0 +1,338 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
) |
|||
|
|||
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
	client Client         // underlying storage client used to build and execute requests
	auth   authentication // authentication scheme applied to every request
}
|||
|
|||
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call. Zero-valued fields are omitted from the request.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ListSharesParameters struct {
	Prefix     string // only shares whose names begin with this prefix
	Marker     string // continuation token from a previous listing
	Include    string // extra datasets to include in the response
	MaxResults uint   // maximum number of shares to return
	Timeout    uint   // server-side timeout, in seconds
}
|||
|
|||
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ShareListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"` // continuation token; non-empty when more shares remain
	MaxResults int64    `xml:"MaxResults"`
	Shares     []Share  `xml:"Shares>Share"`
}
|||
|
|||
// compType is the value of the "comp" query parameter selecting the
// sub-resource/operation targeted by a request.
type compType string

const (
	compNone       compType = "" // no comp parameter is sent
	compList       compType = "list"
	compMetadata   compType = "metadata"
	compProperties compType = "properties"
	compRangeList  compType = "rangelist"
)

// String returns the raw query-parameter value for this comp type.
func (ct compType) String() string {
	return string(ct)
}
|||
|
|||
// resourceType is the value of the "restype" query parameter identifying
// the kind of file-service resource a request addresses.
type resourceType string

const (
	resourceDirectory resourceType = "directory"
	resourceFile      resourceType = "" // files send no restype parameter
	resourceShare     resourceType = "share"
)

// String returns the raw query-parameter value for this resource type.
func (rt resourceType) String() string {
	return string(rt)
}
|||
|
|||
func (p ListSharesParameters) getParameters() url.Values { |
|||
out := url.Values{} |
|||
|
|||
if p.Prefix != "" { |
|||
out.Set("prefix", p.Prefix) |
|||
} |
|||
if p.Marker != "" { |
|||
out.Set("marker", p.Marker) |
|||
} |
|||
if p.Include != "" { |
|||
out.Set("include", p.Include) |
|||
} |
|||
if p.MaxResults != 0 { |
|||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) |
|||
} |
|||
if p.Timeout != 0 { |
|||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) |
|||
} |
|||
|
|||
return out |
|||
} |
|||
|
|||
func (p ListDirsAndFilesParameters) getParameters() url.Values { |
|||
out := url.Values{} |
|||
|
|||
if p.Prefix != "" { |
|||
out.Set("prefix", p.Prefix) |
|||
} |
|||
if p.Marker != "" { |
|||
out.Set("marker", p.Marker) |
|||
} |
|||
if p.MaxResults != 0 { |
|||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) |
|||
} |
|||
out = addTimeout(out, p.Timeout) |
|||
|
|||
return out |
|||
} |
|||
|
|||
// returns url.Values for the specified types
|
|||
func getURLInitValues(comp compType, res resourceType) url.Values { |
|||
values := url.Values{} |
|||
if comp != compNone { |
|||
values.Set("comp", comp.String()) |
|||
} |
|||
if res != resourceFile { |
|||
values.Set("restype", res.String()) |
|||
} |
|||
return values |
|||
} |
|||
|
|||
// GetShareReference returns a Share object for the specified share name.
|
|||
func (f *FileServiceClient) GetShareReference(name string) *Share { |
|||
return &Share{ |
|||
fsc: f, |
|||
Name: name, |
|||
Properties: ShareProperties{ |
|||
Quota: -1, |
|||
}, |
|||
} |
|||
} |
|||
|
|||
// ListShares returns the list of shares in a storage account along with
|
|||
// pagination token and other response details.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
|
|||
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { |
|||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) |
|||
|
|||
var out ShareListResponse |
|||
resp, err := f.listContent("", q, nil) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
defer resp.Body.Close() |
|||
err = xmlUnmarshal(resp.Body, &out) |
|||
|
|||
// assign our client to the newly created Share objects
|
|||
for i := range out.Shares { |
|||
out.Shares[i].fsc = &f |
|||
} |
|||
return &out, err |
|||
} |
|||
|
|||
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// Delegates to the shared client implementation for the file endpoint.
	return f.client.getServiceProperties(fileServiceName, f.auth)
}
|||
|
|||
// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
	// Delegates to the shared client implementation for the file endpoint.
	return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
|||
|
|||
// retrieves directory or share content
|
|||
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
uri := f.client.getEndpoint(fileServiceName, path, params) |
|||
extraHeaders = f.client.protectUserAgent(extraHeaders) |
|||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) |
|||
|
|||
resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { |
|||
drainRespBody(resp) |
|||
return nil, err |
|||
} |
|||
|
|||
return resp, nil |
|||
} |
|||
|
|||
// returns true if the specified resource exists
|
|||
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return false, nil, err |
|||
} |
|||
|
|||
uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) |
|||
headers := f.client.getStandardHeaders() |
|||
|
|||
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) |
|||
if resp != nil { |
|||
defer drainRespBody(resp) |
|||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { |
|||
return resp.StatusCode == http.StatusOK, resp.Header, nil |
|||
} |
|||
} |
|||
return false, nil, err |
|||
} |
|||
|
|||
// creates a resource depending on the specified resource type
//
// Returns the response headers; the status code must be one of
// expectedResponseCodes or the checkRespCode error is returned.
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
	resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
	if err != nil {
		return nil, err
	}
	// Drain so the connection can be reused; headers remain valid after.
	defer drainRespBody(resp)
	return resp.Header, checkRespCode(resp, expectedResponseCodes)
}
|||
|
|||
// creates a resource depending on the specified resource type, doesn't close the response body
|
|||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
values := getURLInitValues(compNone, res) |
|||
combinedParams := mergeParams(values, urlParams) |
|||
uri := f.client.getEndpoint(fileServiceName, path, combinedParams) |
|||
extraHeaders = f.client.protectUserAgent(extraHeaders) |
|||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) |
|||
|
|||
return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) |
|||
} |
|||
|
|||
// returns HTTP header data for the specified directory or share
//
// Only a 200 OK response is accepted; any other status is converted into
// an error by checkRespCode.
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
	resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
	if err != nil {
		return nil, err
	}
	// Drain so the connection can be reused; headers remain valid after.
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	return resp.Header, nil
}
|||
|
|||
// gets the specified resource, doesn't close the response body
|
|||
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
params = mergeParams(params, getURLInitValues(comp, res)) |
|||
uri := f.client.getEndpoint(fileServiceName, path, params) |
|||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) |
|||
|
|||
return f.client.exec(verb, uri, headers, nil, f.auth) |
|||
} |
|||
|
|||
// deletes the resource and returns the response
//
// A 202 Accepted status is the only success code; anything else is
// converted into an error by checkRespCode.
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
	resp, err := f.deleteResourceNoClose(path, res, options)
	if err != nil {
		return err
	}
	// Drain so the connection can be reused.
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusAccepted})
}
|||
|
|||
// deletes the resource and returns the response, doesn't close the response body
|
|||
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) |
|||
uri := f.client.getEndpoint(fileServiceName, path, values) |
|||
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) |
|||
} |
|||
|
|||
// merges metadata into extraHeaders and returns extraHeaders
|
|||
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { |
|||
if metadata == nil && extraHeaders == nil { |
|||
return nil |
|||
} |
|||
if extraHeaders == nil { |
|||
extraHeaders = make(map[string]string) |
|||
} |
|||
for k, v := range metadata { |
|||
extraHeaders[userDefinedMetadataHeaderPrefix+k] = v |
|||
} |
|||
return extraHeaders |
|||
} |
|||
|
|||
// sets extra header data for the specified resource
|
|||
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { |
|||
if err := f.checkForStorageEmulator(); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) |
|||
uri := f.client.getEndpoint(fileServiceName, path, params) |
|||
extraHeaders = f.client.protectUserAgent(extraHeaders) |
|||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) |
|||
|
|||
resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
defer drainRespBody(resp) |
|||
|
|||
return resp.Header, checkRespCode(resp, []int{http.StatusOK}) |
|||
} |
|||
|
|||
//checkForStorageEmulator determines if the client is setup for use with
|
|||
//Azure Storage Emulator, and returns a relevant error
|
|||
func (f FileServiceClient) checkForStorageEmulator() error { |
|||
if f.client.accountName == StorageEmulatorAccountName { |
|||
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") |
|||
} |
|||
return nil |
|||
} |
@ -0,0 +1,201 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"time" |
|||
) |
|||
|
|||
// lease constants.
const (
	// Header names used by the Lease Blob REST operation.
	leaseHeaderPrefix = "x-ms-lease-"
	headerLeaseID     = "x-ms-lease-id"
	leaseAction       = "x-ms-lease-action"
	leaseBreakPeriod  = "x-ms-lease-break-period"
	leaseDuration     = "x-ms-lease-duration"
	leaseProposedID   = "x-ms-proposed-lease-id"
	leaseTime         = "x-ms-lease-time"

	// Values for the x-ms-lease-action header.
	acquireLease = "acquire"
	renewLease   = "renew"
	changeLease  = "change"
	releaseLease = "release"
	breakLease   = "break"
)
|||
|
|||
// leasePut is common PUT code for the various acquire/release/break etc functions.
|
|||
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { |
|||
params := url.Values{"comp": {"lease"}} |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) |
|||
|
|||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
defer drainRespBody(resp) |
|||
|
|||
if err := checkRespCode(resp, []int{expectedStatus}); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return resp.Header, nil |
|||
} |
|||
|
|||
// LeaseOptions includes options for all operations regarding leasing blobs
type LeaseOptions struct {
	Timeout           uint       // server-side timeout, in seconds
	Origin            string     `header:"Origin"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// AcquireLease creates a lease for a blob
// returns leaseID acquired
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = acquireLease

	// Clamp the requested duration to what the service accepts: -1
	// (infinite) passes through untouched; values above 60 — or any value
	// when the client targets an API version older than 2012-02-12 —
	// become 60; values below 15 become 15.
	if leaseTimeInSeconds == -1 {
		// Do nothing, but don't trigger the following clauses.
	} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
		leaseTimeInSeconds = 60
	} else if leaseTimeInSeconds < 15 {
		leaseTimeInSeconds = 15
	}

	headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)

	if proposedLeaseID != "" {
		headers[leaseProposedID] = proposedLeaseID
	}

	respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
	if err != nil {
		return "", err
	}

	returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))

	if returnedLeaseID != "" {
		return returnedLeaseID, nil
	}

	return "", errors.New("LeaseID not returned")
}
|||
|
|||
// BreakLease breaks the lease for a blob
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
	// No break period is supplied; see BreakLeaseWithBreakPeriod for the
	// variant that sets one.
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	return b.breakLeaseCommon(headers, options)
}
|||
|
|||
// BreakLeaseWithBreakPeriod breaks the lease for a blob
// breakPeriodInSeconds is used to determine how long until new lease can be created.
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
	return b.breakLeaseCommon(headers, options)
}
|||
|
|||
// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
|
|||
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { |
|||
|
|||
respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) |
|||
if err != nil { |
|||
return 0, err |
|||
} |
|||
|
|||
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) |
|||
if breakTimeoutStr != "" { |
|||
breakTimeout, err = strconv.Atoi(breakTimeoutStr) |
|||
if err != nil { |
|||
return 0, err |
|||
} |
|||
} |
|||
|
|||
return breakTimeout, nil |
|||
} |
|||
|
|||
// ChangeLease changes a lease ID for a blob
|
|||
// Returns the new LeaseID acquired
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|||
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers[leaseAction] = changeLease |
|||
headers[headerLeaseID] = currentLeaseID |
|||
headers[leaseProposedID] = proposedLeaseID |
|||
|
|||
respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) |
|||
if newLeaseID != "" { |
|||
return newLeaseID, nil |
|||
} |
|||
|
|||
return "", errors.New("LeaseID not returned") |
|||
} |
|||
|
|||
// ReleaseLease releases the lease for a blob
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|||
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers[leaseAction] = releaseLease |
|||
headers[headerLeaseID] = currentLeaseID |
|||
|
|||
_, err := b.leaseCommonPut(headers, http.StatusOK, options) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
|||
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { |
|||
headers := b.Container.bsc.client.getStandardHeaders() |
|||
headers[leaseAction] = renewLease |
|||
headers[headerLeaseID] = currentLeaseID |
|||
|
|||
_, err := b.leaseCommonPut(headers, http.StatusOK, options) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
return nil |
|||
} |
@ -0,0 +1,171 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"time" |
|||
) |
|||
|
|||
// Message represents an Azure message.
type Message struct {
	Queue        *Queue      // owning queue; used to build request paths
	Text         string      `xml:"MessageText"`
	ID           string      `xml:"MessageId"`
	Insertion    TimeRFC1123 `xml:"InsertionTime"`
	Expiration   TimeRFC1123 `xml:"ExpirationTime"`
	PopReceipt   string      `xml:"PopReceipt"` // required to update/delete a dequeued message
	NextVisible  TimeRFC1123 `xml:"TimeNextVisible"`
	DequeueCount int         `xml:"DequeueCount"`
}
|||
|
|||
// buildPath returns the URL path of this message inside its queue's
// messages collection.
func (m *Message) buildPath() string {
	return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
}
|||
|
|||
// PutMessageOptions is the set of options can be specified for Put Messsage
// operation. A zero struct does not use any preferences for the request.
type PutMessageOptions struct {
	Timeout           uint   // server-side timeout, in seconds
	VisibilityTimeout int    // seconds the message stays invisible after insertion; 0 omits the parameter
	MessageTTL        int    // message time-to-live in seconds; 0 omits the parameter
	RequestID         string `header:"x-ms-client-request-id"`
}
|||
|
|||
// Put operation adds a new message to the back of the message queue.
// The message text is XML-marshalled into the request body; on a 201
// response the body is unmarshalled back into m.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
func (m *Message) Put(options *PutMessageOptions) error {
	query := url.Values{}
	headers := m.Queue.qsc.client.getStandardHeaders()

	// Serialize the body up front so Content-Length can be set exactly.
	req := putMessageRequest{MessageText: m.Text}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(nn)

	if options != nil {
		// Zero values mean "not set" and are omitted from the query.
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		if options.MessageTTL != 0 {
			query.Set("messagettl", strconv.Itoa(options.MessageTTL))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
	resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	err = checkRespCode(resp, []int{http.StatusCreated})
	if err != nil {
		return err
	}
	// NOTE(review): the response root is a QueueMessagesList envelope;
	// unmarshalling straight into m relies on xmlUnmarshal's behavior —
	// confirm fields are actually populated if this path matters.
	err = xmlUnmarshal(resp.Body, m)
	if err != nil {
		return err
	}
	return nil
}
|||
|
|||
// UpdateMessageOptions is the set of options that can be specified for the
// Update Message operation. A zero struct does not use any preferences for
// the request.
type UpdateMessageOptions struct {
	Timeout           uint // sent via addTimeout; 0 means omitted
	VisibilityTimeout int  // seconds; 0 keeps the default of 0 that Update always sets
	RequestID         string `header:"x-ms-client-request-id"`
}
|||
|
|||
// Update operation updates the specified message. m.PopReceipt (obtained
// from a previous retrieval/update) is sent when non-empty; the fresh pop
// receipt and next-visible time returned by the service are stored back
// into m.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
func (m *Message) Update(options *UpdateMessageOptions) error {
	query := url.Values{}
	if m.PopReceipt != "" {
		query.Set("popreceipt", m.PopReceipt)
	}

	headers := m.Queue.qsc.client.getStandardHeaders()
	req := putMessageRequest{MessageText: m.Text}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(nn)
	// visibilitytimeout is required for Update (zero or greater) so set the default here
	query.Set("visibilitytimeout", "0")
	if options != nil {
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)

	resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	// Capture the updated receipt and visibility from the response headers
	// so the caller can keep operating on m.
	m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
	nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
	if nextTimeStr != "" {
		nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
		if err != nil {
			return err
		}
		m.NextVisible = TimeRFC1123(nextTime)
	}

	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
// Delete operation deletes the specified message.
|
|||
//
|
|||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
|||
func (m *Message) Delete(options *QueueServiceOptions) error { |
|||
params := url.Values{"popreceipt": {m.PopReceipt}} |
|||
headers := m.Queue.qsc.client.getStandardHeaders() |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) |
|||
|
|||
resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusNoContent}) |
|||
} |
|||
|
|||
// putMessageRequest is the XML request body used by the Put and Update
// Message operations.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}
@ -0,0 +1,48 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// MetadataLevel determines if operations should return a payload,
// and its level of detail.
type MetadataLevel string
|||
|
|||
// These consts are meant to help with OData supported operations.
const (
	OdataTypeSuffix = "@odata.type"

	// Types

	OdataBinary   = "Edm.Binary"
	OdataDateTime = "Edm.DateTime"
	OdataDouble   = "Edm.Double"
	OdataGUID     = "Edm.Guid"
	OdataInt64    = "Edm.Int64"

	// Query options

	OdataFilter  = "$filter"
	OdataOrderBy = "$orderby"
	OdataTop     = "$top"
	OdataSkip    = "$skip"
	OdataCount   = "$count"
	OdataExpand  = "$expand"
	OdataSelect  = "$select"
	OdataSearch  = "$search"

	// MetadataLevel values: Accept/Content-Type strings controlling how
	// much OData metadata the service includes in JSON payloads.
	EmptyPayload    MetadataLevel = ""
	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
)
@ -0,0 +1,203 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"net/url" |
|||
"time" |
|||
) |
|||
|
|||
// GetPageRangesResponse contains the response fields from
// Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct {
	XMLName  xml.Name    `xml:"PageList"`
	PageList []PageRange `xml:"PageRange"` // valid page ranges, as returned by the service
}
|||
|
|||
// PageRange contains information about a page of a page blob from
// Get Pages Range call. Start and End are inclusive byte offsets.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct {
	Start int64 `xml:"Start"`
	End   int64 `xml:"End"`
}
|||
|
|||
// Sentinel errors for blob copy operations (returned by copy-monitoring
// code elsewhere in this package).
var (
	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
)
|||
|
|||
// PutPageOptions includes the options for a put page operation (both
// "update" and "clear" writes). Each tagged field maps to the request
// header named in its `header` tag.
type PutPageOptions struct {
	Timeout                           uint // sent via addTimeout; 0 means omitted
	LeaseID                           string     `header:"x-ms-lease-id"`
	IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
	IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
	IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
	IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
	IfMatch                           string     `header:"If-Match"`
	IfNoneMatch                       string     `header:"If-None-Match"`
	RequestID                         string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// WriteRange writes a range of pages to a page blob.
|
|||
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
|||
// multiplies by 512.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
|||
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { |
|||
if bytes == nil { |
|||
return errors.New("bytes cannot be nil") |
|||
} |
|||
return b.modifyRange(blobRange, bytes, options) |
|||
} |
|||
|
|||
// ClearRange clears the given range in a page blob.
|
|||
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
|||
// multiplies by 512.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
|||
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { |
|||
return b.modifyRange(blobRange, nil, options) |
|||
} |
|||
|
|||
// modifyRange issues a single Put Page request that either updates
// (bytes != nil) or clears (bytes == nil) the given page range. Ranges must
// be 512-byte aligned: Start on a 512 boundary, End on a boundary minus one.
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	if blobRange.End < blobRange.Start {
		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if blobRange.Start%512 != 0 {
		return errors.New("the value for rangeStart must be a multiple of 512")
	}
	if blobRange.End%512 != 511 {
		return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
	}

	params := url.Values{"comp": {"page"}}

	// default to clear
	write := "clear"
	var cl uint64

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		cl = (blobRange.End - blobRange.Start) + 1
	}

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = write
	headers["x-ms-range"] = blobRange.String()
	headers["Content-Length"] = fmt.Sprintf("%v", cl) // 0 for a clear operation

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}
|||
|
|||
// GetPageRangesOptions includes the options for a get page ranges operation
type GetPageRangesOptions struct {
	Timeout          uint       // sent via addTimeout; 0 means omitted
	Snapshot         *time.Time // sent via addSnapshot query parameter
	PreviousSnapshot *time.Time // sent as the prevsnapshot query parameter
	Range            *BlobRange // sent as the Range request header
	LeaseID          string     `header:"x-ms-lease-id"`
	RequestID        string     `header:"x-ms-client-request-id"`
}
|||
|
|||
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
	params := url.Values{"comp": {"pagelist"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		if options.PreviousSnapshot != nil {
			params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
		}
		if options.Range != nil {
			headers["Range"] = options.Range.String()
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	// out is returned zero-valued on any failure below.
	var out GetPageRangesResponse
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return out, err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return out, err
	}
	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
|||
|
|||
// PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
	if b.Properties.ContentLength%512 != 0 {
		return errors.New("Content length must be aligned to a 512-byte boundary")
	}

	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	// The blob is created empty; its maximum size travels in this header.
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
	headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// NOTE(review): no drainRespBody here — respondCreation is presumed to
	// consume/close the response body; confirm against its definition.
	return b.respondCreation(resp, BlobTypePage)
}
@ -0,0 +1,436 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"time" |
|||
) |
|||
|
|||
// approximateMessagesCountHeader is the response header carrying the
// queue's approximate message count (read by GetMetadata).
const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
)
|||
|
|||
// QueueAccessPolicy represents each access policy in the queue ACL.
// The Can* flags map to the "r", "a", "u" and "p" permission characters of
// the stored access policy.
type QueueAccessPolicy struct {
	ID         string // signed identifier of the policy
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool // "r"
	CanAdd     bool // "a"
	CanUpdate  bool // "u"
	CanProcess bool // "p"
}
|||
|
|||
// QueuePermissions represents the queue ACLs: the full set of stored
// access policies attached to a queue.
type QueuePermissions struct {
	AccessPolicies []QueueAccessPolicy
}
|||
|
|||
// SetQueuePermissionOptions includes options for a set queue permissions operation
type SetQueuePermissionOptions struct {
	Timeout   uint // sent via addTimeout; 0 means omitted
	RequestID string `header:"x-ms-client-request-id"`
}
|||
|
|||
// Queue represents an Azure queue.
type Queue struct {
	qsc      *QueueServiceClient // owning service client used for auth and requests
	Name     string
	Metadata map[string]string // user-defined metadata; written by SetMetadata, read by GetMetadata
	// AproxMessageCount is populated by GetMetadata from the
	// x-ms-approximate-messages-count response header. (Exported name is
	// kept as-is, misspelling included, for backward compatibility.)
	AproxMessageCount uint64
}
|||
|
|||
// buildPath returns the URI path of this queue ("/<name>").
func (q *Queue) buildPath() string {
	return fmt.Sprintf("/%s", q.Name)
}
|||
|
|||
// buildPathMessages returns the URI path of this queue's messages
// collection ("/<name>/messages").
func (q *Queue) buildPathMessages() string {
	return fmt.Sprintf("%s/messages", q.buildPath())
}
|||
|
|||
// QueueServiceOptions includes options for some queue service operations
type QueueServiceOptions struct {
	Timeout   uint // sent via addTimeout; 0 means omitted
	RequestID string `header:"x-ms-client-request-id"`
}
|||
|
|||
// Create operation creates a queue under the given account.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
|
|||
func (q *Queue) Create(options *QueueServiceOptions) error { |
|||
params := url.Values{} |
|||
headers := q.qsc.client.getStandardHeaders() |
|||
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) |
|||
|
|||
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusCreated}) |
|||
} |
|||
|
|||
// Delete operation permanently deletes the specified queue.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
|
|||
func (q *Queue) Delete(options *QueueServiceOptions) error { |
|||
params := url.Values{} |
|||
headers := q.qsc.client.getStandardHeaders() |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) |
|||
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusNoContent}) |
|||
} |
|||
|
|||
// Exists returns true if a queue with given name exists.
// It probes the queue metadata endpoint: 200 means the queue exists,
// 404 means it does not (both are non-error outcomes); any other status
// is converted to an error. When exec fails with no response, the
// transport error is returned as-is.
func (q *Queue) Exists() (bool, error) {
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
	resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
	if resp != nil {
		defer drainRespBody(resp)
		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusOK, nil
		}
		err = getErrorFromResponse(resp)
	}
	return false, err
}
|||
|
|||
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs; the request
// carries the current contents of q.Metadata.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := q.qsc.client.getStandardHeaders()
	headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)

	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-values pairs. On success q.Metadata is replaced and
// q.AproxMessageCount is refreshed when the service reports it.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
//
// Because the way Golang's http client (and http.Header in particular)
// canonicalize header names, the returned metadata names would always
// be all lower case.
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	// The approximate-count header is optional; q.AproxMessageCount keeps
	// its previous value when the header is absent.
	aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
	if aproxMessagesStr != "" {
		aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
		if err != nil {
			return err
		}
		q.AproxMessageCount = aproxMessages
	}

	q.Metadata = getMetadataFromHeaders(resp.Header)
	return nil
}
|||
|
|||
// GetMessageReference returns a message object with the specified text.
|
|||
func (q *Queue) GetMessageReference(text string) *Message { |
|||
return &Message{ |
|||
Queue: q, |
|||
Text: text, |
|||
} |
|||
} |
|||
|
|||
// GetMessagesOptions is the set of options that can be specified for the
// Get Messages operation. A zero struct does not use any preferences for
// the request; zero-valued fields are omitted from the query string.
type GetMessagesOptions struct {
	Timeout           uint // sent via addTimeout; 0 means omitted
	NumOfMessages     int  // numofmessages query parameter; 0 means not sent
	VisibilityTimeout int  // visibilitytimeout query parameter; 0 means not sent
	RequestID         string `header:"x-ms-client-request-id"`
}
|||
|
|||
// messages is the XML envelope wrapping the messages returned by the
// Get Messages and Peek Messages operations.
type messages struct {
	XMLName  xml.Name  `xml:"QueueMessagesList"`
	Messages []Message `xml:"QueueMessage"`
}
|||
|
|||
// GetMessages operation retrieves one or more messages from the front of the
// queue. Each returned Message has its Queue field set to q, so it can be
// updated or deleted directly.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
	query := url.Values{}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		// Zero values mean "not set" and are omitted from the query.
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer resp.Body.Close()

	var out messages
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return []Message{}, err
	}
	// Link every message back to its parent queue.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, err
}
|||
|
|||
// PeekMessagesOptions is the set of options that can be specified for the
// Peek Message operation. A zero struct does not use any preferences for
// the request.
type PeekMessagesOptions struct {
	Timeout       uint // sent via addTimeout; 0 means omitted
	NumOfMessages int  // numofmessages query parameter; 0 means not sent
	RequestID     string `header:"x-ms-client-request-id"`
}
|||
|
|||
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message. Each returned Message has its
// Queue field set to q.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
	query := url.Values{"peekonly": {"true"}} // Required for peek operation
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer resp.Body.Close()

	var out messages
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return []Message{}, err
	}
	// Link every message back to its parent queue.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, err
}
|||
|
|||
// ClearMessages operation deletes all messages from the specified queue.
|
|||
//
|
|||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
|
|||
func (q *Queue) ClearMessages(options *QueueServiceOptions) error { |
|||
params := url.Values{} |
|||
headers := q.qsc.client.getStandardHeaders() |
|||
|
|||
if options != nil { |
|||
params = addTimeout(params, options.Timeout) |
|||
headers = mergeHeaders(headers, headersFromStruct(*options)) |
|||
} |
|||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) |
|||
|
|||
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer drainRespBody(resp) |
|||
return checkRespCode(resp, []int{http.StatusNoContent}) |
|||
} |
|||
|
|||
// SetPermissions sets up queue permissions (the queue ACL) from the given
// access policies.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
	// Marshal the ACL into the SignedIdentifiers XML body first so
	// Content-Length can be set exactly.
	body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}

	params := url.Values{
		"comp": {"acl"},
	}
	headers := q.qsc.client.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(length)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { |
|||
sil := SignedIdentifiers{ |
|||
SignedIdentifiers: []SignedIdentifier{}, |
|||
} |
|||
for _, qapd := range policies { |
|||
permission := qapd.generateQueuePermissions() |
|||
signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) |
|||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) |
|||
} |
|||
return xmlMarshal(sil) |
|||
} |
|||
|
|||
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { |
|||
// generate the permissions string (raup).
|
|||
// still want the end user API to have bool flags.
|
|||
permissions = "" |
|||
|
|||
if qapd.CanRead { |
|||
permissions += "r" |
|||
} |
|||
|
|||
if qapd.CanAdd { |
|||
permissions += "a" |
|||
} |
|||
|
|||
if qapd.CanUpdate { |
|||
permissions += "u" |
|||
} |
|||
|
|||
if qapd.CanProcess { |
|||
permissions += "p" |
|||
} |
|||
|
|||
return permissions |
|||
} |
|||
|
|||
// GetQueuePermissionOptions includes options for a get queue permissions operation
type GetQueuePermissionOptions struct {
	Timeout   uint // sent via addTimeout; 0 means omitted
	RequestID string `header:"x-ms-client-request-id"`
}
|||
|
|||
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
// If timeout is 0 then it will not be passed to Azure
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
	params := url.Values{
		"comp": {"acl"},
	}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Decode the SignedIdentifiers document into the shared access-policy
	// XML structs, then convert to the public QueuePermissions shape.
	var ap AccessPolicy
	err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return buildQueueAccessPolicy(ap, &resp.Header), nil
}
|||
|
|||
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { |
|||
permissions := QueuePermissions{ |
|||
AccessPolicies: []QueueAccessPolicy{}, |
|||
} |
|||
|
|||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { |
|||
qapd := QueueAccessPolicy{ |
|||
ID: policy.ID, |
|||
StartTime: policy.AccessPolicy.StartTime, |
|||
ExpiryTime: policy.AccessPolicy.ExpiryTime, |
|||
} |
|||
qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") |
|||
qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") |
|||
qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") |
|||
qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") |
|||
|
|||
permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) |
|||
} |
|||
return &permissions |
|||
} |
@ -0,0 +1,146 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// QueueSASOptions are options to construct a queue SAS
// URI (permissions plus the shared SAS options such as start/expiry,
// IP restriction and protocol).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type QueueSASOptions struct {
	QueueSASPermissions
	SASOptions
}
|||
|
|||
// QueueSASPermissions includes the available permissions for
// a queue SAS URI.
type QueueSASPermissions struct {
	Read    bool
	Add     bool
	Update  bool
	Process bool
}

// buildString encodes the enabled permissions as the compact "raup"
// string required by the "sp" SAS query parameter.
func (q QueueSASPermissions) buildString() string {
	encoded := make([]byte, 0, 4)
	if q.Read {
		encoded = append(encoded, 'r')
	}
	if q.Add {
		encoded = append(encoded, 'a')
	}
	if q.Update {
		encoded = append(encoded, 'u')
	}
	if q.Process {
		encoded = append(encoded, 'p')
	}
	return string(encoded)
}
|||
|
|||
// GetSASURI creates an URL to the specified queue which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	// A zero Start time is omitted from the string-to-sign entirely.
	signedStart := ""
	if options.Start != (time.Time{}) {
		signedStart = options.Start.UTC().Format(time.RFC3339)
	}
	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)

	protocols := "https,http"
	if options.UseHTTPS {
		protocols = "https"
	}

	permissions := options.QueueSASPermissions.buildString()
	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
	if err != nil {
		return "", err
	}

	// Sign the canonical string and assemble the SAS query parameters.
	sig := q.qsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {q.qsc.client.apiVersion},
		"se":  {signedExpiry},
		"sp":  {permissions},
		"sig": {sig},
	}

	// Protocol and IP restrictions are only understood by API versions
	// 2015-04-05 and later (lexical compare works for these date-shaped versions).
	if q.qsc.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		addQueryParameter(sasParams, "sip", options.IP)
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
|||
|
|||
// queueSASStringToSign builds the canonical newline-separated string that is
// HMAC-signed to produce a queue SAS token. The field set and ordering depend
// on the service API version; versions older than 2013-08-15 are unsupported.
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
	// From 2015-02-21 onward the resource path is prefixed with the service name.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/queue" + canonicalizedResource
	}

	switch {
	case signedVersion >= "2015-04-05":
		// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
		fields := []string{
			signedPermissions,
			signedStart,
			signedExpiry,
			canonicalizedResource,
			signedIdentifier,
			signedIP,
			protocols,
			signedVersion,
		}
		return strings.Join(fields, "\n"), nil
	case signedVersion >= "2013-08-15":
		// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
		fields := []string{
			signedPermissions,
			signedStart,
			signedExpiry,
			canonicalizedResource,
			signedIdentifier,
			signedVersion,
		}
		return strings.Join(fields, "\n"), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
@ -0,0 +1,42 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	client Client         // underlying storage-account client
	auth   authentication // authentication scheme applied to queue requests
}
|||
|
|||
// GetServiceProperties gets the properties of your storage account's queue service.
// It delegates to the shared client helper with the queue service name.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return q.client.getServiceProperties(queueServiceName, q.auth)
}
|||
|
|||
// SetServiceProperties sets the properties of your storage account's queue service.
// It delegates to the shared client helper with the queue service name.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
	return q.client.setServiceProperties(props, queueServiceName, q.auth)
}
|||
|
|||
// GetQueueReference returns a Queue object for the specified queue name.
// No request is made to the service; this only builds a client-side
// reference bound to this QueueServiceClient.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
	return &Queue{
		qsc: q,
		Name: name,
	}
}
@ -0,0 +1,216 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
) |
|||
|
|||
// Share represents an Azure file share.
type Share struct {
	fsc        *FileServiceClient // service client this share belongs to
	Name       string             `xml:"Name"`
	Properties ShareProperties    `xml:"Properties"`
	Metadata   map[string]string
}

// ShareProperties contains various properties of a share.
type ShareProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
	// Quota for the share; SetProperties accepts values in [1, 5120],
	// and a value of 0 means "unset" (the quota header is not sent).
	Quota int `xml:"Quota"`
}
|||
|
|||
// builds the complete path for this share object.
|
|||
func (s *Share) buildPath() string { |
|||
return fmt.Sprintf("/%s", s.Name) |
|||
} |
|||
|
|||
// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) Create(options *FileRequestOptions) error {
	// A positive quota is forwarded via the x-ms-share-quota header.
	extraheaders := map[string]string{}
	if s.Properties.Quota > 0 {
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	params := prepareOptions(options)
	headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	// Refresh Etag/Last-Modified from the creation response headers.
	s.updateEtagAndLastModified(headers)
	return nil
}
|||
|
|||
// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	extraheaders := map[string]string{}
	if s.Properties.Quota > 0 {
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	// NOTE(review): unlike Create, s.Metadata is not merged into the request
	// headers here — confirm whether that asymmetry is intentional.
	params := prepareOptions(options)
	resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
	if resp != nil {
		defer drainRespBody(resp)
		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
			if resp.StatusCode == http.StatusCreated {
				s.updateEtagAndLastModified(resp.Header)
				return true, nil
			}
			// Conflict: share already exists; load its current attributes instead.
			return false, s.FetchAttributes(nil)
		}
	}

	return false, err
}
|||
|
|||
// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist the operation fails
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) Delete(options *FileRequestOptions) error {
	return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
}
|||
|
|||
// DeleteIfExists operation marks this share for deletion if it exists.
// It returns true when the share was actually deleted (202 Accepted) and
// false when the share did not exist (404), without treating 404 as an error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
	if resp != nil {
		defer drainRespBody(resp)
		if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}
|||
|
|||
// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
// On success it also refreshes the cached Etag/Last-Modified and quota
// from the response headers.
func (s *Share) Exists() (bool, error) {
	exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
	if exists {
		s.updateEtagAndLastModified(headers)
		s.updateQuota(headers)
	}
	return exists, err
}
|||
|
|||
// FetchAttributes retrieves metadata and properties for this share.
// A HEAD request is issued; properties (Etag, Last-Modified, quota) and
// metadata are read from the response headers into the receiver.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
	params := prepareOptions(options)
	headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	s.updateQuota(headers)
	s.Metadata = getMetadataFromHeaders(headers)

	return nil
}
|||
|
|||
// GetRootDirectoryReference returns a Directory object at the root of this share.
// No request is made to the service; the returned Directory is a client-side
// reference bound to this share's service client.
func (s *Share) GetRootDirectoryReference() *Directory {
	return &Directory{
		fsc: s.fsc,
		share: s,
	}
}
|||
|
|||
// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
	return s.fsc
}
|||
|
|||
// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
func (s *Share) SetMetadata(options *FileRequestOptions) error {
	// s.Metadata is sent as x-ms-meta-* headers via mergeMDIntoExtraHeaders.
	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}
|||
|
|||
// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
func (s *Share) SetProperties(options *FileRequestOptions) error {
	// Quota is validated client-side: only values in [1, 5120] are sent;
	// zero (unset) omits the header entirely.
	extraheaders := map[string]string{}
	if s.Properties.Quota > 0 {
		if s.Properties.Quota > 5120 {
			return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
		}
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}
|||
|
|||
// updateEtagAndLastModified refreshes the cached Etag and Last-Modified
// properties from the given response headers.
func (s *Share) updateEtagAndLastModified(headers http.Header) {
	s.Properties.Etag = headers.Get("Etag")
	s.Properties.LastModified = headers.Get("Last-Modified")
}
|||
|
|||
// updateQuota refreshes the cached quota value from the x-ms-share-quota
// response header; a missing or non-numeric header leaves the value unchanged.
func (s *Share) updateQuota(headers http.Header) {
	quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
	if err == nil {
		s.Properties.Quota = quota
	}
}
|||
|
|||
// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
@ -0,0 +1,61 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// AccessPolicyDetailsXML has specifics about an access policy
|
|||
// annotated with XML details.
|
|||
type AccessPolicyDetailsXML struct { |
|||
StartTime time.Time `xml:"Start"` |
|||
ExpiryTime time.Time `xml:"Expiry"` |
|||
Permission string `xml:"Permission"` |
|||
} |
|||
|
|||
// SignedIdentifier is a wrapper for a specific policy
|
|||
type SignedIdentifier struct { |
|||
ID string `xml:"Id"` |
|||
AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` |
|||
} |
|||
|
|||
// SignedIdentifiers part of the response from GetPermissions call.
|
|||
type SignedIdentifiers struct { |
|||
SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` |
|||
} |
|||
|
|||
// AccessPolicy is the response type from the GetPermissions call.
|
|||
type AccessPolicy struct { |
|||
SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` |
|||
} |
|||
|
|||
// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
|
|||
// AccessPolicy struct which will get converted to XML.
|
|||
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { |
|||
return SignedIdentifier{ |
|||
ID: id, |
|||
AccessPolicy: AccessPolicyDetailsXML{ |
|||
StartTime: startTime.UTC().Round(time.Second), |
|||
ExpiryTime: expiryTime.UTC().Round(time.Second), |
|||
Permission: permissions, |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func updatePermissions(permissions, permission string) bool { |
|||
return strings.Contains(permissions, permission) |
|||
} |
@ -0,0 +1,150 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
) |
|||
|
|||
// ServiceProperties represents the storage account service properties
type ServiceProperties struct {
	Logging               *Logging
	HourMetrics           *Metrics
	MinuteMetrics         *Metrics
	Cors                  *Cors
	DeleteRetentionPolicy *RetentionPolicy // blob storage only
	StaticWebsite         *StaticWebsite   // blob storage only
}

// Logging represents the Azure Analytics Logging settings
type Logging struct {
	Version         string
	Delete          bool // log delete requests
	Read            bool // log read requests
	Write           bool // log write requests
	RetentionPolicy *RetentionPolicy
}

// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
	Enabled bool
	Days    *int // nil when retention is disabled
}

// Metrics provide request statistics.
type Metrics struct {
	Version         string
	Enabled         bool
	IncludeAPIs     *bool
	RetentionPolicy *RetentionPolicy
}

// Cors includes all the CORS rules
type Cors struct {
	CorsRule []CorsRule
}

// CorsRule includes all settings for a Cors rule
type CorsRule struct {
	AllowedOrigins  string
	AllowedMethods  string
	MaxAgeInSeconds int
	ExposedHeaders  string
	AllowedHeaders  string
}

// StaticWebsite - The properties that enable an account to host a static website
type StaticWebsite struct {
	// Enabled - Indicates whether this account is hosting a static website
	Enabled bool
	// IndexDocument - The default name of the index page under each directory
	IndexDocument *string
	// ErrorDocument404Path - The absolute path of the custom 404 page
	ErrorDocument404Path *string
}
|||
|
|||
// getServiceProperties fetches the service-level settings for the given
// service name via GET ?restype=service&comp=properties and decodes the
// XML response body into a ServiceProperties value.
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
	query := url.Values{
		"restype": {"service"},
		"comp":    {"properties"},
	}
	uri := c.getEndpoint(service, "", query)
	headers := c.getStandardHeaders()

	resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	var out ServiceProperties
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return nil, err
	}

	return &out, nil
}
|||
|
|||
// setServiceProperties updates the service-level settings for the given
// service name via PUT ?restype=service&comp=properties with an XML body.
// The blob-only fields are included only when targeting the blob service.
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
	query := url.Values{
		"restype": {"service"},
		"comp":    {"properties"},
	}
	uri := c.getEndpoint(service, "", query)

	// Ideally, StorageServiceProperties would be the output struct
	// This is to avoid golint stuttering, while generating the correct XML
	type StorageServiceProperties struct {
		Logging               *Logging
		HourMetrics           *Metrics
		MinuteMetrics         *Metrics
		Cors                  *Cors
		DeleteRetentionPolicy *RetentionPolicy
		StaticWebsite         *StaticWebsite
	}
	input := StorageServiceProperties{
		Logging:       props.Logging,
		HourMetrics:   props.HourMetrics,
		MinuteMetrics: props.MinuteMetrics,
		Cors:          props.Cors,
	}
	// only set these fields for blob storage else it's invalid XML
	if service == blobServiceName {
		input.DeleteRetentionPolicy = props.DeleteRetentionPolicy
		input.StaticWebsite = props.StaticWebsite
	}

	body, length, err := xmlMarshal(input)
	if err != nil {
		return err
	}

	headers := c.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(length)

	resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusAccepted})
}
@ -0,0 +1,423 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// Table service constants: the URI path of the Tables collection, the query
// parameters used for paging, and the response headers carrying continuation
// tokens.
const (
	tablesURIPath                  = "/Tables"
	nextTableQueryParameter        = "NextTableName"
	headerNextPartitionKey         = "x-ms-continuation-NextPartitionKey"
	headerNextRowKey               = "x-ms-continuation-NextRowKey"
	nextPartitionKeyQueryParameter = "NextPartitionKey"
	nextRowKeyQueryParameter       = "NextRowKey"
)
|||
|
|||
// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanAppend  bool
	CanUpdate  bool
	CanDelete  bool
}
|
|||
// Table represents an Azure table.
type Table struct {
	tsc           *TableServiceClient // owning service client
	Name          string              `json:"TableName"`
	OdataEditLink string              `json:"odata.editLink"`
	OdataID       string              `json:"odata.id"`
	OdataMetadata string              `json:"odata.metadata"`
	OdataType     string              `json:"odata.type"`
}
|||
|
|||
// EntityQueryResult contains the response from
// ExecuteQuery and ExecuteQueryNextResults functions.
type EntityQueryResult struct {
	OdataMetadata string    `json:"odata.metadata"`
	Entities      []*Entity `json:"value"`
	QueryNextLink           // embedded paging state (NextLink and metadata level)
	table         *Table    // back-reference used by NextResults
}
|||
|
|||
// continuationToken holds the partition/row continuation values the service
// returns when a query result spans more than one page.
type continuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}
|||
|
|||
func (t *Table) buildPath() string { |
|||
return fmt.Sprintf("/%s", t.Name) |
|||
} |
|||
|
|||
func (t *Table) buildSpecificPath() string { |
|||
return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) |
|||
} |
|||
|
|||
// Get gets the referenced table.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
	// A metadata level is required; without one the response cannot be decoded.
	if ml == EmptyPayload {
		return errEmptyPayload
	}

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	headers := t.tsc.client.getStandardHeaders()
	headers[headerAccept] = string(ml)

	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Populate this Table's OData fields in place from the response body.
	err = json.Unmarshal(respBody, t)
	if err != nil {
		return err
	}
	return nil
}
|||
|
|||
// Create creates the referenced table.
// This function fails if the name is not compliant
// with the specification or the tables already exists.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	})

	// Request body carries only the table name as JSON.
	type createTableRequest struct {
		TableName string `json:"TableName"`
	}
	req := createTableRequest{TableName: t.Name}
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(req); err != nil {
		return err
	}

	headers := t.tsc.client.getStandardHeaders()
	headers = addReturnContentHeaders(headers, ml)
	headers = addBodyRelatedHeaders(headers, buf.Len())
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// With no-content responses the service replies 204; with a metadata
	// payload it replies 201 Created.
	if ml == EmptyPayload {
		if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
			return err
		}
	} else {
		if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
			return err
		}
	}

	// When metadata was requested, refresh this Table from the response body.
	if ml != EmptyPayload {
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		err = json.Unmarshal(data, t)
		if err != nil {
			return err
		}
	}

	return nil
}
|||
|
|||
// Delete deletes the referenced table.
// This function fails if the table is not present.
// Be advised: Delete deletes all the entries that may be present.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
func (t *Table) Delete(timeout uint, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
		"timeout": {strconv.Itoa(int(timeout))},
	})

	headers := t.tsc.client.getStandardHeaders()
	// Delete has no response body, so request no content back.
	headers = addReturnContentHeaders(headers, EmptyPayload)
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
// QueryOptions includes options for a query entities operation.
// Top, filter and select are OData query options.
type QueryOptions struct {
	Top       uint     // maximum number of entities to return ($top); 0 means unset
	Filter    string   // OData $filter expression; empty means unset
	Select    []string // property names to project ($select); empty means all
	RequestID string   // value for the x-ms-client-request-id header
}
|||
|
|||
// getParameters translates the options into OData query values and request
// headers. A nil receiver yields empty values, so callers may pass nil.
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
	query := url.Values{}
	headers := map[string]string{}
	if options != nil {
		if options.Top > 0 {
			query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
		}
		if options.Filter != "" {
			query.Add(OdataFilter, options.Filter)
		}
		if len(options.Select) > 0 {
			query.Add(OdataSelect, strings.Join(options.Select, ","))
		}
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
	}
	return query, headers
}
|||
|
|||
// QueryEntities returns the entities in the table.
// You can use query options defined by the OData Protocol specification.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
	// A metadata level is required; without one the response cannot be decoded.
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	query, headers := options.getParameters()
	query = addTimeout(query, timeout)
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
	return t.queryEntities(uri, headers, ml)
}
|||
|
|||
// NextResults returns the next page of results
// from a QueryEntities or NextResults operation.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
	if eqr == nil {
		return nil, errNilPreviousResult
	}
	// NextLink is nil when the previous page was the last one.
	if eqr.NextLink == nil {
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})
	// Re-query using the continuation URI and the metadata level of the
	// original query.
	return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
}
|||
|
|||
// SetPermissions sets up table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
	params := url.Values{"comp": {"acl"},
		"timeout": {strconv.Itoa(int(timeout))},
	}

	uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
	headers := t.tsc.client.getStandardHeaders()
	headers = options.addToHeaders(headers)

	// Serialize the policies as a SignedIdentifiers XML payload.
	body, length, err := generateTableACLPayload(tap)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(length)

	resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	return checkRespCode(resp, []int{http.StatusNoContent})
}
|||
|
|||
// generateTableACLPayload converts the access policies into the XML request
// body for Set-Table-ACL, returning the reader and the body length.
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{
		SignedIdentifiers: []SignedIdentifier{},
	}
	for _, tap := range policies {
		// Encode the boolean Can* flags into the compact permission string.
		permission := generateTablePermissions(&tap)
		signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
	}
	return xmlMarshal(sil)
}
|||
|
|||
// GetPermissions gets the table ACL permissions
|
|||
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
|
|||
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { |
|||
params := url.Values{"comp": {"acl"}, |
|||
"timeout": {strconv.Itoa(int(timeout))}, |
|||
} |
|||
|
|||
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) |
|||
headers := t.tsc.client.getStandardHeaders() |
|||
headers = options.addToHeaders(headers) |
|||
|
|||
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
defer resp.Body.Close() |
|||
|
|||
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
var ap AccessPolicy |
|||
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return updateTableAccessPolicy(ap), nil |
|||
} |
|||
|
|||
// queryEntities performs the GET for an entity query (or a continuation
// page), decodes the JSON result set, and wires up pagination state.
// ml selects the OData metadata level sent in the Accept header; EmptyPayload
// leaves the caller-supplied Accept untouched.
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
	headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
	if ml != EmptyPayload {
		headers[headerAccept] = string(ml)
	}

	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var entities EntityQueryResult
	err = json.Unmarshal(data, &entities)
	if err != nil {
		return nil, err
	}

	// Back-reference this table on every entity (and the result set itself)
	// so follow-up operations know their target table.
	for i := range entities.Entities {
		entities.Entities[i].Table = t
	}
	entities.table = t

	// Continuation headers mean more pages remain: rebuild the query URI with
	// the next partition/row key markers so NextLink can fetch the next page.
	contToken := extractContinuationTokenFromHeaders(resp.Header)
	if contToken == nil {
		entities.NextLink = nil
	} else {
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		if contToken.NextPartitionKey != "" {
			v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
		}
		if contToken.NextRowKey != "" {
			v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
		}
		newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
		entities.NextLink = &newURI
		entities.ml = ml
	}

	return &entities, nil
}
|||
|
|||
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { |
|||
ct := continuationToken{ |
|||
NextPartitionKey: h.Get(headerNextPartitionKey), |
|||
NextRowKey: h.Get(headerNextRowKey), |
|||
} |
|||
|
|||
if ct.NextPartitionKey != "" || ct.NextRowKey != "" { |
|||
return &ct |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { |
|||
taps := []TableAccessPolicy{} |
|||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { |
|||
tap := TableAccessPolicy{ |
|||
ID: policy.ID, |
|||
StartTime: policy.AccessPolicy.StartTime, |
|||
ExpiryTime: policy.AccessPolicy.ExpiryTime, |
|||
} |
|||
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") |
|||
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") |
|||
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") |
|||
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") |
|||
|
|||
taps = append(taps, tap) |
|||
} |
|||
return taps |
|||
} |
|||
|
|||
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { |
|||
// generate the permissions string (raud).
|
|||
// still want the end user API to have bool flags.
|
|||
permissions = "" |
|||
|
|||
if tap.CanRead { |
|||
permissions += "r" |
|||
} |
|||
|
|||
if tap.CanAppend { |
|||
permissions += "a" |
|||
} |
|||
|
|||
if tap.CanUpdate { |
|||
permissions += "u" |
|||
} |
|||
|
|||
if tap.CanDelete { |
|||
permissions += "d" |
|||
} |
|||
return permissions |
|||
} |
@ -0,0 +1,325 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"mime/multipart" |
|||
"net/http" |
|||
"net/textproto" |
|||
"sort" |
|||
"strings" |
|||
) |
|||
|
|||
// Operation type. Insert, Delete, Replace etc.
type Operation int

// Batch operation codes. Numbering starts at 1 so the zero value of
// Operation is never a valid operation.
const (
	InsertOp Operation = iota + 1
	DeleteOp
	ReplaceOp
	MergeOp
	InsertOrReplaceOp
	InsertOrMergeOp
)
|||
|
|||
// BatchEntity used for tracking Entities to operate on and
// whether operations (replace/merge etc) should be forced.
// Wrapper for regular Entity with additional data specific for the entity.
type BatchEntity struct {
	*Entity
	Force bool      // when true, If-Match: * is sent instead of the entity's ETag
	Op    Operation // which batch operation (insert/delete/replace/...) to perform
}
|||
|
|||
// TableBatch stores all the entities that will be operated on during a batch process.
// Entities can be inserted, replaced or deleted.
type TableBatch struct {
	// BatchEntitySlice holds the queued operations in submission order.
	BatchEntitySlice []BatchEntity

	// reference to table we're operating on.
	Table *Table
}
|||
|
|||
// defaultChangesetHeaders for changeSets
// Every changeset operation requests minimal-metadata JSON and no response
// content (Prefer: return-no-content) to keep batch responses small.
var defaultChangesetHeaders = map[string]string{
	"Accept":       "application/json;odata=minimalmetadata",
	"Content-Type": "application/json",
	"Prefer":       "return-no-content",
}
|||
|
|||
// NewBatch return new TableBatch for populating.
// The batch is bound to t; queue operations with the Insert*/Replace*/Merge*/
// Delete* helpers, then run them with ExecuteBatch.
func (t *Table) NewBatch() *TableBatch {
	return &TableBatch{
		Table: t,
	}
}
|||
|
|||
// InsertEntity adds an entity in preparation for a batch insert.
|
|||
func (t *TableBatch) InsertEntity(entity *Entity) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
|
|||
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
	// Convenience wrapper: always passes force=true.
	t.InsertOrReplaceEntity(entity, true)
}
|||
|
|||
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
|
|||
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
	// Convenience wrapper: always passes force=true.
	t.InsertOrMergeEntity(entity, true)
}
|||
|
|||
// ReplaceEntity adds an entity in preparation for a batch replace.
|
|||
func (t *TableBatch) ReplaceEntity(entity *Entity) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// DeleteEntity adds an entity in preparation for a batch delete
|
|||
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
// NOTE(review): the force parameter is unused — this wrapper always passes
// true; it is kept only for interface compatibility.
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
	t.DeleteEntity(entity, true)
}
|||
|
|||
// MergeEntity adds an entity in preparation for a batch merge
|
|||
func (t *TableBatch) MergeEntity(entity *Entity) { |
|||
be := BatchEntity{Entity: entity, Force: false, Op: MergeOp} |
|||
t.BatchEntitySlice = append(t.BatchEntitySlice, be) |
|||
} |
|||
|
|||
// ExecuteBatch executes many table operations in one request to Azure.
// The operations can be combinations of Insert, Delete, Replace and Merge
// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
// the changesets.
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
func (t *TableBatch) ExecuteBatch() error {

	// One UUID names the inner changeset boundary, a second names the outer
	// batch boundary.
	id, err := newUUID()
	if err != nil {
		return err
	}

	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
	changesetBody, err := t.generateChangesetBody(changesetBoundary)
	if err != nil {
		return err
	}

	id, err = newUUID()
	if err != nil {
		return err
	}

	boundary := fmt.Sprintf("batch_%s", id.String())
	body, err := generateBody(changesetBody, changesetBoundary, boundary)
	if err != nil {
		return err
	}

	headers := t.Table.tsc.client.getStandardHeaders()
	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)

	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
	if err != nil {
		return err
	}
	// Drain so the transport can reuse the connection.
	defer drainRespBody(resp.resp)

	if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {

		// check which batch failed.
		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
		requestID, date, version := getDebugHeaders(resp.resp.Header)
		return AzureStorageServiceError{
			StatusCode: resp.resp.StatusCode,
			Code:       resp.odata.Err.Code,
			RequestID:  requestID,
			Date:       date,
			APIVersion: version,
			Message:    operationFailedMessage,
		}
	}

	return nil
}
|||
|
|||
// getFailedOperation parses the original Azure error string and determines which operation failed
|
|||
// and generates appropriate message.
|
|||
func (t *TableBatch) getFailedOperation(errorMessage string) string { |
|||
// errorMessage consists of "number:string" we just need the number.
|
|||
sp := strings.Split(errorMessage, ":") |
|||
if len(sp) > 1 { |
|||
msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage) |
|||
return msg |
|||
} |
|||
|
|||
// cant parse the message, just return the original message to client
|
|||
return errorMessage |
|||
} |
|||
|
|||
// generateBody generates the complete body for the batch request.
|
|||
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) { |
|||
|
|||
body := new(bytes.Buffer) |
|||
writer := multipart.NewWriter(body) |
|||
writer.SetBoundary(boundary) |
|||
h := make(textproto.MIMEHeader) |
|||
h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary)) |
|||
batchWriter, err := writer.CreatePart(h) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
batchWriter.Write(changeSetBody.Bytes()) |
|||
writer.Close() |
|||
return body, nil |
|||
} |
|||
|
|||
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
|
|||
// There is a changeset for Insert, Delete, Merge etc.
|
|||
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) { |
|||
|
|||
body := new(bytes.Buffer) |
|||
writer := multipart.NewWriter(body) |
|||
writer.SetBoundary(changesetBoundary) |
|||
|
|||
for _, be := range t.BatchEntitySlice { |
|||
t.generateEntitySubset(&be, writer) |
|||
} |
|||
|
|||
writer.Close() |
|||
return body, nil |
|||
} |
|||
|
|||
// generateVerb generates the HTTP request VERB required for each changeset.
|
|||
func generateVerb(op Operation) (string, error) { |
|||
switch op { |
|||
case InsertOp: |
|||
return http.MethodPost, nil |
|||
case DeleteOp: |
|||
return http.MethodDelete, nil |
|||
case ReplaceOp, InsertOrReplaceOp: |
|||
return http.MethodPut, nil |
|||
case MergeOp, InsertOrMergeOp: |
|||
return "MERGE", nil |
|||
default: |
|||
return "", errors.New("Unable to detect operation") |
|||
} |
|||
} |
|||
|
|||
// generateQueryPath generates the query path for within the changesets
|
|||
// For inserts it will just be a table query path (table name)
|
|||
// but for other operations (modifying an existing entity) then
|
|||
// the partition/row keys need to be generated.
|
|||
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { |
|||
if op == InsertOp { |
|||
return entity.Table.buildPath() |
|||
} |
|||
return entity.buildPath() |
|||
} |
|||
|
|||
// generateGenericOperationHeaders generates common headers for a given operation.
|
|||
func generateGenericOperationHeaders(be *BatchEntity) map[string]string { |
|||
retval := map[string]string{} |
|||
|
|||
for k, v := range defaultChangesetHeaders { |
|||
retval[k] = v |
|||
} |
|||
|
|||
if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { |
|||
if be.Force || be.Entity.OdataEtag == "" { |
|||
retval["If-Match"] = "*" |
|||
} else { |
|||
retval["If-Match"] = be.Entity.OdataEtag |
|||
} |
|||
} |
|||
|
|||
return retval |
|||
} |
|||
|
|||
// generateEntitySubset generates body payload for particular batch entity
// Each changeset part is an "application/http" message: a request line,
// per-operation headers, a blank line, then (except for deletes) the entity
// as JSON.
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {

	h := make(textproto.MIMEHeader)
	h.Set(headerContentType, "application/http")
	h.Set(headerContentTransferEncoding, "binary")

	verb, err := generateVerb(batchEntity.Op)
	if err != nil {
		return err
	}

	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)

	operationWriter, err := writer.CreatePart(h)
	if err != nil {
		return err
	}

	// Request line of the embedded HTTP message, e.g. "PUT <uri> HTTP/1.1".
	// NOTE(review): Write errors below are ignored; the part writer targets an
	// in-memory buffer, but surfacing them would be safer — confirm intent.
	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
	operationWriter.Write([]byte(urlAndVerb))
	writeHeaders(genericOpHeadersMap, &operationWriter)
	operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.

	// delete operation doesn't need a body.
	if batchEntity.Op != DeleteOp {
		//var e Entity = batchEntity.Entity
		body, err := json.Marshal(batchEntity.Entity)
		if err != nil {
			return err
		}
		operationWriter.Write(body)
	}

	return nil
}
|||
|
|||
func writeHeaders(h map[string]string, writer *io.Writer) { |
|||
// This way it is guaranteed the headers will be written in a sorted order
|
|||
var keys []string |
|||
for k := range h { |
|||
keys = append(keys, k) |
|||
} |
|||
sort.Strings(keys) |
|||
for _, k := range keys { |
|||
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) |
|||
} |
|||
} |
@ -0,0 +1,204 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strconv" |
|||
) |
|||
|
|||
// HTTP header names used by the table service client.
const (
	headerAccept          = "Accept"
	headerEtag            = "Etag"
	headerPrefer          = "Prefer"
	// Continuation marker for paging through the table list.
	headerXmsContinuation = "x-ms-Continuation-NextTableName"
)
|||
|
|||
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client         // underlying storage client used for all requests
	auth   authentication // authentication scheme applied to each request
}
|||
|
|||
// TableOptions includes options for some table operations
type TableOptions struct {
	// RequestID, when set, is sent as x-ms-client-request-id for tracing.
	RequestID string
}
|||
|
|||
func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { |
|||
if options != nil { |
|||
h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) |
|||
} |
|||
return h |
|||
} |
|||
|
|||
// QueryNextLink includes information for getting the next page of
// results in query operations
type QueryNextLink struct {
	NextLink *string       // URI of the next page; nil when no page remains
	ml       MetadataLevel // metadata level to request for the next page
}
|||
|
|||
// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// Delegates to the shared client implementation for the table service.
	return t.client.getServiceProperties(tableServiceName, t.auth)
}
|||
|
|||
// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	// Delegates to the shared client implementation for the table service.
	return t.client.setServiceProperties(props, tableServiceName, t.auth)
}
|||
|
|||
// GetTableReference returns a Table object for the specified table name.
// No request is made; the table is not verified to exist.
func (t *TableServiceClient) GetTableReference(name string) *Table {
	return &Table{
		tsc:  t,
		Name: name,
	}
}
|||
|
|||
// QueryTablesOptions includes options for some table operations
type QueryTablesOptions struct {
	Top       uint   // maximum number of tables to return (OData $top); 0 means no limit
	Filter    string // OData $filter expression
	RequestID string // sent as x-ms-client-request-id for tracing
}
|||
|
|||
func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { |
|||
query := url.Values{} |
|||
headers := map[string]string{} |
|||
if options != nil { |
|||
if options.Top > 0 { |
|||
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) |
|||
} |
|||
if options.Filter != "" { |
|||
query.Add(OdataFilter, options.Filter) |
|||
} |
|||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) |
|||
} |
|||
return query, headers |
|||
} |
|||
|
|||
// QueryTables returns the tables in the storage account.
// You can use query options defined by the OData Protocol specification.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
	// Translate options into query params/headers, then run the shared
	// queryTables path (also used for continuation pages).
	query, headers := options.getParameters()
	uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
	return t.queryTables(uri, headers, ml)
}
|||
|
|||
// NextResults returns the next page of results
// from a QueryTables or a NextResults operation.
//
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
	if tqr == nil {
		return nil, errNilPreviousResult
	}
	if tqr.NextLink == nil {
		// No continuation link was returned: this was the last page.
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})

	// Re-issue the query against the continuation URI captured by queryTables.
	return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
}
|||
|
|||
// TableQueryResult contains the response from
// QueryTables and QueryTablesNextResults functions.
type TableQueryResult struct {
	OdataMetadata string  `json:"odata.metadata"` // OData metadata URI of the result set
	Tables        []Table `json:"value"`          // tables in this page
	QueryNextLink                                 // pagination state for NextResults
	tsc           *TableServiceClient             // client used to fetch further pages
}
|||
|
|||
// queryTables executes the table-list GET at uri, decodes the JSON response
// and, when a continuation header is present, prepares NextLink for paging.
func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
	if ml == EmptyPayload {
		// Listing tables requires a concrete OData metadata level.
		return nil, errEmptyPayload
	}
	headers = mergeHeaders(headers, t.client.getStandardHeaders())
	headers[headerAccept] = string(ml)

	resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var out TableQueryResult
	err = json.Unmarshal(respBody, &out)
	if err != nil {
		return nil, err
	}

	// Back-reference the client on every table so table-level calls work.
	for i := range out.Tables {
		out.Tables[i].tsc = t
	}
	out.tsc = t

	// A continuation header means more tables remain; rebuild the URI with
	// the NextTableName marker for the following page.
	nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation))
	if nextLink == "" {
		out.NextLink = nil
	} else {
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		v.Set(nextTableQueryParameter, nextLink)
		newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
		out.NextLink = &newURI
		out.ml = ml
	}

	return &out, nil
}
|||
|
|||
func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { |
|||
h[headerContentType] = "application/json" |
|||
h[headerContentLength] = fmt.Sprintf("%v", length) |
|||
h[headerAcceptCharset] = "UTF-8" |
|||
return h |
|||
} |
|||
|
|||
func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { |
|||
if ml != EmptyPayload { |
|||
h[headerPrefer] = "return-content" |
|||
h[headerAccept] = string(ml) |
|||
} else { |
|||
h[headerPrefer] = "return-no-content" |
|||
// From API version 2015-12-11 onwards, Accept header is required
|
|||
h[headerAccept] = string(NoMetadata) |
|||
} |
|||
return h |
|||
} |
@ -0,0 +1,260 @@ |
|||
package storage |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/hmac" |
|||
"crypto/rand" |
|||
"crypto/sha256" |
|||
"encoding/base64" |
|||
"encoding/xml" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
uuid "github.com/satori/go.uuid" |
|||
) |
|||
|
|||
var (
	// fixedTime is a stable, far-future timestamp; it doubles as the
	// reference time.Time used for type matching in headersFromStruct.
	fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
	// accountSASOptions is a canned account-SAS request covering the blob
	// service with all resource types and permissions, HTTPS-only, expiring
	// at fixedTime.
	// NOTE(review): looks test/example-oriented — confirm it is not used on
	// production request paths.
	accountSASOptions = AccountSASTokenOptions{
		Services: Services{
			Blob: true,
		},
		ResourceTypes: ResourceTypes{
			Service:   true,
			Container: true,
			Object:    true,
		},
		Permissions: Permissions{
			Read:    true,
			Write:   true,
			Delete:  true,
			List:    true,
			Add:     true,
			Create:  true,
			Update:  true,
			Process: true,
		},
		Expiry:   fixedTime,
		UseHTTPS: true,
	}
)
|||
|
|||
// computeHmac256 signs message with the account key using HMAC-SHA256 and
// returns the base64-encoded signature.
func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
|||
|
|||
// currentTimeRfc1123Formatted returns the current UTC time in the RFC 1123
// format used by HTTP date headers.
func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}
|||
|
|||
// timeRfc1123Formatted formats t using the HTTP date layout (RFC 1123 with
// a "GMT" zone, per http.TimeFormat).
func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}
|||
|
|||
// timeRFC3339Formatted formats t as RFC 3339 with seven fractional digits,
// the layout used for snapshot timestamps.
func timeRFC3339Formatted(t time.Time) string {
	return t.Format("2006-01-02T15:04:05.0000000Z")
}
|||
|
|||
func mergeParams(v1, v2 url.Values) url.Values { |
|||
out := url.Values{} |
|||
for k, v := range v1 { |
|||
out[k] = v |
|||
} |
|||
for k, v := range v2 { |
|||
vals, ok := out[k] |
|||
if ok { |
|||
vals = append(vals, v...) |
|||
out[k] = vals |
|||
} else { |
|||
out[k] = v |
|||
} |
|||
} |
|||
return out |
|||
} |
|||
|
|||
func prepareBlockListRequest(blocks []Block) string { |
|||
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` |
|||
for _, v := range blocks { |
|||
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) |
|||
} |
|||
s += `</BlockList>` |
|||
return s |
|||
} |
|||
|
|||
// xmlUnmarshal reads body to completion and XML-decodes it into v.
// Intended for small response bodies (buffers the whole payload in memory).
func xmlUnmarshal(body io.Reader, v interface{}) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return err
	}
	return xml.Unmarshal(data, v)
}
|||
|
|||
func xmlMarshal(v interface{}) (io.Reader, int, error) { |
|||
b, err := xml.Marshal(v) |
|||
if err != nil { |
|||
return nil, 0, err |
|||
} |
|||
return bytes.NewReader(b), len(b), nil |
|||
} |
|||
|
|||
// headersFromStruct builds a header map from a struct value, using each
// field's `header` tag as the header name. Nil pointer fields and empty
// formatted values are skipped. time.Time fields are RFC 1123-formatted;
// uint/uint64 and int fields are rendered in base 10; everything else goes
// through reflect.Value.String.
func headersFromStruct(v interface{}) map[string]string {
	headers := make(map[string]string)
	value := reflect.ValueOf(v)
	for i := 0; i < value.NumField(); i++ {
		key := value.Type().Field(i).Tag.Get("header")
		if key != "" {
			// Indirect unwraps pointer fields; an invalid result means nil.
			reflectedValue := reflect.Indirect(value.Field(i))
			var val string
			if reflectedValue.IsValid() {
				switch reflectedValue.Type() {
				case reflect.TypeOf(fixedTime):
					// fixedTime is only used here as a time.Time type token.
					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
					val = strconv.FormatUint(reflectedValue.Uint(), 10)
				case reflect.TypeOf(int(0)):
					val = strconv.FormatInt(reflectedValue.Int(), 10)
				default:
					val = reflectedValue.String()
				}
			}
			if val != "" {
				headers[key] = val
			}
		}
	}
	return headers
}
|||
|
|||
// mergeHeaders merges extraHeaders into headers (in place) and returns
// headers; on key collision the value from extraHeaders wins.
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
	for key, value := range extraHeaders {
		headers[key] = value
	}
	return headers
}
|||
|
|||
// addToHeaders sets key to value in h unless value is empty, and returns h.
func addToHeaders(h map[string]string, key, value string) map[string]string {
	if value == "" {
		return h
	}
	h[key] = value
	return h
}
|||
|
|||
func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string { |
|||
if value != nil { |
|||
h = addToHeaders(h, key, timeRfc1123Formatted(*value)) |
|||
} |
|||
return h |
|||
} |
|||
|
|||
func addTimeout(params url.Values, timeout uint) url.Values { |
|||
if timeout > 0 { |
|||
params.Add("timeout", fmt.Sprintf("%v", timeout)) |
|||
} |
|||
return params |
|||
} |
|||
|
|||
func addSnapshot(params url.Values, snapshot *time.Time) url.Values { |
|||
if snapshot != nil { |
|||
params.Add("snapshot", timeRFC3339Formatted(*snapshot)) |
|||
} |
|||
return params |
|||
} |
|||
|
|||
func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { |
|||
var out time.Time |
|||
var err error |
|||
outStr := h.Get(key) |
|||
if outStr != "" { |
|||
out, err = time.Parse(time.RFC1123, outStr) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
} |
|||
return &out, nil |
|||
} |
|||
|
|||
// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
// (the storage XML uses RFC 1123 rather than Go's default time layout).
type TimeRFC1123 time.Time
|||
|
|||
// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
|
|||
func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { |
|||
var value string |
|||
d.DecodeElement(&value, &start) |
|||
parse, err := time.Parse(time.RFC1123, value) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*t = TimeRFC1123(parse) |
|||
return nil |
|||
} |
|||
|
|||
// MarshalXML marshals using time.RFC1123.
func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	// Encode the underlying time.Time as an RFC 1123 string element.
	return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start)
}
|||
|
|||
// returns a map of custom metadata values from the specified HTTP header
// Keys are lowercased with the x-ms-meta- prefix stripped; when the same
// metadata header repeats, the last value wins. Returns nil when no
// metadata headers are present.
func getMetadataFromHeaders(header http.Header) map[string]string {
	metadata := make(map[string]string)
	for k, v := range header {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}

	if len(metadata) == 0 {
		return nil
	}

	return metadata
}
|||
|
|||
// newUUID returns a new uuid using RFC 4122 algorithm.
// It fills 16 bytes from crypto/rand, then stamps the variant and version
// bits for a random (version 4) UUID.
func newUUID() (uuid.UUID, error) {
	u := [16]byte{}
	// Set all bits to randomly (or pseudo-randomly) chosen values.
	_, err := rand.Read(u[:])
	if err != nil {
		return uuid.UUID{}, err
	}
	u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) // u.setVariant(ReservedRFC4122)
	u[6] = (u[6] & 0xF) | (uuid.V4 << 4)  // u.setVersion(V4)
	return uuid.FromBytes(u[:])
}
@ -0,0 +1,21 @@ |
|||
package version |
|||
|
|||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
//
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
//
|
|||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
|||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
|||
|
|||
// Number contains the semantic version of this SDK.
|
|||
const Number = "v32.5.0" |
@ -0,0 +1,191 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
Copyright 2015 Microsoft Corporation |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,191 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
Copyright 2015 Microsoft Corporation |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,292 @@ |
|||
# Azure Active Directory authentication for Go |
|||
|
|||
This is a standalone package for authenticating with Azure Active |
|||
Directory from other Go libraries and applications, in particular the [Azure SDK |
|||
for Go](https://github.com/Azure/azure-sdk-for-go). |
|||
|
|||
Note: Despite the package's name it is not related to other "ADAL" libraries |
|||
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues |
|||
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) |
|||
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue |
|||
trackers. |
|||
|
|||
## Install |
|||
|
|||
```bash |
|||
go get -u github.com/Azure/go-autorest/autorest/adal |
|||
``` |
|||
|
|||
## Usage |
|||
|
|||
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). |
|||
|
|||
### Register an Azure AD Application with secret |
|||
|
|||
|
|||
1. Register a new application with a `secret` credential |
|||
|
|||
``` |
|||
az ad app create \ |
|||
--display-name example-app \ |
|||
--homepage https://example-app/home \ |
|||
--identifier-uris https://example-app/app \ |
|||
--password secret |
|||
``` |
|||
|
|||
2. Create a service principal using the `Application ID` from previous step |
|||
|
|||
``` |
|||
az ad sp create --id "Application ID" |
|||
``` |
|||
|
|||
* Replace `Application ID` with `appId` from step 1. |
|||
|
|||
### Register an Azure AD Application with certificate |
|||
|
|||
1. Create a private key |
|||
|
|||
``` |
|||
openssl genrsa -out "example-app.key" 2048 |
|||
``` |
|||
|
|||
2. Create the certificate |
|||
|
|||
``` |
|||
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" |
|||
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 |
|||
``` |
|||
|
|||
3. Create the PKCS12 version of the certificate containing also the private key |
|||
|
|||
``` |
|||
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: |
|||
|
|||
``` |
|||
|
|||
4. Register a new application with the certificate content form `example-app.crt` |
|||
|
|||
``` |
|||
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" |
|||
|
|||
az ad app create \ |
|||
--display-name example-app \ |
|||
--homepage https://example-app/home \ |
|||
--identifier-uris https://example-app/app \ |
|||
--key-usage Verify --end-date 2018-01-01 \ |
|||
--key-value "${certificateContents}" |
|||
``` |
|||
|
|||
5. Create a service principal using the `Application ID` from previous step |
|||
|
|||
``` |
|||
az ad sp create --id "APPLICATION_ID" |
|||
``` |
|||
|
|||
* Replace `APPLICATION_ID` with `appId` from step 4. |
|||
|
|||
|
|||
### Grant the necessary permissions |
|||
|
|||
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained |
|||
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) |
|||
which can be assigned to a service principal of an Azure AD application depending of your needs. |
|||
|
|||
``` |
|||
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" |
|||
``` |
|||
|
|||
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. |
|||
* Replace the `ROLE_NAME` with a role name of your choice. |
|||
|
|||
It is also possible to define custom role definitions. |
|||
|
|||
``` |
|||
az role definition create --role-definition role-definition.json |
|||
``` |
|||
|
|||
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. |
|||
|
|||
|
|||
### Acquire Access Token |
|||
|
|||
The common configuration used by all flows: |
|||
|
|||
```Go |
|||
const activeDirectoryEndpoint = "https://login.microsoftonline.com/" |
|||
tenantID := "TENANT_ID" |
|||
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) |
|||
|
|||
applicationID := "APPLICATION_ID" |
|||
|
|||
callback := func(token adal.Token) error { |
|||
// This is called after the token is acquired |
|||
} |
|||
|
|||
// The resource for which the token is acquired |
|||
resource := "https://management.core.windows.net/" |
|||
``` |
|||
|
|||
* Replace the `TENANT_ID` with your tenant ID. |
|||
* Replace the `APPLICATION_ID` with the value from previous section. |
|||
|
|||
#### Client Credentials |
|||
|
|||
```Go |
|||
applicationSecret := "APPLICATION_SECRET" |
|||
|
|||
spt, err := adal.NewServicePrincipalToken( |
|||
*oauthConfig, |
|||
applicationID, |
|||
applicationSecret, |
|||
resource, |
|||
callbacks...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// Acquire a new access token |
|||
err = spt.Refresh() |
|||
if (err == nil) { |
|||
token := spt.Token |
|||
} |
|||
``` |
|||
|
|||
* Replace the `APPLICATION_SECRET` with the `password` value from previous section. |
|||
|
|||
#### Client Certificate |
|||
|
|||
```Go |
|||
certificatePath := "./example-app.pfx" |
|||
|
|||
certData, err := ioutil.ReadFile(certificatePath) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) |
|||
} |
|||
|
|||
// Get the certificate and private key from pfx file |
|||
certificate, rsaPrivateKey, err := decodePkcs12(certData, "") |
|||
if err != nil { |
|||
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) |
|||
} |
|||
|
|||
spt, err := adal.NewServicePrincipalTokenFromCertificate( |
|||
*oauthConfig, |
|||
applicationID, |
|||
certificate, |
|||
rsaPrivateKey, |
|||
resource, |
|||
callbacks...) |
|||
|
|||
// Acquire a new access token |
|||
err = spt.Refresh() |
|||
if (err == nil) { |
|||
token := spt.Token |
|||
} |
|||
``` |
|||
|
|||
* Update the certificate path to point to the example-app.pfx file which was created in previous section. |
|||
|
|||
|
|||
#### Device Code |
|||
|
|||
```Go |
|||
oauthClient := &http.Client{} |
|||
|
|||
// Acquire the device code |
|||
deviceCode, err := adal.InitiateDeviceAuth( |
|||
oauthClient, |
|||
*oauthConfig, |
|||
applicationID, |
|||
resource) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("Failed to start device auth flow: %s", err) |
|||
} |
|||
|
|||
// Display the authentication message |
|||
fmt.Println(*deviceCode.Message) |
|||
|
|||
// Wait here until the user is authenticated |
|||
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) |
|||
} |
|||
|
|||
spt, err := adal.NewServicePrincipalTokenFromManualToken( |
|||
*oauthConfig, |
|||
applicationID, |
|||
resource, |
|||
*token, |
|||
callbacks...) |
|||
|
|||
if (err == nil) { |
|||
token := spt.Token |
|||
} |
|||
``` |
|||
|
|||
#### Username password authenticate |
|||
|
|||
```Go |
|||
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( |
|||
*oauthConfig, |
|||
applicationID, |
|||
username, |
|||
password, |
|||
resource, |
|||
callbacks...) |
|||
|
|||
if (err == nil) { |
|||
token := spt.Token |
|||
} |
|||
``` |
|||
|
|||
#### Authorization code authenticate |
|||
|
|||
``` Go |
|||
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( |
|||
*oauthConfig, |
|||
applicationID, |
|||
clientSecret, |
|||
authorizationCode, |
|||
redirectURI, |
|||
resource, |
|||
callbacks...) |
|||
|
|||
err = spt.Refresh() |
|||
if (err == nil) { |
|||
token := spt.Token |
|||
} |
|||
``` |
|||
|
|||
### Command Line Tool |
|||
|
|||
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. |
|||
|
|||
``` |
|||
adal -h |
|||
|
|||
Usage of ./adal: |
|||
-applicationId string |
|||
application id |
|||
-certificatePath string |
|||
path to pk12/PFC application certificate |
|||
-mode string |
|||
authentication mode (device, secret, cert, refresh) (default "device") |
|||
-resource string |
|||
resource for which the token is requested |
|||
-secret string |
|||
application secret |
|||
-tenantId string |
|||
tenant id |
|||
-tokenCachePath string |
|||
location of oath token cache (default "/home/cgc/.adal/accessToken.json") |
|||
``` |
|||
|
|||
Example acquire a token for `https://management.core.windows.net/` using device code flow: |
|||
|
|||
``` |
|||
adal -mode device \ |
|||
-applicationId "APPLICATION_ID" \ |
|||
-tenantId "TENANT_ID" \ |
|||
-resource https://management.core.windows.net/ |
|||
|
|||
``` |
@ -0,0 +1,151 @@ |
|||
package adal |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"net/url" |
|||
) |
|||
|
|||
const (
	// activeDirectoryEndpointTemplate builds tenant-scoped OAuth2 endpoint
	// paths. The verbs are filled with the tenant ID, the operation name
	// ("authorize", "token" or "devicecode"), and an optional
	// "?api-version=..." suffix.
	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
)
|
|||
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
	TokenEndpoint      url.URL `json:"tokenEndpoint"`
	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
}

// IsZero returns true if the OAuthConfig object is zero-initialized.
func (oac OAuthConfig) IsZero() bool {
	var zero OAuthConfig
	return oac == zero
}
|||
|
|||
// validateStringParam returns an error when param is empty; name identifies
// the offending parameter in the resulting message.
func validateStringParam(param, name string) error {
	if len(param) == 0 {
		// Use a format verb instead of building the format string by string
		// concatenation, which `go vet` flags as a non-constant format string.
		// The produced message is byte-identical to the previous version.
		return fmt.Errorf("parameter '%s' cannot be empty", name)
	}
	return nil
}
|||
|
|||
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
|
|||
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { |
|||
apiVer := "1.0" |
|||
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) |
|||
} |
|||
|
|||
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
|
|||
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
|
|||
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { |
|||
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { |
|||
return nil, err |
|||
} |
|||
api := "" |
|||
// it's legal for tenantID to be empty so don't validate it
|
|||
if apiVersion != nil { |
|||
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { |
|||
return nil, err |
|||
} |
|||
api = fmt.Sprintf("?api-version=%s", *apiVersion) |
|||
} |
|||
u, err := url.Parse(activeDirectoryEndpoint) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
authorityURL, err := u.Parse(tenantID) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return &OAuthConfig{ |
|||
AuthorityEndpoint: *authorityURL, |
|||
AuthorizeEndpoint: *authorizeURL, |
|||
TokenEndpoint: *tokenURL, |
|||
DeviceCodeEndpoint: *deviceCodeURL, |
|||
}, nil |
|||
} |
|||
|
|||
// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs.
|
|||
type MultiTenantOAuthConfig interface { |
|||
PrimaryTenant() *OAuthConfig |
|||
AuxiliaryTenants() []*OAuthConfig |
|||
} |
|||
|
|||
// OAuthOptions contains optional OAuthConfig creation arguments.
type OAuthOptions struct {
	// APIVersion is the value for the "api-version" query parameter.
	// When empty, "1.0" is used.
	APIVersion string
}

// apiVersion returns the configured API version, defaulting to "1.0".
//
// Fix: the previous implementation returned the value already prefixed with
// "?api-version=". Its only consumer (NewMultiTenantOAuthConfig) hands the
// result to NewOAuthConfigWithAPIVersion, which prepends that exact prefix
// itself, so a non-empty APIVersion produced a doubled
// "?api-version=?api-version=X" query string. Return the bare version and let
// the consumer add the prefix once.
func (c OAuthOptions) apiVersion() string {
	if c.APIVersion != "" {
		return c.APIVersion
	}
	return "1.0"
}
|||
|
|||
// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
|
|||
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
|
|||
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { |
|||
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { |
|||
return nil, errors.New("must specify one to three auxiliary tenants") |
|||
} |
|||
mtCfg := multiTenantOAuthConfig{ |
|||
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), |
|||
} |
|||
apiVer := options.apiVersion() |
|||
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) |
|||
} |
|||
mtCfg.cfgs[0] = pri |
|||
for i := range auxiliaryTenantIDs { |
|||
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) |
|||
} |
|||
mtCfg.cfgs[i+1] = aux |
|||
} |
|||
return mtCfg, nil |
|||
} |
|||
|
|||
type multiTenantOAuthConfig struct { |
|||
// first config in the slice is the primary tenant
|
|||
cfgs []*OAuthConfig |
|||
} |
|||
|
|||
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { |
|||
return m.cfgs[0] |
|||
} |
|||
|
|||
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { |
|||
return m.cfgs[1:] |
|||
} |
@ -0,0 +1,242 @@ |
|||
package adal |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
/* |
|||
This file is largely based on rjw57/oauth2device's code, with the follow differences: |
|||
* scope -> resource, and only allow a single one |
|||
* receive "Message" in the DeviceCode struct and show it to users as the prompt |
|||
* azure-xplat-cli has the following behavior that this emulates: |
|||
- does not send client_secret during the token exchange |
|||
- sends resource again in the token exchange request |
|||
*/ |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
const (
	// logPrefix tags every error message produced by the device flow.
	logPrefix = "autorest/adal/devicetoken:"
)

// Sentinel errors mapping the OAuth device-flow error codes returned by the
// token endpoint; callers compare against these to drive polling.
var (
	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)

	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)

	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)

	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)

	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)

	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)

	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)

	// Message fragments reused when wrapping transport/parse failures below.
	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
	errStatusNotOK        = "Error HTTP status != 200"
)
|||
|
|||
// DeviceCode is the object returned by the device auth endpoint
|
|||
// It contains information to instruct the user to complete the auth flow
|
|||
type DeviceCode struct { |
|||
DeviceCode *string `json:"device_code,omitempty"` |
|||
UserCode *string `json:"user_code,omitempty"` |
|||
VerificationURL *string `json:"verification_url,omitempty"` |
|||
ExpiresIn *int64 `json:"expires_in,string,omitempty"` |
|||
Interval *int64 `json:"interval,string,omitempty"` |
|||
|
|||
Message *string `json:"message"` // Azure specific
|
|||
Resource string // store the following, stored when initiating, used when exchanging
|
|||
OAuthConfig OAuthConfig |
|||
ClientID string |
|||
} |
|||
|
|||
// TokenError is the object returned by the token exchange endpoint
|
|||
// when something is amiss
|
|||
type TokenError struct { |
|||
Error *string `json:"error,omitempty"` |
|||
ErrorCodes []int `json:"error_codes,omitempty"` |
|||
ErrorDescription *string `json:"error_description,omitempty"` |
|||
Timestamp *string `json:"timestamp,omitempty"` |
|||
TraceID *string `json:"trace_id,omitempty"` |
|||
} |
|||
|
|||
// DeviceToken is the object return by the token exchange endpoint
|
|||
// It can either look like a Token or an ErrorToken, so put both here
|
|||
// and check for presence of "Error" to know if we are in error state
|
|||
type deviceToken struct { |
|||
Token |
|||
TokenError |
|||
} |
|||
|
|||
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
	// Form-encoded request body: only client_id and resource are sent
	// (no client secret), matching azure-xplat-cli behavior (see file header).
	v := url.Values{
		"client_id": []string{clientID},
		"resource":  []string{resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}
	defer resp.Body.Close()

	// The body is read before the status check so the status error does not
	// mask a read failure.
	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
	}

	// A body of only spaces is treated the same as an empty body.
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrDeviceCodeEmpty
	}

	var code DeviceCode
	err = json.Unmarshal(rb, &code)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Remember the initiation inputs on the DeviceCode so the token exchange
	// (CheckForUserCompletion) can resend them.
	code.ClientID = clientID
	code.Resource = resource
	code.OAuthConfig = oauthConfig

	return &code, nil
}
|||
|
|||
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// Token-exchange form body. No client_secret is sent and resource is sent
	// again, matching azure-xplat-cli behavior (see file header).
	v := url.Values{
		"client_id":  []string{code.ClientID},
		"code":       []string{*code.DeviceCode},
		"grant_type": []string{OAuthGrantTypeDeviceCode},
		"resource":   []string{code.Resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// A non-200 WITH a body is expected while polling (its error payload is
	// decoded below); only a non-200 with an empty body is fatal here.
	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrOAuthTokenEmpty
	}

	var token deviceToken
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// An absent "error" field means the exchange succeeded.
	if token.Error == nil {
		return &token.Token, nil
	}

	// Map the OAuth device-flow error codes onto this package's sentinels.
	switch *token.Error {
	case "authorization_pending":
		return nil, ErrDeviceAuthorizationPending
	case "slow_down":
		return nil, ErrDeviceSlowDown
	case "access_denied":
		return nil, ErrDeviceAccessDenied
	case "code_expired":
		return nil, ErrDeviceCodeExpired
	default:
		return nil, ErrDeviceGeneric
	}
}
|||
|
|||
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
|
|||
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
|||
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { |
|||
intervalDuration := time.Duration(*code.Interval) * time.Second |
|||
waitDuration := intervalDuration |
|||
|
|||
for { |
|||
token, err := CheckForUserCompletion(sender, code) |
|||
|
|||
if err == nil { |
|||
return token, nil |
|||
} |
|||
|
|||
switch err { |
|||
case ErrDeviceSlowDown: |
|||
waitDuration += waitDuration |
|||
case ErrDeviceAuthorizationPending: |
|||
// noop
|
|||
default: // everything else is "fatal" to us
|
|||
return nil, err |
|||
} |
|||
|
|||
if waitDuration > (intervalDuration * 3) { |
|||
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) |
|||
} |
|||
|
|||
time.Sleep(waitDuration) |
|||
} |
|||
} |
@ -0,0 +1,73 @@ |
|||
package adal |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"os" |
|||
"path/filepath" |
|||
) |
|||
|
|||
// LoadToken restores a Token object from a file located at 'path'.
|
|||
func LoadToken(path string) (*Token, error) { |
|||
file, err := os.Open(path) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) |
|||
} |
|||
defer file.Close() |
|||
|
|||
var token Token |
|||
|
|||
dec := json.NewDecoder(file) |
|||
if err = dec.Decode(&token); err != nil { |
|||
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) |
|||
} |
|||
return &token, nil |
|||
} |
|||
|
|||
// SaveToken persists an oauth token at the given location on disk.
|
|||
// It moves the new file into place so it can safely be used to replace an existing file
|
|||
// that maybe accessed by multiple processes.
|
|||
func SaveToken(path string, mode os.FileMode, token Token) error { |
|||
dir := filepath.Dir(path) |
|||
err := os.MkdirAll(dir, os.ModePerm) |
|||
if err != nil { |
|||
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) |
|||
} |
|||
|
|||
newFile, err := ioutil.TempFile(dir, "token") |
|||
if err != nil { |
|||
return fmt.Errorf("failed to create the temp file to write the token: %v", err) |
|||
} |
|||
tempPath := newFile.Name() |
|||
|
|||
if err := json.NewEncoder(newFile).Encode(token); err != nil { |
|||
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) |
|||
} |
|||
if err := newFile.Close(); err != nil { |
|||
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) |
|||
} |
|||
|
|||
// Atomic replace to avoid multi-writer file corruptions
|
|||
if err := os.Rename(tempPath, path); err != nil { |
|||
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) |
|||
} |
|||
if err := os.Chmod(path, mode); err != nil { |
|||
return fmt.Errorf("failed to chmod the token file %s: %v", path, err) |
|||
} |
|||
return nil |
|||
} |
@ -0,0 +1,95 @@ |
|||
package adal |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"crypto/tls" |
|||
"net/http" |
|||
"net/http/cookiejar" |
|||
"sync" |
|||
|
|||
"github.com/Azure/go-autorest/tracing" |
|||
) |
|||
|
|||
const ( |
|||
contentType = "Content-Type" |
|||
mimeTypeFormPost = "application/x-www-form-urlencoded" |
|||
) |
|||
|
|||
var defaultSender Sender |
|||
var defaultSenderInit = &sync.Once{} |
|||
|
|||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
|||
//
|
|||
// The standard http.Client conforms to this interface.
|
|||
type Sender interface { |
|||
Do(*http.Request) (*http.Response, error) |
|||
} |
|||
|
|||
// SenderFunc is a method that implements the Sender interface.
|
|||
type SenderFunc func(*http.Request) (*http.Response, error) |
|||
|
|||
// Do implements the Sender interface on SenderFunc.
|
|||
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { |
|||
return sf(r) |
|||
} |
|||
|
|||
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
|
|||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
|||
// http.Response result.
|
|||
type SendDecorator func(Sender) Sender |
|||
|
|||
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
|
|||
func CreateSender(decorators ...SendDecorator) Sender { |
|||
return DecorateSender(sender(), decorators...) |
|||
} |
|||
|
|||
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
|
|||
// the Sender. Decorators are applied in the order received, but their affect upon the request
|
|||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
|||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
|||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender { |
|||
for _, decorate := range decorators { |
|||
s = decorate(s) |
|||
} |
|||
return s |
|||
} |
|||
|
|||
// sender lazily builds and returns the package-wide default Sender: an
// http.Client with a cookie jar and a TLS-1.2-minimum transport, optionally
// wrapped by the tracing transport.
func sender() Sender {
	// note that we can't init defaultSender in init() since it will
	// execute before calling code has had a chance to enable tracing
	defaultSenderInit.Do(func() {
		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
		defaultTransport := http.DefaultTransport.(*http.Transport)
		transport := &http.Transport{
			Proxy:                 defaultTransport.Proxy,
			DialContext:           defaultTransport.DialContext,
			MaxIdleConns:          defaultTransport.MaxIdleConns,
			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
			},
		}
		var roundTripper http.RoundTripper = transport
		if tracing.IsEnabled() {
			roundTripper = tracing.NewTransport(transport)
		}
		// cookiejar.New never returns an error when given nil options.
		j, _ := cookiejar.New(nil)
		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
	})
	return defaultSender
}
File diff suppressed because it is too large
@ -0,0 +1,45 @@ |
|||
package adal |
|||
|
|||
import ( |
|||
"fmt" |
|||
"runtime" |
|||
) |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
const number = "v1.0.0" |
|||
|
|||
var ( |
|||
ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", |
|||
runtime.Version(), |
|||
runtime.GOARCH, |
|||
runtime.GOOS, |
|||
number, |
|||
) |
|||
) |
|||
|
|||
// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
|
|||
func UserAgent() string { |
|||
return ua |
|||
} |
|||
|
|||
// AddToUserAgent adds an extension to the current user agent
|
|||
func AddToUserAgent(extension string) error { |
|||
if extension != "" { |
|||
ua = fmt.Sprintf("%s %s", ua, extension) |
|||
return nil |
|||
} |
|||
return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) |
|||
} |
@ -0,0 +1,336 @@ |
|||
package autorest |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"crypto/tls" |
|||
"encoding/base64" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
|
|||
"github.com/Azure/go-autorest/autorest/adal" |
|||
) |
|||
|
|||
const (
	// Header names and canned values used by the authorizers in this file.
	bearerChallengeHeader       = "Www-Authenticate"
	bearer                      = "Bearer"
	tenantID                    = "tenantID"
	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
	bingAPISdkHeader            = "X-BingApis-SDK-Client"
	golangBingAPISdkHeaderValue = "Go-SDK"
	authorization               = "Authorization"
	basic                       = "Basic"
)
|||
|
|||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
|||
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
|
|||
// state of the formed HTTP request.
|
|||
type Authorizer interface { |
|||
WithAuthorization() PrepareDecorator |
|||
} |
|||
|
|||
// NullAuthorizer implements a default, "do nothing" Authorizer.
|
|||
type NullAuthorizer struct{} |
|||
|
|||
// WithAuthorization returns a PrepareDecorator that does nothing.
|
|||
func (na NullAuthorizer) WithAuthorization() PrepareDecorator { |
|||
return WithNothing() |
|||
} |
|||
|
|||
// APIKeyAuthorizer implements API Key authorization.
|
|||
type APIKeyAuthorizer struct { |
|||
headers map[string]interface{} |
|||
queryParameters map[string]interface{} |
|||
} |
|||
|
|||
// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
|
|||
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { |
|||
return NewAPIKeyAuthorizer(headers, nil) |
|||
} |
|||
|
|||
// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
|
|||
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { |
|||
return NewAPIKeyAuthorizer(nil, queryParameters) |
|||
} |
|||
|
|||
// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers.
|
|||
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { |
|||
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} |
|||
} |
|||
|
|||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
|
|||
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { |
|||
return func(p Preparer) Preparer { |
|||
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) |
|||
} |
|||
} |
|||
|
|||
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
|
|||
type CognitiveServicesAuthorizer struct { |
|||
subscriptionKey string |
|||
} |
|||
|
|||
// NewCognitiveServicesAuthorizer is
|
|||
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { |
|||
return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} |
|||
} |
|||
|
|||
// WithAuthorization is
|
|||
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { |
|||
headers := make(map[string]interface{}) |
|||
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey |
|||
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue |
|||
|
|||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() |
|||
} |
|||
|
|||
// BearerAuthorizer implements the bearer authorization
type BearerAuthorizer struct {
	// tokenProvider supplies the OAuth token placed in the Authorization
	// header; when it also implements adal.Refresher/RefresherWithContext
	// the token is refreshed before use.
	tokenProvider adal.OAuthTokenProvider
}

// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
	return &BearerAuthorizer{tokenProvider: tp}
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the token.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				// the ordering is important here, prefer RefresherWithContext if available
				if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
					err = refresher.EnsureFreshWithContext(r.Context())
				} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
					err = refresher.EnsureFresh()
				}
				if err != nil {
					// Surface the token service's HTTP response (if any) in the error.
					var resp *http.Response
					if tokError, ok := err.(adal.TokenRefreshError); ok {
						resp = tokError.Response()
					}
					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
						"Failed to refresh the Token for request to %s", r.URL)
				}
				return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
			}
			return r, err
		})
	}
}
|||
|
|||
// BearerAuthorizerCallbackFunc is the authentication callback signature.
|
|||
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) |
|||
|
|||
// BearerAuthorizerCallback implements bearer authorization via a callback.
|
|||
type BearerAuthorizerCallback struct { |
|||
sender Sender |
|||
callback BearerAuthorizerCallbackFunc |
|||
} |
|||
|
|||
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
|
|||
// is invoked when the HTTP request is submitted.
|
|||
func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { |
|||
if s == nil { |
|||
s = sender(tls.RenegotiateNever) |
|||
} |
|||
return &BearerAuthorizerCallback{sender: s, callback: callback} |
|||
} |
|||
|
|||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				// make a copy of the request and remove the body as it's not
				// required and avoids us having to create a copy of it.
				rCopy := *r
				removeRequestBody(&rCopy)

				// Probe the service without a token so a 401 response yields
				// the bearer challenge (tenant ID / resource) to feed the callback.
				resp, err := bacb.sender.Do(&rCopy)
				if err == nil && resp.StatusCode == 401 {
					defer resp.Body.Close()
					if hasBearerChallenge(resp) {
						bc, err := newBearerChallenge(resp)
						if err != nil {
							return r, err
						}
						if bacb.callback != nil {
							ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
							if err != nil {
								return r, err
							}
							return Prepare(r, ba.WithAuthorization())
						}
					}
				}
			}
			// NOTE(review): the probe's `err` above shadows the outer `err`, so a
			// failed probe falls through and returns the outer (nil) error here —
			// presumably a deliberate best-effort probe; confirm before changing.
			return r, err
		})
	}
}
|||
|
|||
// returns true if the HTTP response contains a bearer challenge
|
|||
func hasBearerChallenge(resp *http.Response) bool { |
|||
authHeader := resp.Header.Get(bearerChallengeHeader) |
|||
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { |
|||
return false |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// bearerChallenge holds the key/value parameters parsed from a
// Www-Authenticate bearer challenge header.
type bearerChallenge struct {
	values map[string]string
}

// newBearerChallenge parses the comma-delimited key=value pairs of resp's
// Www-Authenticate bearer challenge. For the "authorization" /
// "authorization_uri" keys, the tenant ID is taken from the URL path and
// stored under the tenantID key instead.
//
// NOTE(review): the slice below assumes the trimmed header extends past
// "Bearer " — hasBearerChallenge only guarantees "Bearer" appears somewhere,
// so a header shorter than len(bearer)+1 would panic here; confirm all
// callers gate on hasBearerChallenge first.
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
	challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
	trimmedChallenge := challenge[len(bearer)+1:]

	// challenge is a set of key=value pairs that are comma delimited
	pairs := strings.Split(trimmedChallenge, ",")
	if len(pairs) < 1 {
		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
		return bc, err
	}

	bc.values = make(map[string]string)
	for i := range pairs {
		trimmedPair := strings.TrimSpace(pairs[i])
		pair := strings.Split(trimmedPair, "=")
		// Pairs whose value itself contains '=' split into more than two
		// parts and are silently skipped.
		if len(pair) == 2 {
			// remove the enclosing quotes
			key := strings.Trim(pair[0], "\"")
			value := strings.Trim(pair[1], "\"")

			switch key {
			case "authorization", "authorization_uri":
				// strip the tenant ID from the authorization URL
				asURL, err := url.Parse(value)
				if err != nil {
					return bc, err
				}
				bc.values[tenantID] = asURL.Path[1:]
			default:
				bc.values[key] = value
			}
		}
	}

	return bc, err
}
|||
|
|||
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
|
|||
type EventGridKeyAuthorizer struct { |
|||
topicKey string |
|||
} |
|||
|
|||
// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
|
|||
// with the specified topic key.
|
|||
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { |
|||
return EventGridKeyAuthorizer{topicKey: topicKey} |
|||
} |
|||
|
|||
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
|
|||
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { |
|||
headers := map[string]interface{}{ |
|||
"aeg-sas-key": egta.topicKey, |
|||
} |
|||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() |
|||
} |
|||
|
|||
// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
|
|||
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
|
|||
type BasicAuthorizer struct { |
|||
userName string |
|||
password string |
|||
} |
|||
|
|||
// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
|
|||
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { |
|||
return &BasicAuthorizer{ |
|||
userName: userName, |
|||
password: password, |
|||
} |
|||
} |
|||
|
|||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|||
// value is "Basic " followed by the base64-encoded username:password tuple.
|
|||
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { |
|||
headers := make(map[string]interface{}) |
|||
headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) |
|||
|
|||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() |
|||
} |
|||
|
|||
// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
type MultiTenantServicePrincipalTokenAuthorizer interface {
	WithAuthorization() PrepareDecorator
}

// NewMultiTenantServicePrincipalTokenAuthorizer creates a BearerAuthorizer using the given token provider.
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
	return &multiTenantSPTAuthorizer{tp: tp}
}

// multiTenantSPTAuthorizer adapts a multitenant OAuth token provider to the
// MultiTenantServicePrincipalTokenAuthorizer interface.
type multiTenantSPTAuthorizer struct {
	tp adal.MultitenantOAuthTokenProvider
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
// primary token along with the auxiliary authorization header using the auxiliary tokens.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			// run the wrapped preparer first; bail out on its error
			r, err := p.Prepare(r)
			if err != nil {
				return r, err
			}
			// refresh the primary and auxiliary tokens if the provider supports it
			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
				err = refresher.EnsureFreshWithContext(r.Context())
				if err != nil {
					// surface the token-refresh HTTP response (if any) in the error
					var resp *http.Response
					if tokError, ok := err.(adal.TokenRefreshError); ok {
						resp = tokError.Response()
					}
					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
						"Failed to refresh one or more Tokens for request to %s", r.URL)
				}
			}
			// primary token goes into the standard Authorization header
			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
			if err != nil {
				return r, err
			}
			// auxiliary tokens are joined (semicolon-separated) into the auxiliary header
			auxTokens := mt.tp.AuxiliaryOAuthTokens()
			for i := range auxTokens {
				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
			}
			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
		})
	}
}
@ -0,0 +1,150 @@ |
|||
/* |
|||
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines |
|||
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
|
|||
generated Go code. |
|||
|
|||
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, |
|||
and Responding. A typical pattern is: |
|||
|
|||
req, err := Prepare(&http.Request{}, |
|||
token.WithAuthorization()) |
|||
|
|||
resp, err := Send(req, |
|||
WithLogging(logger), |
|||
DoErrorIfStatusCode(http.StatusInternalServerError), |
|||
DoCloseIfError(), |
|||
DoRetryForAttempts(5, time.Second)) |
|||
|
|||
err = Respond(resp, |
|||
ByDiscardingBody(), |
|||
ByClosing()) |
|||
|
|||
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify |
|||
and then pass the data along, pass the data first and then modify the result, or wrap themselves |
|||
around passing the data (such as a logger might do). Decorators run in the order provided. For |
|||
example, the following: |
|||
|
|||
req, err := Prepare(&http.Request{}, |
|||
WithBaseURL("https://microsoft.com/"), |
|||
WithPath("a"), |
|||
WithPath("b"), |
|||
WithPath("c")) |
|||
|
|||
will set the URL to: |
|||
|
|||
https://microsoft.com/a/b/c
|
|||
|
|||
Preparers and Responders may be shared and re-used (assuming the underlying decorators support |
|||
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders |
|||
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, |
|||
all bound together by means of input / output channels. |
|||
|
|||
Decorators hold their passed state within a closure (such as the path components in the example |
|||
above). Be careful to share Preparers and Responders only in a context where such held state |
|||
applies. For example, it may not make sense to share a Preparer that applies a query string from a |
|||
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed |
|||
struct (e.g., ByUnmarshallingJson) is likely incorrect. |
|||
|
|||
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
|
|||
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
|
|||
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure |
|||
correct parsing and formatting. |
|||
|
|||
Errors raised by autorest objects and methods will conform to the autorest.Error interface. |
|||
|
|||
See the included examples for more detail. For details on the suggested use of this package by |
|||
generated clients, see the Client described below. |
|||
*/ |
|||
package autorest |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"context" |
|||
"net/http" |
|||
"time" |
|||
) |
|||
|
|||
// HTTP response headers consumed by the polling helpers in this package.
const (
	// HeaderLocation specifies the HTTP Location header.
	HeaderLocation = "Location"

	// HeaderRetryAfter specifies the HTTP Retry-After header.
	HeaderRetryAfter = "Retry-After"
)
|||
|
|||
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
|||
// and false otherwise.
|
|||
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { |
|||
if resp == nil { |
|||
return false |
|||
} |
|||
return containsInt(codes, resp.StatusCode) |
|||
} |
|||
|
|||
// GetLocation retrieves the URL from the Location header of the passed response.
|
|||
func GetLocation(resp *http.Response) string { |
|||
return resp.Header.Get(HeaderLocation) |
|||
} |
|||
|
|||
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
|
|||
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
|
|||
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { |
|||
retry := resp.Header.Get(HeaderRetryAfter) |
|||
if retry == "" { |
|||
return defaultDelay |
|||
} |
|||
|
|||
d, err := time.ParseDuration(retry + "s") |
|||
if err != nil { |
|||
return defaultDelay |
|||
} |
|||
|
|||
return d |
|||
} |
|||
|
|||
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
// The poll target is taken from the response's Location header; an error is returned when
// that header is absent.
//
// NOTE(review): http.Request.Cancel is deprecated in the net/http documentation in favor of
// request contexts — prefer NewPollingRequestWithContext in new code.
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
	location := GetLocation(resp)
	if location == "" {
		return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
	}

	// build a GET request against the polling URL, wiring in the cancel channel
	req, err := Prepare(&http.Request{Cancel: cancel},
		AsGet(),
		WithBaseURL(location))
	if err != nil {
		return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
	}

	return req, nil
}
|||
|
|||
// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response.
// The poll target is taken from the response's Location header; an error is returned when
// that header is absent.
func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) {
	location := GetLocation(resp)
	if location == "" {
		return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling")
	}

	// build a GET request against the polling URL, carrying the caller's context
	req, err := Prepare((&http.Request{}).WithContext(ctx),
		AsGet(),
		WithBaseURL(location))
	if err != nil {
		return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location)
	}

	return req, nil
}
@ -0,0 +1,924 @@ |
|||
package azure |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/Azure/go-autorest/autorest" |
|||
"github.com/Azure/go-autorest/tracing" |
|||
) |
|||
|
|||
const (
	// headerAsyncOperation is the response header containing the URL used to
	// poll long-running-operation (LRO) status.
	headerAsyncOperation = "Azure-AsyncOperation"
)

// LRO states as reported by the service; they are compared case-insensitively
// by the tracker helpers below.
const (
	operationInProgress string = "InProgress"
	operationCanceled   string = "Canceled"
	operationFailed     string = "Failed"
	operationSucceeded  string = "Succeeded"
)

// pollingCodes are the HTTP status codes treated as successful polling responses.
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
|||
|
|||
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {
	// pt tracks the polling protocol (method, URLs, state) for the underlying LRO;
	// nil means the Future has not been initialized.
	pt pollingTracker
}
|||
|
|||
// NewFutureFromResponse returns a new Future object initialized
|
|||
// with the initial response from an asynchronous operation.
|
|||
func NewFutureFromResponse(resp *http.Response) (Future, error) { |
|||
pt, err := createPollingTracker(resp) |
|||
return Future{pt: pt}, err |
|||
} |
|||
|
|||
// Response returns the last HTTP response, or nil for an uninitialized Future.
func (f Future) Response() *http.Response {
	if f.pt == nil {
		return nil
	}
	return f.pt.latestResponse()
}

// Status returns the last status message of the operation, or the empty
// string for an uninitialized Future.
func (f Future) Status() string {
	if f.pt == nil {
		return ""
	}
	return f.pt.pollingStatus()
}

// PollingMethod returns the method used to monitor the status of the asynchronous operation.
// It returns PollingUnknown for an uninitialized Future.
func (f Future) PollingMethod() PollingMethodType {
	if f.pt == nil {
		return PollingUnknown
	}
	return f.pt.pollingMethod()
}
|||
|
|||
// DoneWithContext queries the service to see if the operation has completed.
// It polls once via sender, updates the tracker state, and returns whether the
// LRO has reached a terminal state along with any service-reported error.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
	// the deferred span end captures the final status code and the named err
	defer func() {
		sc := -1
		resp := f.Response()
		if resp != nil {
			sc = resp.StatusCode
		}
		tracing.EndSpan(ctx, sc, err)
	}()

	if f.pt == nil {
		return false, autorest.NewError("Future", "Done", "future is not initialized")
	}
	// short-circuit: a terminated LRO needs no further polling
	if f.pt.hasTerminated() {
		return true, f.pt.pollingError()
	}
	// the following steps are order-sensitive: poll, validate, then update state/method
	if err := f.pt.pollForStatus(ctx, sender); err != nil {
		return false, err
	}
	if err := f.pt.checkForErrors(); err != nil {
		return f.pt.hasTerminated(), err
	}
	if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
		return false, err
	}
	if err := f.pt.initPollingMethod(); err != nil {
		return false, err
	}
	if err := f.pt.updatePollingMethod(); err != nil {
		return false, err
	}
	return f.pt.hasTerminated(), f.pt.pollingError()
}
|||
|
|||
// GetPollingDelay returns a duration the application should wait before checking
|
|||
// the status of the asynchronous request and true; this value is returned from
|
|||
// the service via the Retry-After response header. If the header wasn't returned
|
|||
// then the function returns the zero-value time.Duration and false.
|
|||
func (f Future) GetPollingDelay() (time.Duration, bool) { |
|||
if f.pt == nil { |
|||
return 0, false |
|||
} |
|||
resp := f.pt.latestResponse() |
|||
if resp == nil { |
|||
return 0, false |
|||
} |
|||
|
|||
retry := resp.Header.Get(autorest.HeaderRetryAfter) |
|||
if retry == "" { |
|||
return 0, false |
|||
} |
|||
|
|||
d, err := time.ParseDuration(retry + "s") |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
|
|||
return d, true |
|||
} |
|||
|
|||
// WaitForCompletionRef will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
// If no deadline is specified in the context then the client.PollingDuration will be
// used to determine if a default deadline should be used.
// If PollingDuration is greater than zero the value will be used as the context's timeout.
// If PollingDuration is zero then no default deadline will be used.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
	// the deferred span end captures the final status code and the named err
	defer func() {
		sc := -1
		resp := f.Response()
		if resp != nil {
			sc = resp.StatusCode
		}
		tracing.EndSpan(ctx, sc, err)
	}()
	cancelCtx := ctx
	// if the provided context already has a deadline don't override it
	_, hasDeadline := ctx.Deadline()
	if d := client.PollingDuration; !hasDeadline && d != 0 {
		var cancel context.CancelFunc
		cancelCtx, cancel = context.WithTimeout(ctx, d)
		defer cancel()
	}

	// poll until the LRO terminates; attempts only counts failed polls
	done, err := f.DoneWithContext(ctx, client)
	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
		if attempts >= client.RetryAttempts {
			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
		}
		// we want delayAttempt to be zero in the non-error case so
		// that DelayForBackoff doesn't perform exponential back-off
		var delayAttempt int
		var delay time.Duration
		if err == nil {
			// check for Retry-After delay, if not present use the client's polling delay
			var ok bool
			delay, ok = f.GetPollingDelay()
			if !ok {
				delay = client.PollingDelay
			}
		} else {
			// there was an error polling for status so perform exponential
			// back-off based on the number of attempts using the client's retry
			// duration. update attempts after delayAttempt to avoid off-by-one.
			delayAttempt = attempts
			delay = client.RetryDuration
			attempts++
		}
		// wait until the delay elapses or the context is cancelled
		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
		if !delayElapsed {
			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
		}
	}
	return
}
|||
|
|||
// MarshalJSON implements the json.Marshaler interface.
func (f Future) MarshalJSON() ([]byte, error) {
	// the concrete polling tracker carries the JSON tags, so delegate to it
	return json.Marshal(f.pt)
}
|||
|
|||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
|||
func (f *Future) UnmarshalJSON(data []byte) error { |
|||
// unmarshal into JSON object to determine the tracker type
|
|||
obj := map[string]interface{}{} |
|||
err := json.Unmarshal(data, &obj) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if obj["method"] == nil { |
|||
return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") |
|||
} |
|||
method := obj["method"].(string) |
|||
switch strings.ToUpper(method) { |
|||
case http.MethodDelete: |
|||
f.pt = &pollingTrackerDelete{} |
|||
case http.MethodPatch: |
|||
f.pt = &pollingTrackerPatch{} |
|||
case http.MethodPost: |
|||
f.pt = &pollingTrackerPost{} |
|||
case http.MethodPut: |
|||
f.pt = &pollingTrackerPut{} |
|||
default: |
|||
return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) |
|||
} |
|||
// now unmarshal into the tracker
|
|||
return json.Unmarshal(data, &f.pt) |
|||
} |
|||
|
|||
// PollingURL returns the URL used for retrieving the status of the long-running operation.
// It returns the empty string for an uninitialized Future.
func (f Future) PollingURL() string {
	if f.pt == nil {
		return ""
	}
	return f.pt.pollingURL()
}
|||
|
|||
// GetResult should be called once polling has completed successfully.
// It makes the final GET call to retrieve the resultant payload.
//
// NOTE(review): unlike the other Future methods this one does not guard
// against f.pt being nil — calling it on an uninitialized Future will panic.
func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
	if f.pt.finalGetURL() == "" {
		// we can end up in this situation if the async operation returns a 200
		// with no polling URLs. in that case return the response which should
		// contain the JSON payload (only do this for successful terminal cases).
		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
			return lr, nil
		}
		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
	}
	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
	if err != nil {
		return nil, err
	}
	return sender.Do(req)
}
|||
|
|||
// pollingTracker defines the operations needed to poll a long-running
// operation (LRO); each HTTP verb has its own concrete tracker built on
// pollingTrackerBase.
type pollingTracker interface {
	// these methods can differ per tracker

	// checks the response headers and status code to determine the polling mechanism
	updatePollingMethod() error

	// checks the response for tracker-specific error conditions
	checkForErrors() error

	// returns true if provisioning state should be checked
	provisioningStateApplicable() bool

	// methods common to all trackers

	// initializes a tracker's polling URL and method, called for each iteration.
	// these values can be overridden by each polling tracker as required.
	initPollingMethod() error

	// initializes the tracker's internal state, call this when the tracker is created
	initializeState() error

	// makes an HTTP request to check the status of the LRO
	pollForStatus(ctx context.Context, sender autorest.Sender) error

	// updates internal tracker state, call this after each call to pollForStatus
	updatePollingState(provStateApl bool) error

	// returns the error response from the service, can be nil
	pollingError() error

	// returns the polling method being used
	pollingMethod() PollingMethodType

	// returns the state of the LRO as returned from the service
	pollingStatus() string

	// returns the URL used for polling status
	pollingURL() string

	// returns the URL used for the final GET to retrieve the resource
	finalGetURL() string

	// returns true if the LRO is in a terminal state
	hasTerminated() bool

	// returns true if the LRO is in a failed terminal state
	hasFailed() bool

	// returns true if the LRO is in a successful terminal state
	hasSucceeded() bool

	// returns the cached HTTP response after a call to pollForStatus(), can be nil
	latestResponse() *http.Response
}
|||
|
|||
// pollingTrackerBase holds the state shared by all verb-specific polling
// trackers. Exported fields carry JSON tags so a Future can be serialized
// and later resumed via MarshalJSON/UnmarshalJSON.
type pollingTrackerBase struct {
	// resp is the last response, either from the submission of the LRO or from polling
	resp *http.Response

	// method is the HTTP verb, this is needed for deserialization
	Method string `json:"method"`

	// rawBody is the raw JSON response body
	rawBody map[string]interface{}

	// denotes if polling is using async-operation or location header
	Pm PollingMethodType `json:"pollingMethod"`

	// the URL to poll for status
	URI string `json:"pollingURI"`

	// the state of the LRO as returned from the service
	State string `json:"lroState"`

	// the URL to GET for the final result
	FinalGetURI string `json:"resultURI"`

	// used to hold an error object returned from the service
	Err *ServiceError `json:"error,omitempty"`
}
|||
|
|||
// initializeState seeds the tracker from the initial LRO response: it records
// the HTTP verb, caches the body, and derives the starting LRO state.
func (pt *pollingTrackerBase) initializeState() error {
	// determine the initial polling state based on response body and/or HTTP status
	// code. this is applicable to the initial LRO response, not polling responses!
	pt.Method = pt.resp.Request.Method
	if err := pt.updateRawBody(); err != nil {
		return err
	}
	switch pt.resp.StatusCode {
	case http.StatusOK:
		// 200: use provisioningState from the body if present, else treat as done
		if ps := pt.getProvisioningState(); ps != nil {
			pt.State = *ps
			if pt.hasFailed() {
				pt.updateErrorFromResponse()
				return pt.pollingError()
			}
		} else {
			pt.State = operationSucceeded
		}
	case http.StatusCreated:
		// 201: provisioningState if present, else the LRO is still running
		if ps := pt.getProvisioningState(); ps != nil {
			pt.State = *ps
		} else {
			pt.State = operationInProgress
		}
	case http.StatusAccepted:
		pt.State = operationInProgress
	case http.StatusNoContent:
		pt.State = operationSucceeded
	default:
		// any other status code is treated as a failed operation
		pt.State = operationFailed
		pt.updateErrorFromResponse()
		return pt.pollingError()
	}
	return pt.initPollingMethod()
}
|||
|
|||
func (pt pollingTrackerBase) getProvisioningState() *string { |
|||
if pt.rawBody != nil && pt.rawBody["properties"] != nil { |
|||
p := pt.rawBody["properties"].(map[string]interface{}) |
|||
if ps := p["provisioningState"]; ps != nil { |
|||
s := ps.(string) |
|||
return &s |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// updateRawBody reads the cached response's body, restores it for downstream
// readers, and unmarshals it into pt.rawBody. An empty body leaves rawBody
// as an empty map.
func (pt *pollingTrackerBase) updateRawBody() error {
	pt.rawBody = map[string]interface{}{}
	if pt.resp.ContentLength != 0 {
		defer pt.resp.Body.Close()
		b, err := ioutil.ReadAll(pt.resp.Body)
		if err != nil {
			return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
		}
		// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
		if len(b) == 0 {
			return nil
		}
		// put the body back so it's available to other callers
		pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
		if err = json.Unmarshal(b, &pt.rawBody); err != nil {
			return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body")
		}
	}
	return nil
}
|||
|
|||
// pollForStatus issues one GET against the polling URL via sender and stores
// the response. On a polling-success status code the body is re-cached and
// any prior service error cleared; otherwise the error state is updated.
func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
	req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
	}

	req = req.WithContext(ctx)
	// apply any prepare decorators carried in the context (e.g. auth headers)
	preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
	req, err = preparer.Prepare(req)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
	}
	pt.resp, err = sender.Do(req)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
	}
	if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) {
		// reset the service error on success case
		pt.Err = nil
		err = pt.updateRawBody()
	} else {
		// check response body for error content
		pt.updateErrorFromResponse()
		err = pt.pollingError()
	}
	return err
}
|||
|
|||
// attempts to unmarshal a ServiceError type from the response body.
// if that fails then make a best attempt at creating something meaningful.
// NOTE: this assumes that the async operation has failed.
func (pt *pollingTrackerBase) updateErrorFromResponse() {
	var err error
	if pt.resp.ContentLength != 0 {
		type respErr struct {
			ServiceError *ServiceError `json:"error"`
		}
		re := respErr{}
		defer pt.resp.Body.Close()
		var b []byte
		// any read/unmarshal failure falls through to the synthesized error below
		if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
			goto Default
		}
		if err = json.Unmarshal(b, &re); err != nil {
			goto Default
		}
		// unmarshalling the error didn't yield anything, try unwrapped error
		if re.ServiceError == nil {
			err = json.Unmarshal(b, &re.ServiceError)
			if err != nil {
				goto Default
			}
		}
		// the unmarshaller will ensure re.ServiceError is non-nil
		// even if there was no content unmarshalled so check the code.
		if re.ServiceError.Code != "" {
			pt.Err = re.ServiceError
			return
		}
	}
Default:
	// fallback: synthesize a ServiceError from the polling status
	se := &ServiceError{
		Code:    pt.pollingStatus(),
		Message: "The async operation failed.",
	}
	if err != nil {
		se.InnerError = make(map[string]interface{})
		se.InnerError["unmarshalError"] = err.Error()
	}
	// stick the response body into the error object in hopes
	// it contains something useful to help diagnose the failure.
	if len(pt.rawBody) > 0 {
		se.AdditionalInfo = []map[string]interface{}{
			pt.rawBody,
		}
	}
	pt.Err = se
}
|||
|
|||
// updatePollingState refreshes pt.State after a poll: from the body's
// "status" for async-operation polling, otherwise from the HTTP status code
// and (when provStateApl is true) the body's provisioningState.
//
// NOTE(review): the unchecked .(string) assertion below will panic if the
// service returns a non-string "status" value — consider a checked assertion.
func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error {
	if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil {
		pt.State = pt.rawBody["status"].(string)
	} else {
		if pt.resp.StatusCode == http.StatusAccepted {
			pt.State = operationInProgress
		} else if provStateApl {
			if ps := pt.getProvisioningState(); ps != nil {
				pt.State = *ps
			} else {
				pt.State = operationSucceeded
			}
		} else {
			return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code")
		}
	}
	// if the operation has failed update the error state
	if pt.hasFailed() {
		pt.updateErrorFromResponse()
	}
	return nil
}
|||
|
|||
// pollingError returns the error reported by the service, or nil.
func (pt pollingTrackerBase) pollingError() error {
	// the explicit nil check avoids returning a non-nil error interface
	// that wraps a nil *ServiceError (the typed-nil interface trap)
	if pt.Err == nil {
		return nil
	}
	return pt.Err
}
|||
|
|||
// pollingMethod returns the mechanism in use (async-operation vs location header).
func (pt pollingTrackerBase) pollingMethod() PollingMethodType {
	return pt.Pm
}

// pollingStatus returns the LRO state last reported by the service.
func (pt pollingTrackerBase) pollingStatus() string {
	return pt.State
}

// pollingURL returns the URL polled for operation status.
func (pt pollingTrackerBase) pollingURL() string {
	return pt.URI
}

// finalGetURL returns the URL for the final GET that retrieves the resource.
func (pt pollingTrackerBase) finalGetURL() string {
	return pt.FinalGetURI
}
|||
|
|||
func (pt pollingTrackerBase) hasTerminated() bool { |
|||
return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) |
|||
} |
|||
|
|||
// hasFailed reports whether the LRO ended in a failed terminal state
// (Canceled or Failed); comparison is case-insensitive.
func (pt pollingTrackerBase) hasFailed() bool {
	return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed)
}

// hasSucceeded reports whether the LRO ended in the Succeeded state.
func (pt pollingTrackerBase) hasSucceeded() bool {
	return strings.EqualFold(pt.State, operationSucceeded)
}

// latestResponse returns the most recent cached HTTP response; can be nil.
func (pt pollingTrackerBase) latestResponse() *http.Response {
	return pt.resp
}
|||
|
|||
// error checking common to all trackers
func (pt pollingTrackerBase) baseCheckForErrors() error {
	// for Azure-AsyncOperations the response body cannot be nil or empty
	if pt.Pm == PollingAsyncOperation {
		if pt.resp.Body == nil || pt.resp.ContentLength == 0 {
			return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil")
		}
		// the cached body must expose a "status" property
		if pt.rawBody["status"] == nil {
			return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body")
		}
	}
	return nil
}
|||
|
|||
// default initialization of polling URL/method. each verb tracker will update this as required.
// the Azure-AsyncOperation header takes precedence over the Location header.
func (pt *pollingTrackerBase) initPollingMethod() error {
	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
		return err
	} else if ao != "" {
		pt.URI = ao
		pt.Pm = PollingAsyncOperation
		return nil
	}
	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
		return err
	} else if lh != "" {
		pt.URI = lh
		pt.Pm = PollingLocation
		return nil
	}
	// it's ok if we didn't find a polling header, this will be handled elsewhere
	return nil
}
|||
|
|||
// DELETE

// pollingTrackerDelete tracks long-running operations initiated via HTTP DELETE.
type pollingTrackerDelete struct {
	pollingTrackerBase
}
|||
|
|||
func (pt *pollingTrackerDelete) updatePollingMethod() error { |
|||
// for 201 the Location header is required
|
|||
if pt.resp.StatusCode == http.StatusCreated { |
|||
if lh, err := getURLFromLocationHeader(pt.resp); err != nil { |
|||
return err |
|||
} else if lh == "" { |
|||
return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") |
|||
} else { |
|||
pt.URI = lh |
|||
} |
|||
pt.Pm = PollingLocation |
|||
pt.FinalGetURI = pt.URI |
|||
} |
|||
// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
|
|||
if pt.resp.StatusCode == http.StatusAccepted { |
|||
ao, err := getURLFromAsyncOpHeader(pt.resp) |
|||
if err != nil { |
|||
return err |
|||
} else if ao != "" { |
|||
pt.URI = ao |
|||
pt.Pm = PollingAsyncOperation |
|||
} |
|||
// if the Location header is invalid and we already have a polling URL
|
|||
// then we don't care if the Location header URL is malformed.
|
|||
if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { |
|||
return err |
|||
} else if lh != "" { |
|||
if ao == "" { |
|||
pt.URI = lh |
|||
pt.Pm = PollingLocation |
|||
} |
|||
// when both headers are returned we use the value in the Location header for the final GET
|
|||
pt.FinalGetURI = lh |
|||
} |
|||
// make sure a polling URL was found
|
|||
if pt.URI == "" { |
|||
return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (pt pollingTrackerDelete) checkForErrors() error { |
|||
return pt.baseCheckForErrors() |
|||
} |
|||
|
|||
func (pt pollingTrackerDelete) provisioningStateApplicable() bool { |
|||
return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent |
|||
} |
|||
|
|||
// PATCH
|
|||
|
|||
// pollingTrackerPatch tracks long-running operations initiated via PATCH.
type pollingTrackerPatch struct {
	pollingTrackerBase
}

// updatePollingMethod selects the polling URL and mechanism for PATCH responses.
// By default the original request URI is used for both polling and the final GET.
func (pt *pollingTrackerPatch) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()
	}
	if pt.FinalGetURI == "" {
		pt.FinalGetURI = pt.resp.Request.URL.String()
	}
	if pt.Pm == PollingUnknown {
		pt.Pm = PollingRequestURI
	}
	// for 201 it's permissible for no headers to be returned
	if pt.resp.StatusCode == http.StatusCreated {
		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	// note the absence of the "final GET" mechanism for PATCH
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// without an Azure-AsyncOperation header a Location header is mandatory for 202
		if ao == "" {
			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
				return err
			} else if lh == "" {
				return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
			} else {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
		}
	}
	return nil
}

// checkForErrors performs the error checks common to all trackers.
func (pt pollingTrackerPatch) checkForErrors() error {
	return pt.baseCheckForErrors()
}

// provisioningStateApplicable reports whether the response status is one for
// which a provisioning state may be present (200 or 201 for PATCH).
func (pt pollingTrackerPatch) provisioningStateApplicable() bool {
	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
}
|||
|
|||
// POST
|
|||
|
|||
// pollingTrackerPost tracks long-running operations initiated via POST.
type pollingTrackerPost struct {
	pollingTrackerBase
}

// updatePollingMethod selects the polling URL and mechanism for POST responses.
// 201 requires a Location header; for 202 the Azure-AsyncOperation header is
// preferred with Location as a fallback.
func (pt *pollingTrackerPost) updatePollingMethod() error {
	// 201 requires Location header
	if pt.resp.StatusCode == http.StatusCreated {
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
			return err
		} else if lh == "" {
			return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response")
		} else {
			pt.URI = lh
			pt.FinalGetURI = lh
			pt.Pm = PollingLocation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// if the Location header is invalid and we already have a polling URL
		// then we don't care if the Location header URL is malformed.
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
			return err
		} else if lh != "" {
			if ao == "" {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
			// when both headers are returned we use the value in the Location header for the final GET
			pt.FinalGetURI = lh
		}
		// make sure a polling URL was found
		if pt.URI == "" {
			return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
		}
	}
	return nil
}

// checkForErrors performs the error checks common to all trackers.
func (pt pollingTrackerPost) checkForErrors() error {
	return pt.baseCheckForErrors()
}

// provisioningStateApplicable reports whether the response status is one for
// which a provisioning state may be present (200 or 204 for POST).
func (pt pollingTrackerPost) provisioningStateApplicable() bool {
	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
}
|||
|
|||
// PUT
|
|||
|
|||
// pollingTrackerPut tracks long-running operations initiated via PUT.
type pollingTrackerPut struct {
	pollingTrackerBase
}

// updatePollingMethod selects the polling URL and mechanism for PUT responses.
// By default the original request URI is used for both polling and the final GET.
func (pt *pollingTrackerPut) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()
	}
	if pt.FinalGetURI == "" {
		pt.FinalGetURI = pt.resp.Request.URL.String()
	}
	if pt.Pm == PollingUnknown {
		pt.Pm = PollingRequestURI
	}
	// for 201 it's permissible for no headers to be returned
	if pt.resp.StatusCode == http.StatusCreated {
		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// if the Location header is invalid and we already have a polling URL
		// then we don't care if the Location header URL is malformed.
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
			return err
		} else if lh != "" {
			if ao == "" {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
			// NOTE(review): unlike the DELETE/POST trackers, FinalGetURI is not
			// updated from the Location header here — the final GET stays on the
			// original URI. Presumably intentional for PUT; verify upstream.
		}
		// make sure a polling URL was found
		if pt.URI == "" {
			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
		}
	}
	return nil
}

// checkForErrors performs the common tracker checks and additionally requires
// a non-empty response body when no LRO (polling) headers are present.
func (pt pollingTrackerPut) checkForErrors() error {
	err := pt.baseCheckForErrors()
	if err != nil {
		return err
	}
	// if there are no LRO headers then the body cannot be empty
	ao, err := getURLFromAsyncOpHeader(pt.resp)
	if err != nil {
		return err
	}
	lh, err := getURLFromLocationHeader(pt.resp)
	if err != nil {
		return err
	}
	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
	}
	return nil
}

// provisioningStateApplicable reports whether the response status is one for
// which a provisioning state may be present (200 or 201 for PUT).
func (pt pollingTrackerPut) provisioningStateApplicable() bool {
	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
}
|||
|
|||
// creates a polling tracker based on the verb of the original request
|
|||
func createPollingTracker(resp *http.Response) (pollingTracker, error) { |
|||
var pt pollingTracker |
|||
switch strings.ToUpper(resp.Request.Method) { |
|||
case http.MethodDelete: |
|||
pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} |
|||
case http.MethodPatch: |
|||
pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} |
|||
case http.MethodPost: |
|||
pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} |
|||
case http.MethodPut: |
|||
pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} |
|||
default: |
|||
return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) |
|||
} |
|||
if err := pt.initializeState(); err != nil { |
|||
return pt, err |
|||
} |
|||
// this initializes the polling header values, we do this during creation in case the
|
|||
// initial response send us invalid values; this way the API call will return a non-nil
|
|||
// error (not doing this means the error shows up in Future.Done)
|
|||
return pt, pt.updatePollingMethod() |
|||
} |
|||
|
|||
// gets the polling URL from the Azure-AsyncOperation header.
|
|||
// ensures the URL is well-formed and absolute.
|
|||
func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { |
|||
s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) |
|||
if s == "" { |
|||
return "", nil |
|||
} |
|||
if !isValidURL(s) { |
|||
return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// gets the polling URL from the Location header.
|
|||
// ensures the URL is well-formed and absolute.
|
|||
func getURLFromLocationHeader(resp *http.Response) (string, error) { |
|||
s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) |
|||
if s == "" { |
|||
return "", nil |
|||
} |
|||
if !isValidURL(s) { |
|||
return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// verify that the URL is valid and absolute
|
|||
// isValidURL verifies that s parses as a URL and is absolute.
func isValidURL(s string) bool {
	parsed, err := url.Parse(s)
	if err != nil {
		return false
	}
	return parsed.IsAbs()
}
|||
|
|||
// PollingMethodType defines a type used for enumerating polling mechanisms.
|
|||
// PollingMethodType defines a type used for enumerating polling mechanisms.
// The zero value is PollingUnknown.
type PollingMethodType string

const (
	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
	PollingAsyncOperation PollingMethodType = "AsyncOperation"

	// PollingLocation indicates the polling method uses the Location header.
	PollingLocation PollingMethodType = "Location"

	// PollingRequestURI indicates the polling method uses the original request URI.
	PollingRequestURI PollingMethodType = "RequestURI"

	// PollingUnknown indicates an unknown polling method and is the default value.
	PollingUnknown PollingMethodType = ""
)
|||
|
|||
// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
|
|||
// AsyncOpIncompleteError is the type that's returned from a future that has
// not completed.
type AsyncOpIncompleteError struct {
	// FutureType is the name of the type composed of a azure.Future.
	FutureType string
}

// Error returns an error message including the originating type name of the error.
func (e AsyncOpIncompleteError) Error() string {
	return fmt.Sprint(e.FutureType, ": asynchronous operation has not completed")
}

// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the
// specified parameters.
func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
	return AsyncOpIncompleteError{FutureType: futureType}
}
@ -0,0 +1,326 @@ |
|||
// Package azure provides Azure-specific implementations used with AutoRest.
|
|||
// See the included examples for more detail.
|
|||
package azure |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"regexp" |
|||
"strconv" |
|||
"strings" |
|||
|
|||
"github.com/Azure/go-autorest/autorest" |
|||
) |
|||
|
|||
// Azure-specific HTTP extension headers used throughout this package.
const (
	// HeaderClientID is the Azure extension header to set a user-specified request ID.
	HeaderClientID = "x-ms-client-request-id"

	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
	// should be included in the response.
	HeaderReturnClientID = "x-ms-return-client-request-id"

	// HeaderRequestID is the Azure extension header of the service generated request ID returned
	// in the response.
	HeaderRequestID = "x-ms-request-id"
)
|||
|
|||
// ServiceError encapsulates the error response from an Azure service.
|
|||
// It adhears to the OData v4 specification for error responses.
|
|||
// ServiceError encapsulates the error response from an Azure service.
// It adheres to the OData v4 specification for error responses.
type ServiceError struct {
	Code           string                   `json:"code"`
	Message        string                   `json:"message"`
	Target         *string                  `json:"target"`
	Details        []map[string]interface{} `json:"details"`
	InnerError     map[string]interface{}   `json:"innererror"`
	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}

// jsonField renders a named optional field as JSON, falling back to %v
// formatting when marshaling fails. The leading space separates it from the
// preceding fields in the error string.
func jsonField(name string, v interface{}) string {
	d, err := json.Marshal(v)
	if err != nil {
		return fmt.Sprintf(" %s=%v", name, v)
	}
	return fmt.Sprintf(" %s=%v", name, string(d))
}

// Error returns a human-readable representation of the service error.
func (se ServiceError) Error() string {
	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)

	if se.Target != nil {
		result += fmt.Sprintf(" Target=%q", *se.Target)
	}

	// BUGFIX: previously, when json.Marshal failed, both the %v fallback AND
	// the (empty) marshaled string were appended, duplicating the field label.
	// Each optional field is now rendered exactly once.
	if se.Details != nil {
		result += jsonField("Details", se.Details)
	}

	if se.InnerError != nil {
		result += jsonField("InnerError", se.InnerError)
	}

	if se.AdditionalInfo != nil {
		result += jsonField("AdditionalInfo", se.AdditionalInfo)
	}

	return result
}
|||
|
|||
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
|
|||
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
func (se *ServiceError) UnmarshalJSON(b []byte) error {
	// per the OData v4 spec the details field must be an array of JSON objects.
	// unfortunately not all services adhear to the spec and just return a single
	// object instead of an array with one object. so we have to perform some
	// shenanigans to accommodate both cases.
	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091

	// serviceError1 is the spec-compliant shape: details is an array of objects.
	type serviceError1 struct {
		Code           string                   `json:"code"`
		Message        string                   `json:"message"`
		Target         *string                  `json:"target"`
		Details        []map[string]interface{} `json:"details"`
		InnerError     map[string]interface{}   `json:"innererror"`
		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
	}

	// serviceError2 is the non-compliant shape: details is a single object.
	type serviceError2 struct {
		Code           string                   `json:"code"`
		Message        string                   `json:"message"`
		Target         *string                  `json:"target"`
		Details        map[string]interface{}   `json:"details"`
		InnerError     map[string]interface{}   `json:"innererror"`
		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
	}

	// try the spec-compliant shape first
	se1 := serviceError1{}
	err := json.Unmarshal(b, &se1)
	if err == nil {
		se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
		return nil
	}

	// fall back to the single-object shape, wrapping details into a one-element slice
	se2 := serviceError2{}
	err = json.Unmarshal(b, &se2)
	if err == nil {
		se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
		se.Details = append(se.Details, se2.Details)
		return nil
	}
	// neither shape matched; return the last unmarshal error
	return err
}
|||
|
|||
// populate assigns all ServiceError fields from the given values; used by
// UnmarshalJSON to fill the receiver regardless of which wire shape matched.
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
	se.Code = code
	se.Message = message
	se.Target = target
	se.Details = details
	se.InnerError = inner
	se.AdditionalInfo = additional
}
|||
|
|||
// RequestError describes an error response returned by Azure service.
|
|||
// RequestError describes an error response returned by Azure service.
type RequestError struct {
	autorest.DetailedError

	// ServiceError is the error returned by the Azure service.
	ServiceError *ServiceError `json:"error"`

	// RequestID is the request id (from the x-ms-request-id header) of the request.
	RequestID string
}
|||
|
|||
// Error returns a human-friendly error message from service error.
|
|||
// Error returns a human-friendly error message from service error,
// combining the HTTP status code with the embedded ServiceError.
func (e RequestError) Error() string {
	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
		e.StatusCode, e.ServiceError)
}
|||
|
|||
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
|
|||
func IsAzureError(e error) bool { |
|||
_, ok := e.(*RequestError) |
|||
return ok |
|||
} |
|||
|
|||
// Resource contains details about an Azure resource.
|
|||
// Resource contains details about an Azure resource.
type Resource struct {
	SubscriptionID string
	ResourceGroup  string
	Provider       string
	ResourceType   string
	ResourceName   string
}

// resourceIDPattern matches ARM resource IDs (case-insensitively).
// PERF: compiled once at package scope instead of on every ParseResourceID
// call, which previously recompiled the pattern per invocation.
var resourceIDPattern = regexp.MustCompile(`(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`)

// ParseResourceID parses a resource ID into a Resource struct.
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
// An error is returned when the ID does not match the expected format.
func ParseResourceID(resourceID string) (Resource, error) {
	match := resourceIDPattern.FindStringSubmatch(resourceID)

	if len(match) == 0 {
		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
	}

	// for nested resources the name is the final path segment
	v := strings.Split(match[5], "/")
	resourceName := v[len(v)-1]

	result := Resource{
		SubscriptionID: match[1],
		ResourceGroup:  match[2],
		Provider:       match[3],
		ResourceType:   match[4],
		ResourceName:   resourceName,
	}

	return result, nil
}
|||
|
|||
// NewErrorWithError creates a new Error conforming object from the
|
|||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
|||
// if resp is nil), message, and original error. message is treated as a format
|
|||
// string to which the optional args apply.
|
|||
// NewErrorWithError creates a new Error conforming object from the
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
// if resp is nil), message, and original error. message is treated as a format
// string to which the optional args apply.
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
	// if the original error is already a RequestError, pass it through unchanged
	if v, ok := original.(*RequestError); ok {
		return *v
	}

	statusCode := autorest.UndefinedStatusCode
	if resp != nil {
		statusCode = resp.StatusCode
	}
	return RequestError{
		DetailedError: autorest.DetailedError{
			Original:    original,
			PackageType: packageType,
			Method:      method,
			StatusCode:  statusCode,
			Message:     fmt.Sprintf(message, args...),
		},
	}
}
|||
|
|||
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|||
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
|
|||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
|
|||
// header to true such that UUID accompanies the http.Response.
|
|||
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
// header to true such that UUID accompanies the http.Response.
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
	// build one preparer that applies both headers
	preparer := autorest.CreatePreparer(
		WithClientID(uuid),
		WithReturnClientID(true))

	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			// run the wrapped preparer first, then layer the client-ID headers on top
			r, err := p.Prepare(r)
			if err != nil {
				return r, err
			}
			return preparer.Prepare(r)
		})
	}
}
|||
|
|||
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|||
// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
|
|||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
|
|||
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
func WithClientID(uuid string) autorest.PrepareDecorator {
	return autorest.WithHeader(HeaderClientID, uuid)
}
|||
|
|||
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|||
// x-ms-return-client-request-id whose boolean value indicates if the value of the
|
|||
// x-ms-client-request-id header should be included in the http.Response.
|
|||
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-return-client-request-id whose boolean value indicates if the value of the
// x-ms-client-request-id header should be included in the http.Response.
func WithReturnClientID(b bool) autorest.PrepareDecorator {
	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
}
|||
|
|||
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
|
|||
// http.Request sent to the service (and returned in the http.Response)
|
|||
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
// http.Request sent to the service (and returned in the http.Response).
func ExtractClientID(resp *http.Response) string {
	return autorest.ExtractHeaderValue(HeaderClientID, resp)
}
|||
|
|||
// ExtractRequestID extracts the Azure server generated request identifier from the
|
|||
// x-ms-request-id header.
|
|||
// ExtractRequestID extracts the Azure server generated request identifier from the
// x-ms-request-id header.
func ExtractRequestID(resp *http.Response) string {
	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
}
|||
|
|||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
|
|||
// azure.RequestError by reading the response body unless the response HTTP status code
|
|||
// is among the set passed.
|
|||
//
|
|||
// If there is a chance service may return responses other than the Azure error
|
|||
// format and the response cannot be parsed into an error, a decoding error will
|
|||
// be returned containing the response body. In any case, the Responder will
|
|||
// return an error if the status code is not satisfied.
|
|||
//
|
|||
// If this Responder returns an error, the response body will be replaced with
|
|||
// an in-memory reader, which needs no further closing.
|
|||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
// azure.RequestError by reading the response body unless the response HTTP status code
// is among the set passed.
//
// If there is a chance service may return responses other than the Azure error
// format and the response cannot be parsed into an error, a decoding error will
// be returned containing the response body. In any case, the Responder will
// return an error if the status code is not satisfied.
//
// If this Responder returns an error, the response body will be replaced with
// an in-memory reader, which needs no further closing.
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
	return func(r autorest.Responder) autorest.Responder {
		return autorest.ResponderFunc(func(resp *http.Response) error {
			err := r.Respond(resp)
			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
				var e RequestError
				defer resp.Body.Close()

				// Copy and replace the Body in case it does not contain an error object.
				// This will leave the Body available to the caller.
				b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
				resp.Body = ioutil.NopCloser(&b)
				if decodeErr != nil {
					return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
				}
				if e.ServiceError == nil {
					// Check if error is unwrapped ServiceError
					if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil {
						return err
					}
				}
				if e.ServiceError.Message == "" {
					// if we're here it means the returned error wasn't OData v4 compliant.
					// try to unmarshal the body as raw JSON in hopes of getting something.
					rawBody := map[string]interface{}{}
					if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil {
						return err
					}
					e.ServiceError = &ServiceError{
						Code:    "Unknown",
						Message: "Unknown service error",
					}
					if len(rawBody) > 0 {
						e.ServiceError.Details = []map[string]interface{}{rawBody}
					}
				}
				e.Response = resp
				e.RequestID = ExtractRequestID(resp)
				// NOTE(review): StatusCode is interface-typed on DetailedError, so the
				// nil check detects "never set" rather than a zero status code.
				if e.StatusCode == nil {
					e.StatusCode = resp.StatusCode
				}
				err = &e
			}
			return err
		})
	}
}
@ -0,0 +1,244 @@ |
|||
package azure |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"os" |
|||
"strings" |
|||
) |
|||
|
|||
// Constants controlling environment selection and placeholder values.
const (
	// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
	// to be used while populating the Azure Environment.
	EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"

	// NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
	NotAvailable = "N/A"
)
|||
|
|||
// environments maps upper-cased well-known cloud names to their Environment
// definitions; used for name-based lookup.
var environments = map[string]Environment{
	"AZURECHINACLOUD":        ChinaCloud,
	"AZUREGERMANCLOUD":       GermanCloud,
	"AZUREPUBLICCLOUD":       PublicCloud,
	"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
}
|||
|
|||
// ResourceIdentifier contains a set of Azure resource IDs.
|
|||
// ResourceIdentifier contains a set of Azure resource IDs.
type ResourceIdentifier struct {
	Graph               string `json:"graph"`
	KeyVault            string `json:"keyVault"`
	Datalake            string `json:"datalake"`
	Batch               string `json:"batch"`
	OperationalInsights string `json:"operationalInsights"`
	Storage             string `json:"storage"`
}
|||
|
|||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
|||
// Environment represents a set of endpoints for each of Azure's Clouds.
type Environment struct {
	Name                         string             `json:"name"`
	ManagementPortalURL          string             `json:"managementPortalURL"`
	PublishSettingsURL           string             `json:"publishSettingsURL"`
	ServiceManagementEndpoint    string             `json:"serviceManagementEndpoint"`
	ResourceManagerEndpoint      string             `json:"resourceManagerEndpoint"`
	ActiveDirectoryEndpoint      string             `json:"activeDirectoryEndpoint"`
	GalleryEndpoint              string             `json:"galleryEndpoint"`
	KeyVaultEndpoint             string             `json:"keyVaultEndpoint"`
	GraphEndpoint                string             `json:"graphEndpoint"`
	ServiceBusEndpoint           string             `json:"serviceBusEndpoint"`
	BatchManagementEndpoint      string             `json:"batchManagementEndpoint"`
	StorageEndpointSuffix        string             `json:"storageEndpointSuffix"`
	SQLDatabaseDNSSuffix         string             `json:"sqlDatabaseDNSSuffix"`
	TrafficManagerDNSSuffix      string             `json:"trafficManagerDNSSuffix"`
	KeyVaultDNSSuffix            string             `json:"keyVaultDNSSuffix"`
	ServiceBusEndpointSuffix     string             `json:"serviceBusEndpointSuffix"`
	ServiceManagementVMDNSSuffix string             `json:"serviceManagementVMDNSSuffix"`
	ResourceManagerVMDNSSuffix   string             `json:"resourceManagerVMDNSSuffix"`
	ContainerRegistryDNSSuffix   string             `json:"containerRegistryDNSSuffix"`
	CosmosDBDNSSuffix            string             `json:"cosmosDBDNSSuffix"`
	TokenAudience                string             `json:"tokenAudience"`
	ResourceIdentifiers          ResourceIdentifier `json:"resourceIdentifiers"`
}
|||
|
|||
// Predefined Environment values for the well-known Azure clouds.
var (
	// PublicCloud is the default public Azure cloud environment
	PublicCloud = Environment{
		Name:                         "AzurePublicCloud",
		ManagementPortalURL:          "https://manage.windowsazure.com/",
		PublishSettingsURL:           "https://manage.windowsazure.com/publishsettings/index",
		ServiceManagementEndpoint:    "https://management.core.windows.net/",
		ResourceManagerEndpoint:      "https://management.azure.com/",
		ActiveDirectoryEndpoint:      "https://login.microsoftonline.com/",
		GalleryEndpoint:              "https://gallery.azure.com/",
		KeyVaultEndpoint:             "https://vault.azure.net/",
		GraphEndpoint:                "https://graph.windows.net/",
		ServiceBusEndpoint:           "https://servicebus.windows.net/",
		BatchManagementEndpoint:      "https://batch.core.windows.net/",
		StorageEndpointSuffix:        "core.windows.net",
		SQLDatabaseDNSSuffix:         "database.windows.net",
		TrafficManagerDNSSuffix:      "trafficmanager.net",
		KeyVaultDNSSuffix:            "vault.azure.net",
		ServiceBusEndpointSuffix:     "servicebus.windows.net",
		ServiceManagementVMDNSSuffix: "cloudapp.net",
		ResourceManagerVMDNSSuffix:   "cloudapp.azure.com",
		ContainerRegistryDNSSuffix:   "azurecr.io",
		CosmosDBDNSSuffix:            "documents.azure.com",
		TokenAudience:                "https://management.azure.com/",
		ResourceIdentifiers: ResourceIdentifier{
			Graph:               "https://graph.windows.net/",
			KeyVault:            "https://vault.azure.net",
			Datalake:            "https://datalake.azure.net/",
			Batch:               "https://batch.core.windows.net/",
			OperationalInsights: "https://api.loganalytics.io",
			Storage:             "https://storage.azure.com/",
		},
	}

	// USGovernmentCloud is the cloud environment for the US Government
	USGovernmentCloud = Environment{
		Name:                         "AzureUSGovernmentCloud",
		ManagementPortalURL:          "https://manage.windowsazure.us/",
		PublishSettingsURL:           "https://manage.windowsazure.us/publishsettings/index",
		ServiceManagementEndpoint:    "https://management.core.usgovcloudapi.net/",
		ResourceManagerEndpoint:      "https://management.usgovcloudapi.net/",
		ActiveDirectoryEndpoint:      "https://login.microsoftonline.us/",
		GalleryEndpoint:              "https://gallery.usgovcloudapi.net/",
		KeyVaultEndpoint:             "https://vault.usgovcloudapi.net/",
		GraphEndpoint:                "https://graph.windows.net/",
		ServiceBusEndpoint:           "https://servicebus.usgovcloudapi.net/",
		BatchManagementEndpoint:      "https://batch.core.usgovcloudapi.net/",
		StorageEndpointSuffix:        "core.usgovcloudapi.net",
		SQLDatabaseDNSSuffix:         "database.usgovcloudapi.net",
		TrafficManagerDNSSuffix:      "usgovtrafficmanager.net",
		KeyVaultDNSSuffix:            "vault.usgovcloudapi.net",
		ServiceBusEndpointSuffix:     "servicebus.usgovcloudapi.net",
		ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
		ResourceManagerVMDNSSuffix:   "cloudapp.windowsazure.us",
		ContainerRegistryDNSSuffix:   "azurecr.us",
		CosmosDBDNSSuffix:            "documents.azure.us",
		TokenAudience:                "https://management.usgovcloudapi.net/",
		ResourceIdentifiers: ResourceIdentifier{
			Graph:               "https://graph.windows.net/",
			KeyVault:            "https://vault.usgovcloudapi.net",
			Datalake:            NotAvailable,
			Batch:               "https://batch.core.usgovcloudapi.net/",
			OperationalInsights: "https://api.loganalytics.us",
			Storage:             "https://storage.azure.com/",
		},
	}

	// ChinaCloud is the cloud environment operated in China
	ChinaCloud = Environment{
		Name:                         "AzureChinaCloud",
		ManagementPortalURL:          "https://manage.chinacloudapi.com/",
		PublishSettingsURL:           "https://manage.chinacloudapi.com/publishsettings/index",
		ServiceManagementEndpoint:    "https://management.core.chinacloudapi.cn/",
		ResourceManagerEndpoint:      "https://management.chinacloudapi.cn/",
		ActiveDirectoryEndpoint:      "https://login.chinacloudapi.cn/",
		GalleryEndpoint:              "https://gallery.chinacloudapi.cn/",
		KeyVaultEndpoint:             "https://vault.azure.cn/",
		GraphEndpoint:                "https://graph.chinacloudapi.cn/",
		ServiceBusEndpoint:           "https://servicebus.chinacloudapi.cn/",
		BatchManagementEndpoint:      "https://batch.chinacloudapi.cn/",
		StorageEndpointSuffix:        "core.chinacloudapi.cn",
		SQLDatabaseDNSSuffix:         "database.chinacloudapi.cn",
		TrafficManagerDNSSuffix:      "trafficmanager.cn",
		KeyVaultDNSSuffix:            "vault.azure.cn",
		ServiceBusEndpointSuffix:     "servicebus.chinacloudapi.cn",
		ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
		ResourceManagerVMDNSSuffix:   "cloudapp.azure.cn",
		ContainerRegistryDNSSuffix:   "azurecr.cn",
		CosmosDBDNSSuffix:            "documents.azure.cn",
		TokenAudience:                "https://management.chinacloudapi.cn/",
		ResourceIdentifiers: ResourceIdentifier{
			Graph:               "https://graph.chinacloudapi.cn/",
			KeyVault:            "https://vault.azure.cn",
			Datalake:            NotAvailable,
			Batch:               "https://batch.chinacloudapi.cn/",
			OperationalInsights: NotAvailable,
			Storage:             "https://storage.azure.com/",
		},
	}

	// GermanCloud is the cloud environment operated in Germany
	GermanCloud = Environment{
		Name: "AzureGermanCloud",
		// NOTE(review): the portal URL uses plain http:// while every other
		// endpoint is https — matches upstream go-autorest; verify if updating.
		ManagementPortalURL:          "http://portal.microsoftazure.de/",
		PublishSettingsURL:           "https://manage.microsoftazure.de/publishsettings/index",
		ServiceManagementEndpoint:    "https://management.core.cloudapi.de/",
		ResourceManagerEndpoint:      "https://management.microsoftazure.de/",
		ActiveDirectoryEndpoint:      "https://login.microsoftonline.de/",
		GalleryEndpoint:              "https://gallery.cloudapi.de/",
		KeyVaultEndpoint:             "https://vault.microsoftazure.de/",
		GraphEndpoint:                "https://graph.cloudapi.de/",
		ServiceBusEndpoint:           "https://servicebus.cloudapi.de/",
		BatchManagementEndpoint:      "https://batch.cloudapi.de/",
		StorageEndpointSuffix:        "core.cloudapi.de",
		SQLDatabaseDNSSuffix:         "database.cloudapi.de",
		TrafficManagerDNSSuffix:      "azuretrafficmanager.de",
		KeyVaultDNSSuffix:            "vault.microsoftazure.de",
		ServiceBusEndpointSuffix:     "servicebus.cloudapi.de",
		ServiceManagementVMDNSSuffix: "azurecloudapp.de",
		ResourceManagerVMDNSSuffix:   "cloudapp.microsoftazure.de",
		ContainerRegistryDNSSuffix:   NotAvailable,
		CosmosDBDNSSuffix:            "documents.microsoftazure.de",
		TokenAudience:                "https://management.microsoftazure.de/",
		ResourceIdentifiers: ResourceIdentifier{
			Graph:               "https://graph.cloudapi.de/",
			KeyVault:            "https://vault.microsoftazure.de",
			Datalake:            NotAvailable,
			Batch:               "https://batch.cloudapi.de/",
			OperationalInsights: NotAvailable,
			Storage:             "https://storage.azure.com/",
		},
	}
)
|||
|
|||
// EnvironmentFromName returns an Environment based on the common name specified.
|
|||
func EnvironmentFromName(name string) (Environment, error) { |
|||
// IMPORTANT
|
|||
// As per @radhikagupta5:
|
|||
// This is technical debt, fundamentally here because Kubernetes is not currently accepting
|
|||
// contributions to the providers. Once that is an option, the provider should be updated to
|
|||
// directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
|
|||
// from this method based on the name that is provided to us.
|
|||
if strings.EqualFold(name, "AZURESTACKCLOUD") { |
|||
return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) |
|||
} |
|||
|
|||
name = strings.ToUpper(name) |
|||
env, ok := environments[name] |
|||
if !ok { |
|||
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) |
|||
} |
|||
|
|||
return env, nil |
|||
} |
|||
|
|||
// EnvironmentFromFile loads an Environment from a configuration file available on disk.
|
|||
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
|
|||
// endpoints.
|
|||
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { |
|||
fileContents, err := ioutil.ReadFile(location) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
err = json.Unmarshal(fileContents, &unmarshaled) |
|||
|
|||
return |
|||
} |
@ -0,0 +1,245 @@ |
|||
package azure |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"strings" |
|||
|
|||
"github.com/Azure/go-autorest/autorest" |
|||
) |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// audience is the list of token audiences returned by the metadata endpoint.
type audience []string

// authentication models the "authentication" section of the discovery
// document returned by the metadata endpoint.
type authentication struct {
	LoginEndpoint string   `json:"loginEndpoint"`
	Audiences     audience `json:"audiences"`
}

// environmentMetadataInfo models the discovery document returned by a
// resource manager's "/metadata/endpoints" API; it is the source used to
// synthesize a hybrid Environment in EnvironmentFromURL.
type environmentMetadataInfo struct {
	GalleryEndpoint string         `json:"galleryEndpoint"`
	GraphEndpoint   string         `json:"graphEndpoint"`
	PortalEndpoint  string         `json:"portalEndpoint"`
	Authentication  authentication `json:"authentication"`
}
|||
|
|||
// EnvironmentProperty represent property names that clients can override
// when constructing an Environment (see OverrideProperty and
// EnvironmentFromURL).
type EnvironmentProperty string

const (
	// EnvironmentName overrides the Environment.Name field.
	EnvironmentName EnvironmentProperty = "name"
	// EnvironmentManagementPortalURL overrides the Environment.ManagementPortalURL field.
	EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
	// EnvironmentPublishSettingsURL overrides the Environment.PublishSettingsURL field.
	EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
	// EnvironmentServiceManagementEndpoint overrides the Environment.ServiceManagementEndpoint field.
	EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
	// EnvironmentResourceManagerEndpoint overrides the Environment.ResourceManagerEndpoint field.
	EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
	// EnvironmentActiveDirectoryEndpoint overrides the Environment.ActiveDirectoryEndpoint field.
	EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
	// EnvironmentGalleryEndpoint overrides the Environment.GalleryEndpoint field.
	EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
	// EnvironmentKeyVaultEndpoint overrides the Environment.KeyVaultEndpoint field.
	EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
	// EnvironmentGraphEndpoint overrides the Environment.GraphEndpoint field.
	EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
	// EnvironmentServiceBusEndpoint overrides the Environment.ServiceBusEndpoint field.
	EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
	// EnvironmentBatchManagementEndpoint overrides the Environment.BatchManagementEndpoint field.
	EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
	// EnvironmentStorageEndpointSuffix overrides the Environment.StorageEndpointSuffix field.
	EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
	// EnvironmentSQLDatabaseDNSSuffix overrides the Environment.SQLDatabaseDNSSuffix field.
	EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
	// EnvironmentTrafficManagerDNSSuffix overrides the Environment.TrafficManagerDNSSuffix field.
	EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
	// EnvironmentKeyVaultDNSSuffix overrides the Environment.KeyVaultDNSSuffix field.
	EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
	// EnvironmentServiceBusEndpointSuffix overrides the Environment.ServiceBusEndpointSuffix field.
	EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
	// EnvironmentServiceManagementVMDNSSuffix overrides the Environment.ServiceManagementVMDNSSuffix field.
	EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
	// EnvironmentResourceManagerVMDNSSuffix overrides the Environment.ResourceManagerVMDNSSuffix field.
	EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
	// EnvironmentContainerRegistryDNSSuffix overrides the Environment.ContainerRegistryDNSSuffix field.
	EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
	// EnvironmentTokenAudience overrides the Environment.TokenAudience field.
	EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
)
|||
|
|||
// OverrideProperty represents property name and value that clients can override
// when building an Environment via EnvironmentFromURL.
type OverrideProperty struct {
	// Key selects which Environment field to override.
	Key EnvironmentProperty
	// Value is the replacement value for the selected field.
	Value string
}
|||
|
|||
// EnvironmentFromURL loads an Environment from a URL
|
|||
// This function is particularly useful in the Hybrid Cloud model, where one may define their own
|
|||
// endpoints.
|
|||
func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { |
|||
var metadataEnvProperties environmentMetadataInfo |
|||
|
|||
if resourceManagerEndpoint == "" { |
|||
return environment, fmt.Errorf("Metadata resource manager endpoint is empty") |
|||
} |
|||
|
|||
if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { |
|||
return environment, err |
|||
} |
|||
|
|||
// Give priority to user's override values
|
|||
overrideProperties(&environment, properties) |
|||
|
|||
if environment.Name == "" { |
|||
environment.Name = "HybridEnvironment" |
|||
} |
|||
stampDNSSuffix := environment.StorageEndpointSuffix |
|||
if stampDNSSuffix == "" { |
|||
stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") |
|||
environment.StorageEndpointSuffix = stampDNSSuffix |
|||
} |
|||
if environment.KeyVaultDNSSuffix == "" { |
|||
environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) |
|||
} |
|||
if environment.KeyVaultEndpoint == "" { |
|||
environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) |
|||
} |
|||
if environment.TokenAudience == "" { |
|||
environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] |
|||
} |
|||
if environment.ActiveDirectoryEndpoint == "" { |
|||
environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint |
|||
} |
|||
if environment.ResourceManagerEndpoint == "" { |
|||
environment.ResourceManagerEndpoint = resourceManagerEndpoint |
|||
} |
|||
if environment.GalleryEndpoint == "" { |
|||
environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint |
|||
} |
|||
if environment.GraphEndpoint == "" { |
|||
environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint |
|||
} |
|||
|
|||
return environment, nil |
|||
} |
|||
|
|||
func overrideProperties(environment *Environment, properties []OverrideProperty) { |
|||
for _, property := range properties { |
|||
switch property.Key { |
|||
case EnvironmentName: |
|||
{ |
|||
environment.Name = property.Value |
|||
} |
|||
case EnvironmentManagementPortalURL: |
|||
{ |
|||
environment.ManagementPortalURL = property.Value |
|||
} |
|||
case EnvironmentPublishSettingsURL: |
|||
{ |
|||
environment.PublishSettingsURL = property.Value |
|||
} |
|||
case EnvironmentServiceManagementEndpoint: |
|||
{ |
|||
environment.ServiceManagementEndpoint = property.Value |
|||
} |
|||
case EnvironmentResourceManagerEndpoint: |
|||
{ |
|||
environment.ResourceManagerEndpoint = property.Value |
|||
} |
|||
case EnvironmentActiveDirectoryEndpoint: |
|||
{ |
|||
environment.ActiveDirectoryEndpoint = property.Value |
|||
} |
|||
case EnvironmentGalleryEndpoint: |
|||
{ |
|||
environment.GalleryEndpoint = property.Value |
|||
} |
|||
case EnvironmentKeyVaultEndpoint: |
|||
{ |
|||
environment.KeyVaultEndpoint = property.Value |
|||
} |
|||
case EnvironmentGraphEndpoint: |
|||
{ |
|||
environment.GraphEndpoint = property.Value |
|||
} |
|||
case EnvironmentServiceBusEndpoint: |
|||
{ |
|||
environment.ServiceBusEndpoint = property.Value |
|||
} |
|||
case EnvironmentBatchManagementEndpoint: |
|||
{ |
|||
environment.BatchManagementEndpoint = property.Value |
|||
} |
|||
case EnvironmentStorageEndpointSuffix: |
|||
{ |
|||
environment.StorageEndpointSuffix = property.Value |
|||
} |
|||
case EnvironmentSQLDatabaseDNSSuffix: |
|||
{ |
|||
environment.SQLDatabaseDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentTrafficManagerDNSSuffix: |
|||
{ |
|||
environment.TrafficManagerDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentKeyVaultDNSSuffix: |
|||
{ |
|||
environment.KeyVaultDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentServiceBusEndpointSuffix: |
|||
{ |
|||
environment.ServiceBusEndpointSuffix = property.Value |
|||
} |
|||
case EnvironmentServiceManagementVMDNSSuffix: |
|||
{ |
|||
environment.ServiceManagementVMDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentResourceManagerVMDNSSuffix: |
|||
{ |
|||
environment.ResourceManagerVMDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentContainerRegistryDNSSuffix: |
|||
{ |
|||
environment.ContainerRegistryDNSSuffix = property.Value |
|||
} |
|||
case EnvironmentTokenAudience: |
|||
{ |
|||
environment.TokenAudience = property.Value |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { |
|||
client := autorest.NewClientWithUserAgent("") |
|||
managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") |
|||
req, _ := http.NewRequest("GET", managementEndpoint, nil) |
|||
response, err := client.Do(req) |
|||
if err != nil { |
|||
return environment, err |
|||
} |
|||
defer response.Body.Close() |
|||
jsonResponse, err := ioutil.ReadAll(response.Body) |
|||
if err != nil { |
|||
return environment, err |
|||
} |
|||
err = json.Unmarshal(jsonResponse, &environment) |
|||
return environment, err |
|||
} |
@ -0,0 +1,200 @@ |
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package azure |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/Azure/go-autorest/autorest" |
|||
) |
|||
|
|||
// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
// It also handles request retries
func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
			// RetriableRequest allows the request body to be re-read on each attempt.
			rr := autorest.NewRetriableRequest(r)
			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
				err = rr.Prepare()
				if err != nil {
					return resp, err
				}

				resp, err = autorest.SendWithSender(s, rr.Request(),
					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
				)
				if err != nil {
					return resp, err
				}

				// Anything other than 409 Conflict is returned as-is; the
				// registration path below only applies to the conflict raised
				// when a resource provider is unregistered.
				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
					return resp, err
				}
				var re RequestError
				err = autorest.Respond(
					resp,
					autorest.ByUnmarshallingJSON(&re),
				)
				if err != nil {
					return resp, err
				}
				// Keep the decoded service error as the pending error; it is
				// returned if registration succeeds but retries are exhausted.
				err = re

				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
					regErr := register(client, r, re)
					if regErr != nil {
						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
					}
				}
				// After successful registration the loop re-prepares and
				// re-sends the original request.
			}
			return resp, err
		})
	}
}
|||
|
|||
func getProvider(re RequestError) (string, error) { |
|||
if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { |
|||
return re.ServiceError.Details[0]["target"].(string), nil |
|||
} |
|||
return "", errors.New("provider was not found in the response") |
|||
} |
|||
|
|||
// register POSTs to the Resource Providers registration API for the provider
// named in re, then polls the provider until it reports "Registered" (or the
// client's polling duration, when non-zero, elapses).
func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
	// The subscription ID is recovered from the failing request's URL path.
	subID := getSubscription(originalReq.URL.Path)
	if subID == "" {
		return errors.New("missing parameter subscriptionID to register resource provider")
	}
	providerName, err := getProvider(re)
	if err != nil {
		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
	}
	// Reuse only scheme and host of the failing request for the registration call.
	newURL := url.URL{
		Scheme: originalReq.URL.Scheme,
		Host:   originalReq.URL.Host,
	}

	// taken from the resources SDK
	// with almost identical code, these sections are easier to maintain
	// It is also not a good idea to import the SDK here
	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", providerName),
		"subscriptionId":            autorest.Encode("path", subID),
	}

	const APIVersion = "2016-09-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(newURL.String()),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	)

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		return err
	}
	// Propagate the original request's context so cancellation is honored.
	req = req.WithContext(originalReq.Context())

	resp, err := autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
	)
	if err != nil {
		return err
	}

	type Provider struct {
		RegistrationState *string `json:"registrationState,omitempty"`
	}
	var provider Provider

	err = autorest.Respond(
		resp,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&provider),
		autorest.ByClosing(),
	)
	if err != nil {
		return err
	}

	// poll for registered provisioning state
	// A zero PollingDuration means "poll until done" (no deadline).
	registrationStartTime := time.Now()
	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
		// taken from the resources SDK
		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
		preparer := autorest.CreatePreparer(
			autorest.AsGet(),
			autorest.WithBaseURL(newURL.String()),
			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
			autorest.WithQueryParameters(queryParameters),
		)
		req, err = preparer.Prepare(&http.Request{})
		if err != nil {
			return err
		}
		req = req.WithContext(originalReq.Context())

		resp, err := autorest.SendWithSender(client, req,
			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
		)
		if err != nil {
			return err
		}

		err = autorest.Respond(
			resp,
			WithErrorUnlessStatusCode(http.StatusOK),
			autorest.ByUnmarshallingJSON(&provider),
			autorest.ByClosing(),
		)
		if err != nil {
			return err
		}

		if provider.RegistrationState != nil &&
			*provider.RegistrationState == "Registered" {
			break
		}

		// Honor a Retry-After header when present; otherwise back off by the
		// client's polling delay. If both report false the context was
		// cancelled, so surface its error.
		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
		if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
			return originalReq.Context().Err()
		}
	}
	if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
		return errors.New("polling for resource provider registration has exceeded the polling duration")
	}
	return err
}
|||
|
|||
// getSubscription extracts the subscription ID from a request URL path,
// returning the segment immediately following "subscriptions" or the empty
// string when no such segment exists.
func getSubscription(path string) string {
	segments := strings.Split(path, "/")
	for idx := 0; idx+1 < len(segments); idx++ {
		if segments[idx] == "subscriptions" {
			return segments[idx+1]
		}
	}
	return ""
}
@ -0,0 +1,300 @@ |
|||
package autorest |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/tls" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"log" |
|||
"net/http" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/Azure/go-autorest/logger" |
|||
) |
|||
|
|||
const (
	// DefaultPollingDelay is a reasonable delay between polling requests.
	DefaultPollingDelay = 60 * time.Second

	// DefaultPollingDuration is a reasonable total polling duration.
	DefaultPollingDuration = 15 * time.Minute

	// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
	DefaultRetryAttempts = 3

	// DefaultRetryDuration is the duration to wait between retries.
	DefaultRetryDuration = 30 * time.Second
)

var (
	// StatusCodesForRetry are a defined group of status codes for which the client will retry.
	StatusCodesForRetry = []int{
		http.StatusRequestTimeout,      // 408
		http.StatusTooManyRequests,     // 429
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
)

// requestFormat and responseFormat delimit the HTTP dumps emitted by
// LoggingInspector; %s receives the wire-format request/response.
const (
	requestFormat = `HTTP Request Begin ===================================================
%s
===================================================== HTTP Request End
`
	responseFormat = `HTTP Response Begin ===================================================
%s
===================================================== HTTP Response End
`
)
|||
|
|||
// Response serves as the base for all responses from generated clients. It provides access to the
// last http.Response.
type Response struct {
	*http.Response `json:"-"`
}

// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
func (r Response) IsHTTPStatus(statusCode int) bool {
	if r.Response == nil {
		return false
	}
	return r.Response.StatusCode == statusCode
}

// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
// the return value is false.
func (r Response) HasHTTPStatus(statusCodes ...int) bool {
	return ResponseHasStatusCode(r.Response, statusCodes...)
}
|||
|
|||
// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log.
type LoggingInspector struct {
	// Logger receives the formatted request/response dumps.
	Logger *log.Logger
}
|||
|
|||
// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
|
|||
// body is restored after being emitted.
|
|||
//
|
|||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
|||
// important. It is best used to trace JSON or similar body values.
|
|||
func (li LoggingInspector) WithInspection() PrepareDecorator { |
|||
return func(p Preparer) Preparer { |
|||
return PreparerFunc(func(r *http.Request) (*http.Request, error) { |
|||
var body, b bytes.Buffer |
|||
|
|||
defer r.Body.Close() |
|||
|
|||
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) |
|||
if err := r.Write(&b); err != nil { |
|||
return nil, fmt.Errorf("Failed to write response: %v", err) |
|||
} |
|||
|
|||
li.Logger.Printf(requestFormat, b.String()) |
|||
|
|||
r.Body = ioutil.NopCloser(&body) |
|||
return p.Prepare(r) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
|
|||
// body is restored after being emitted.
|
|||
//
|
|||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
|||
// important. It is best used to trace JSON or similar body values.
|
|||
func (li LoggingInspector) ByInspecting() RespondDecorator { |
|||
return func(r Responder) Responder { |
|||
return ResponderFunc(func(resp *http.Response) error { |
|||
var body, b bytes.Buffer |
|||
defer resp.Body.Close() |
|||
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) |
|||
if err := resp.Write(&b); err != nil { |
|||
return fmt.Errorf("Failed to write response: %v", err) |
|||
} |
|||
|
|||
li.Logger.Printf(responseFormat, b.String()) |
|||
|
|||
resp.Body = ioutil.NopCloser(&body) |
|||
return r.Respond(resp) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// Client is the base for autorest generated clients. It provides default, "do nothing"
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
// standard, undecorated http.Client as a default Sender.
//
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
// return responses that compose with Response.
//
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
// sending the request by providing a decorated Sender.
type Client struct {
	// Authorizer decorates requests with credentials; nil means NullAuthorizer.
	Authorizer Authorizer
	// Sender dispatches requests; nil means the package default sender.
	Sender Sender
	// RequestInspector, when set, is applied to every outgoing request.
	RequestInspector PrepareDecorator
	// ResponseInspector, when set, is applied to every incoming response.
	ResponseInspector RespondDecorator

	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
	PollingDelay time.Duration

	// PollingDuration sets the maximum polling time after which an error is returned.
	// Setting this to zero will use the provided context to control the duration.
	PollingDuration time.Duration

	// RetryAttempts sets the default number of retry attempts for client.
	RetryAttempts int

	// RetryDuration sets the delay duration for retries.
	RetryDuration time.Duration

	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
	// through the Do method.
	UserAgent string

	// Jar holds cookies shared across requests, if set.
	Jar http.CookieJar

	// Set to true to skip attempted registration of resource providers (false by default).
	SkipResourceProviderRegistration bool
}
|||
|
|||
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
// string.
func NewClientWithUserAgent(ua string) Client {
	return newClient(ua, tls.RenegotiateNever)
}

// ClientOptions contains various Client configuration options.
type ClientOptions struct {
	// UserAgent is an optional user-agent string to append to the default user agent.
	UserAgent string

	// Renegotiation is an optional setting to control client-side TLS renegotiation.
	Renegotiation tls.RenegotiationSupport
}

// NewClientWithOptions returns an instance of a Client with the specified values.
func NewClientWithOptions(options ClientOptions) Client {
	return newClient(options.UserAgent, options.Renegotiation)
}

// newClient builds a Client populated with the package's default polling and
// retry settings and a sender honoring the given TLS renegotiation support.
func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
	c := Client{
		PollingDelay:    DefaultPollingDelay,
		PollingDuration: DefaultPollingDuration,
		RetryAttempts:   DefaultRetryAttempts,
		RetryDuration:   DefaultRetryDuration,
		UserAgent:       UserAgent(),
	}
	c.Sender = c.sender(renegotiation)
	// AddToUserAgent fails only for an empty extension, in which case the
	// default UserAgent is kept; the error is deliberately ignored.
	c.AddToUserAgent(ua)
	return c
}
|||
|
|||
// AddToUserAgent adds an extension to the current user agent
|
|||
func (c *Client) AddToUserAgent(extension string) error { |
|||
if extension != "" { |
|||
c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) |
|||
return nil |
|||
} |
|||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) |
|||
} |
|||
|
|||
// Do implements the Sender interface by invoking the active Sender after applying authorization.
// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
// is set, apply set the User-Agent header.
func (c Client) Do(r *http.Request) (*http.Response, error) {
	// Only set the User-Agent header when the caller has not already done so.
	if r.UserAgent() == "" {
		r, _ = Prepare(r,
			WithUserAgent(c.UserAgent))
	}
	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
	r, err := Prepare(r,
		c.WithAuthorization(),
		c.WithInspection())
	if err != nil {
		var resp *http.Response
		if detErr, ok := err.(DetailedError); ok {
			// if the authorization failed (e.g. invalid credentials) there will
			// be a response associated with the error, be sure to return it.
			resp = detErr.Response
		}
		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
	}
	logger.Instance.WriteRequest(r, logger.Filter{
		Header: func(k string, v []string) (bool, []string) {
			// remove the auth token from the log
			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
				v = []string{"**REDACTED**"}
			}
			return true, v
		},
	})
	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
	logger.Instance.WriteResponse(resp, logger.Filter{})
	// NOTE(review): the response inspector's error is discarded here so the
	// send error (if any) takes precedence — confirm this is intentional.
	Respond(resp, c.ByInspecting())
	return resp, err
}
|||
|
|||
// sender returns the Sender to which to send requests.
|
|||
func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { |
|||
if c.Sender == nil { |
|||
return sender(renengotiation) |
|||
} |
|||
return c.Sender |
|||
} |
|||
|
|||
// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
|
|||
// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer.
|
|||
func (c Client) WithAuthorization() PrepareDecorator { |
|||
return c.authorizer().WithAuthorization() |
|||
} |
|||
|
|||
// authorizer returns the Authorizer to use.
|
|||
func (c Client) authorizer() Authorizer { |
|||
if c.Authorizer == nil { |
|||
return NullAuthorizer{} |
|||
} |
|||
return c.Authorizer |
|||
} |
|||
|
|||
// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
|
|||
// if present, or returns the WithNothing PrepareDecorator otherwise.
|
|||
func (c Client) WithInspection() PrepareDecorator { |
|||
if c.RequestInspector == nil { |
|||
return WithNothing() |
|||
} |
|||
return c.RequestInspector |
|||
} |
|||
|
|||
// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
|
|||
// if present, or returns the ByIgnoring RespondDecorator otherwise.
|
|||
func (c Client) ByInspecting() RespondDecorator { |
|||
if c.ResponseInspector == nil { |
|||
return ByIgnoring() |
|||
} |
|||
return c.ResponseInspector |
|||
} |
@ -0,0 +1,191 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
Copyright 2015 Microsoft Corporation |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,96 @@ |
|||
/* |
|||
Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
|
|||
defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of |
|||
time.Time types. And both convert to time.Time through a ToTime method. |
|||
*/ |
|||
package date |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"fmt" |
|||
"time" |
|||
) |
|||
|
|||
// Reference layouts for the RFC3339 full-date representation (2006-01-02), in
// both bare and JSON-quoted form, plus the printf-style formats used to render it.
const (
	fullDate     = "2006-01-02"
	fullDateJSON = `"2006-01-02"`
	dateFormat   = "%04d-%02d-%02d"
	jsonFormat   = `"%04d-%02d-%02d"`
)

// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
// 2006-01-02).
type Date struct {
	time.Time
}

// ParseDate creates a new Date from the passed string.
func ParseDate(date string) (d Date, err error) {
	return parseDate(date, fullDate)
}

// parseDate parses date against the supplied layout and wraps the result in a
// Date. On failure the zero Date is returned alongside the parse error.
func parseDate(date string, format string) (Date, error) {
	parsed, err := time.Parse(format, date)
	return Date{Time: parsed}, err
}

// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalBinary() ([]byte, error) {
	// The binary and text representations are identical for Date.
	return d.MarshalText()
}

// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalBinary(data []byte) error {
	return d.UnmarshalText(data)
}

// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalJSON() (json []byte, err error) {
	quoted := fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())
	return []byte(quoted), nil
}

// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalJSON(data []byte) (err error) {
	parsed, err := time.Parse(fullDateJSON, string(data))
	d.Time = parsed
	return err
}

// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalText() (text []byte, err error) {
	rendered := fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
	return []byte(rendered), nil
}

// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalText(data []byte) (err error) {
	parsed, err := time.Parse(fullDate, string(data))
	d.Time = parsed
	return err
}

// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
func (d Date) String() string {
	// MarshalText never fails for Date, so the error is safely discarded.
	b, _ := d.MarshalText()
	return string(b)
}

// ToTime returns a Date as a time.Time
func (d Date) ToTime() time.Time {
	return d.Time
}
@ -0,0 +1,103 @@ |
|||
package date |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"regexp" |
|||
"time" |
|||
) |
|||
|
|||
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
|
|||
const ( |
|||
azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` |
|||
azureUtcFormat = "2006-01-02T15:04:05.999999999" |
|||
rfc3339JSON = `"` + time.RFC3339Nano + `"` |
|||
rfc3339 = time.RFC3339Nano |
|||
tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` |
|||
) |
|||
|
|||
// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
|
|||
// 2006-01-02T15:04:05Z).
|
|||
type Time struct { |
|||
time.Time |
|||
} |
|||
|
|||
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
|||
// 2006-01-02T15:04:05Z).
|
|||
func (t Time) MarshalBinary() ([]byte, error) { |
|||
return t.Time.MarshalText() |
|||
} |
|||
|
|||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
|||
// (i.e., 2006-01-02T15:04:05Z).
|
|||
func (t *Time) UnmarshalBinary(data []byte) error { |
|||
return t.UnmarshalText(data) |
|||
} |
|||
|
|||
// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
|
|||
// 2006-01-02T15:04:05Z).
|
|||
func (t Time) MarshalJSON() (json []byte, err error) { |
|||
return t.Time.MarshalJSON() |
|||
} |
|||
|
|||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
|
|||
// (i.e., 2006-01-02T15:04:05Z).
|
|||
func (t *Time) UnmarshalJSON(data []byte) (err error) { |
|||
timeFormat := azureUtcFormatJSON |
|||
match, err := regexp.Match(tzOffsetRegex, data) |
|||
if err != nil { |
|||
return err |
|||
} else if match { |
|||
timeFormat = rfc3339JSON |
|||
} |
|||
t.Time, err = ParseTime(timeFormat, string(data)) |
|||
return err |
|||
} |
|||
|
|||
// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
|||
// 2006-01-02T15:04:05Z).
|
|||
func (t Time) MarshalText() (text []byte, err error) { |
|||
return t.Time.MarshalText() |
|||
} |
|||
|
|||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
|||
// (i.e., 2006-01-02T15:04:05Z).
|
|||
func (t *Time) UnmarshalText(data []byte) (err error) { |
|||
timeFormat := azureUtcFormat |
|||
match, err := regexp.Match(tzOffsetRegex, data) |
|||
if err != nil { |
|||
return err |
|||
} else if match { |
|||
timeFormat = rfc3339 |
|||
} |
|||
t.Time, err = ParseTime(timeFormat, string(data)) |
|||
return err |
|||
} |
|||
|
|||
// String returns the Time formatted as an RFC3339 date-time string (i.e.,
|
|||
// 2006-01-02T15:04:05Z).
|
|||
func (t Time) String() string { |
|||
// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
|
|||
b, err := t.MarshalText() |
|||
if err != nil { |
|||
return "" |
|||
} |
|||
return string(b) |
|||
} |
|||
|
|||
// ToTime returns a Time as a time.Time
|
|||
func (t Time) ToTime() time.Time { |
|||
return t.Time |
|||
} |
@ -0,0 +1,100 @@ |
|||
package date |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"errors" |
|||
"time" |
|||
) |
|||
|
|||
const ( |
|||
rfc1123JSON = `"` + time.RFC1123 + `"` |
|||
rfc1123 = time.RFC1123 |
|||
) |
|||
|
|||
// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
|
|||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|||
type TimeRFC1123 struct { |
|||
time.Time |
|||
} |
|||
|
|||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
|
|||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { |
|||
t.Time, err = ParseTime(rfc1123JSON, string(data)) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
|
|||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t TimeRFC1123) MarshalJSON() ([]byte, error) { |
|||
if y := t.Year(); y < 0 || y >= 10000 { |
|||
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") |
|||
} |
|||
b := []byte(t.Format(rfc1123JSON)) |
|||
return b, nil |
|||
} |
|||
|
|||
// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
|||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t TimeRFC1123) MarshalText() ([]byte, error) { |
|||
if y := t.Year(); y < 0 || y >= 10000 { |
|||
return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") |
|||
} |
|||
|
|||
b := []byte(t.Format(rfc1123)) |
|||
return b, nil |
|||
} |
|||
|
|||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
|||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { |
|||
t.Time, err = ParseTime(rfc1123, string(data)) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
|||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t TimeRFC1123) MarshalBinary() ([]byte, error) { |
|||
return t.MarshalText() |
|||
} |
|||
|
|||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
|||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { |
|||
return t.UnmarshalText(data) |
|||
} |
|||
|
|||
// ToTime returns a Time as a time.Time
|
|||
func (t TimeRFC1123) ToTime() time.Time { |
|||
return t.Time |
|||
} |
|||
|
|||
// String returns the Time formatted as an RFC1123 date-time string (i.e.,
|
|||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|||
func (t TimeRFC1123) String() string { |
|||
// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
|
|||
b, err := t.MarshalText() |
|||
if err != nil { |
|||
return "" |
|||
} |
|||
return string(b) |
|||
} |
@ -0,0 +1,123 @@ |
|||
package date |
|||
|
|||
// Copyright 2017 Microsoft Corporation
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/binary" |
|||
"encoding/json" |
|||
"time" |
|||
) |
|||
|
|||
// unixEpoch is the moment in time that should be treated as timestamp 0.
var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)

// UnixTime marshals and unmarshals a time that is represented as the number
// of seconds (ignoring skip-seconds) since the Unix Epoch.
type UnixTime time.Time

// Duration returns the time as a Duration since the UnixEpoch.
func (t UnixTime) Duration() time.Duration {
	return time.Time(t).Sub(unixEpoch)
}

// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
func NewUnixTimeFromSeconds(seconds float64) UnixTime {
	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
}

// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
}

// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
	return UnixTime(unixEpoch.Add(dur))
}

// UnixEpoch retrieves the moment considered the Unix Epoch. I.e. The time represented by '0'
func UnixEpoch() time.Time {
	return unixEpoch
}

// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
func (t UnixTime) MarshalJSON() ([]byte, error) {
	var out bytes.Buffer
	// Encode as fractional seconds; note that Encoder.Encode appends a newline.
	if err := json.NewEncoder(&out).Encode(float64(time.Time(t).UnixNano()) / 1e9); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
// midnight January 1st, 1970.
func (t *UnixTime) UnmarshalJSON(text []byte) error {
	var seconds float64
	if err := json.NewDecoder(bytes.NewReader(text)).Decode(&seconds); err != nil {
		return err
	}
	*t = NewUnixTimeFromSeconds(seconds)
	return nil
}

// MarshalText stores the UnixTime in the textual form produced by
// time.Time.MarshalText (RFC3339), not as a number of seconds.
func (t UnixTime) MarshalText() ([]byte, error) {
	return time.Time(t).MarshalText()
}

// UnmarshalText populates a UnixTime from the textual form accepted by
// time.Time.UnmarshalText (RFC3339).
func (t *UnixTime) UnmarshalText(raw []byte) error {
	var parsed time.Time
	if err := parsed.UnmarshalText(raw); err != nil {
		return err
	}
	*t = UnixTime(parsed)
	return nil
}

// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
func (t UnixTime) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	payload := int64(t.Duration())
	if err := binary.Write(&buf, binary.LittleEndian, &payload); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
func (t *UnixTime) UnmarshalBinary(raw []byte) error {
	var nanos int64
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanos); err != nil {
		return err
	}
	*t = NewUnixTimeFromNanoseconds(nanos)
	return nil
}
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue