loveckiy.ivan
11 months ago
1901 changed files with 443156 additions and 4906 deletions
@ -0,0 +1,321 @@ |
|||
// Logging wrapper that enriches logrus records with attributes of the logged process.
|
|||
// Records are extended with values identifying the running service: UID, Name, Service.
|
|||
|
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/lib" |
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
// logrusB is the shared logrus instance used by every method of *log.
// NOTE(review): all methods mutate its output/formatter/level on each call,
// so concurrent use from loggers with different settings can interleave — verify.
var logrusB = logrus.New()
|||
|
|||
|
|||
// log is the file/stdout-backed implementation of the Log interface.
type log struct {
	// Output is the destination of log records: stdout, or an *os.File to write to.
	Output io.Writer `json:"output"`

	// Levels lists the enabled logging levels: Error/Warning/Debug/Info/Panic/Trace.
	// Several levels may be combined with "|", e.g. "Error|Warning"; "All" enables
	// everything; adding "Stdout" duplicates records to standard output.
	//
	// Debug:   debugging/profiling messages; in production usually enabled only at
	//          initial startup or while hunting bottlenecks.
	// Info:    regular messages about what the system is doing; useful when
	//          investigating bugs or unusual situations.
	// Warning: something strange happened and deserves operator attention; the
	//          situation should be triaged into either info or error handling.
	// Error:   a system failure that requires intervention; written immediately.
	//          A user mistake (e.g. entering -1 in a field) is not a system error.
	// Panic:   critical errors that make the system (or a subsystem) inoperable,
	//          typically misconfiguration or hardware failure; require urgent
	//          reaction, possibly SMS notification.
	// Trace:   request-processing traces.
	Levels string `json:"levels"`
	// UID is the uid of the logged process/service (a random value).
	UID string `json:"uid"`
	// Name is the name of the logged process (service).
	Name string `json:"name"`
	// Service is the service kind (app/gui...).
	Service string `json:"service"`
	// Dir is the directory where log files are stored.
	Dir string `json:"dir"`
	// Config is the uid of the configuration the process was started with.
	Config string `json:"config"`
	// IntervalReload is the interval between checks of the active (current-day) log file.
	IntervalReload time.Duration `json:"delay_reload"`
	// IntervalClearFiles is the interval between checks for files due for deletion.
	IntervalClearFiles time.Duration `json:"interval_clear_files"`
	// PeriodSaveFiles is the retention period as years-months-days
	// (e.g. "0-1-0" keeps files for one month).
	PeriodSaveFiles string `json:"period_save_files"`

	// LogboxURL is the address of the log-shipping service (Logbox).
	LogboxURL string
	// LogboxSendInterval is the Logbox send interval (records are buffered in between).
	LogboxSendInterval time.Duration

	// File is the currently open log file, when the file backend is used.
	File *os.File

	// mux guards the logger state.
	// NOTE(review): never locked by any method in this file — confirm intent.
	mux *sync.Mutex
}
|||
|
|||
// ConfigLogger is the common configuration for building a logger.
type ConfigLogger struct {
	// Level, Uid, Name, Srv and Config describe the process being logged.
	Level, Uid, Name, Srv, Config string

	// File and Vfs hold backend-specific settings.
	File ConfigFileLogger
	Vfs  ConfigVfsLogger
	//Logbox ConfigLogboxLogger

	// Priority orders the backends to try ("file", "vfs", ...); the first
	// backend that initializes successfully wins.
	Priority []string
}
|||
|
|||
// Log is the logging contract implemented by every logger backend.
type Log interface {
	Trace(args ...interface{})
	Debug(args ...interface{})
	Info(args ...interface{})
	Warning(args ...interface{})
	// Error and Panic accept the error separately; it is appended to args.
	Error(err error, args ...interface{})
	Panic(err error, args ...interface{})
	// Exit logs at fatal level and terminates the program.
	Exit(err error, args ...interface{})

	// Close releases the underlying log destination.
	Close()
}
|||
|
|||
func (l *log) Trace(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Trace") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.TraceLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Trace(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Trace: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Debug(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Debug") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
// Only log the warning severity or above.
|
|||
logrusB.SetLevel(logrus.DebugLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Debug(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Debug: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Info(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Info") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
logrusB.SetLevel(logrus.InfoLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Info(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Info: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Warning(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Warning") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.WarnLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Warn(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Warn: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Error(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Error") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.ErrorLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Error(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Error: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Panic(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Panic") { |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Panic: %+v\n", args) |
|||
} |
|||
|
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.PanicLevel) |
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Panic(args...) |
|||
} |
|||
} |
|||
|
|||
// Exit внутренняя ф-ция логирования и прекращения работы программы
|
|||
func (l *log) Exit(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Fatal") { |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Exit: %+v\n", args) |
|||
} |
|||
|
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.FatalLevel) |
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Fatal(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Close() { |
|||
l.File.Close() |
|||
} |
|||
|
|||
func NewLogger(ctx context.Context, cfg ConfigLogger) (logger Log, initType string, err error) { |
|||
var errI error |
|||
err = fmt.Errorf("logger init") |
|||
|
|||
for _, v := range cfg.Priority { |
|||
|
|||
if v == "file" && err != nil { |
|||
// если путь указан относительно / значит задан абсолютный путь, иначе в директории
|
|||
if cfg.File.Dir[:1] != sep { |
|||
rootDir, _ := lib.RootDir() |
|||
cfg.File.Dir = rootDir + sep + "logs" + sep + cfg.File.Dir |
|||
} |
|||
|
|||
// инициализировать лог и его ротацию
|
|||
logger, errI = NewFileLogger(ctx, cfg) |
|||
if errI != nil { |
|||
err = fmt.Errorf("%s %s failed init files-logger, (err: %s)", err, "→", errI) |
|||
fmt.Println(err, cfg) |
|||
} else { |
|||
initType = v |
|||
err = nil |
|||
} |
|||
} |
|||
|
|||
if v == "vfs" && err != nil { |
|||
// инициализировать лог и его ротацию
|
|||
vs := strings.Split(cfg.Vfs.Dir, sep) // берем только последнее значение в пути для vfs-логера
|
|||
vs = vs[len(vs)-1:] |
|||
if len(vs) != 0 { |
|||
cfg.Vfs.Dir = "logs" |
|||
} |
|||
|
|||
// инициализировать лог и его ротацию
|
|||
logger, errI = NewVfsLogger(ctx, cfg) |
|||
fmt.Println(logger, errI) |
|||
if errI != nil { |
|||
err = fmt.Errorf("%s %s failed init files-vfs, (err: %s)", err, "→", errI) |
|||
fmt.Println(err, cfg) |
|||
} else { |
|||
initType = v |
|||
err = nil |
|||
} |
|||
} |
|||
|
|||
//if v == "logbox" && err != nil {
|
|||
// // инициализировать лог и его ротацию
|
|||
// logger, errI = NewLogboxLogger(ctx, cfg)
|
|||
// if errI != nil {
|
|||
// err = fmt.Errorf("%s %s failed init files-logbox, (err: %s)", err, "→", errI)
|
|||
// } else {
|
|||
// initType = v
|
|||
// err = nil
|
|||
// }
|
|||
//}
|
|||
|
|||
} |
|||
|
|||
return logger, initType, err |
|||
} |
@ -1,42 +0,0 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"testing" |
|||
|
|||
"github.com/stretchr/testify/require" |
|||
"go.uber.org/zap" |
|||
) |
|||
|
|||
func TestFieldsStorage_SetFields(t *testing.T) { |
|||
testR := require.New(t) |
|||
testCases := []struct { |
|||
Input []Field |
|||
Expect []zap.Field |
|||
}{ |
|||
{ |
|||
Input: []Field{FieldUint64("MerchantIDKey", 123), FieldUint64("MerchantIDKey", 124)}, |
|||
Expect: []zap.Field{zap.Uint64("MerchantIDKey", 124)}, |
|||
}, |
|||
{ |
|||
Input: []Field{FieldUint64("MerchantIDKey", 124)}, |
|||
Expect: []zap.Field{zap.Uint64("MerchantIDKey", 124)}, |
|||
}, |
|||
{ |
|||
Input: []Field{FieldString("RequestKey", "124"), FieldUint64("MerchantIDKey", 123)}, |
|||
Expect: []zap.Field{zap.Uint64("MerchantIDKey", 123), zap.String("RequestKey", "124")}, |
|||
}, |
|||
{ |
|||
Input: nil, |
|||
Expect: []zap.Field{}, |
|||
}, |
|||
} |
|||
|
|||
for i := range testCases { |
|||
ctx := context.Background() |
|||
ctx = WithFieldsContext(ctx, testCases[i].Input...) |
|||
storage, _ := getStorageFromCtx(ctx) |
|||
res := storage.External() |
|||
testR.ElementsMatch(testCases[i].Expect, res) |
|||
} |
|||
} |
@ -0,0 +1,58 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
"time" |
|||
|
|||
logboxclient "git.lowcodeplatform.net/fabric/logbox-client" |
|||
) |
|||
|
|||
// LogboxConfig holds connection settings for the Logbox log-shipping service.
type LogboxConfig struct {
	// Endpoint is the Logbox address; AccessKeyID/SecretKey are the credentials.
	Endpoint, AccessKeyID, SecretKey string
	// RequestTimeout bounds each request to Logbox.
	RequestTimeout time.Duration
}
|||
|
|||
func (l *LogboxConfig) client(ctx context.Context) (client logboxclient.Client) { |
|||
var err error |
|||
client, err = logboxclient.New(ctx, l.Endpoint, l.RequestTimeout) |
|||
if err != nil { |
|||
return nil |
|||
} |
|||
return client |
|||
} |
|||
|
|||
// logboxSender is an io.Writer that ships newline-separated JSON log records
// to Logbox; each Write becomes one upsert request.
type logboxSender struct {
	// requestTimeout bounds the upsert request issued from Write.
	requestTimeout time.Duration
	// logboxClient performs the actual upsert.
	logboxClient logboxclient.Client
}
|||
|
|||
func (v *logboxSender) Write(p []byte) (n int, err error) { |
|||
reqTimeout, cancel := context.WithTimeout(context.Background(), v.requestTimeout) |
|||
defer cancel() |
|||
|
|||
newReq := v.logboxClient.NewUpsertReq() |
|||
recordsBytes := bytes.Split(p, []byte("\n")) |
|||
for _, value := range recordsBytes { |
|||
if string(value) == "" { |
|||
continue |
|||
} |
|||
|
|||
l := LogLine{} |
|||
err = json.Unmarshal(value, &l) |
|||
if err != nil { |
|||
return 0, fmt.Errorf("error unmarshal to logline. err: %s, value: %s", err, string(value)) |
|||
} |
|||
newReq.AddEvent(*v.logboxClient.NewEvent(l.ConfigID, l.Level, l.Msg.(string), l.Name, l.ServiceID, l.Time, l.Uid)) |
|||
} |
|||
|
|||
_, err = v.logboxClient.Upsert(reqTimeout, *newReq) |
|||
|
|||
return len(p), err |
|||
} |
|||
|
|||
// Sync flushes the sender by closing the underlying Logbox client.
func (v *logboxSender) Sync() error {
	return v.logboxClient.Close()
}
@ -1,152 +0,0 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/stretchr/testify/assert" |
|||
"github.com/stretchr/testify/require" |
|||
"go.uber.org/zap" |
|||
"go.uber.org/zap/zapcore" |
|||
"go.uber.org/zap/zaptest/observer" |
|||
) |
|||
|
|||
// TestSetLevelObserver checks that the level-observer callback is invoked
// periodically with the current level and that the level it returns is applied
// before the next invocation. Kept byte-for-byte: the assertions depend on the
// exact call ordering of the observer goroutine.
func TestSetLevelObserver(t *testing.T) {
	t.Run("Run3Times", func(t *testing.T) {
		level = zap.NewAtomicLevelAt(zap.InfoLevel)
		ctx, cancel := context.WithCancel(context.Background())
		fnCallsCount := 0
		levelObserver := func(ctx context.Context, curLevel zapcore.Level) zapcore.Level {
			// After cancel() the observer may fire once more before it stops;
			// return the level unchanged so no further assertions run.
			if fnCallsCount == 3 {
				return curLevel
			}
			// Each call must observe the level produced by the previous call
			// (Info -> Warn -> Error), proving the returned level was applied.
			switch fnCallsCount {
			case 0:
				assert.Equal(t, zap.InfoLevel, curLevel)
			case 1:
				assert.Equal(t, zap.WarnLevel, curLevel)
			case 2:
				assert.Equal(t, zap.ErrorLevel, curLevel)
			}
			fnCallsCount++
			if fnCallsCount == 3 {
				cancel()
			}

			return curLevel + 1
		}

		SetLevelObserver(ctx, 1*time.Nanosecond, levelObserver)

		<-ctx.Done()
		assert.Equal(t, 3, fnCallsCount)
	})
	t.Run("NotRun", func(t *testing.T) {
		level = zap.NewAtomicLevelAt(zap.InfoLevel)
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()
		fn2 := func(ctx context.Context, curLevel zapcore.Level) zapcore.Level {
			assert.Fail(t, "function was call")
			return curLevel
		}

		// TODO: this subtest depends on the call made in the previous test
		SetLevelObserver(ctx, 1*time.Nanosecond, fn2)

		<-ctx.Done()
	})
}
|||
|
|||
// TestZapLogger verifies that the Engine forwards messages and fields to the
// underlying zap core, with explicit fields only, with context fields, and
// with context fields coming from the fields storage. The assertions index
// into the observed-log list by position, so the emission order matters.
func TestZapLogger(t *testing.T) {
	t.Run("without context fields", func(t *testing.T) {
		observedZapCore, observedLogs := observer.New(zap.InfoLevel)
		observedLogger := zap.New(observedZapCore)
		logger := New(observedLogger.Named("wb.logger"))
		// init without ctx fields
		testLogCaseWithoutCtxFields(logger)
		// check: two records, second carries the explicit fields
		require.Equal(t, 2, observedLogs.Len())
		allLogs := observedLogs.All()
		assert.Equal(t, "init test logs without ctx fields", allLogs[0].Message)
		assert.Equal(t, "log with fields", allLogs[1].Message)
		assert.ElementsMatch(t,
			[]zap.Field{
				zap.String("first", "123"),
				zap.Uint("second", 12),
			}, allLogs[1].Context)
	})

	t.Run("with context fields", func(t *testing.T) {
		observedZapCore, observedLogs := observer.New(zap.InfoLevel)
		observedLogger := zap.New(observedZapCore)
		logger := New(observedLogger.Named("wb.logger"))
		// init with ctx fields
		testLogCaseWithCtxFields(logger)
		// check: the ctx field is merged into the second record
		require.Equal(t, 2, observedLogs.Len())
		allLogs := observedLogs.All()
		assert.Equal(t, "init test logs with ctx fields", allLogs[0].Message)
		assert.Equal(t, "log with fields", allLogs[1].Message)
		assert.ElementsMatch(t,
			[]zap.Field{
				zap.String("first", "123"),
				zap.String("ctxKey1", "ctxVal1"),
				zap.Uint("second", 12),
			}, allLogs[1].Context)
	})

	t.Run("with context fields with field storage", func(t *testing.T) {
		observedZapCore, observedLogs := observer.New(zap.InfoLevel)
		observedLogger := zap.New(observedZapCore)
		logger := New(observedLogger.Named("wb.logger"))
		// init with ctx fields
		testLogCaseWithCtxFieldsStorage(logger)
		// check: both the single ctx field and the storage fields are merged
		require.Equal(t, 2, observedLogs.Len())
		allLogs := observedLogs.All()
		assert.Equal(t, "init test logs with ctx fields", allLogs[0].Message)
		assert.Equal(t, "log with fields", allLogs[1].Message)
		assert.ElementsMatch(t,
			[]zap.Field{
				zap.String("first", "123"),
				zap.String("ctxKey1", "ctxVal1"),
				zap.Uint("second", 12),
				zap.String("ctx2", "ctx2"),
				zap.Uint64("ctx3", 123),
			}, allLogs[1].Context)
	})
}
|||
|
|||
func testLogCaseWithoutCtxFields(logger *Engine) { |
|||
logger.Info("init test logs without ctx fields") |
|||
logger.With( |
|||
zap.String("first", "123"), |
|||
zap.Uint("second", 12), |
|||
).Info("log with fields") |
|||
} |
|||
|
|||
func testLogCaseWithCtxFields(logger *Engine) { |
|||
ctx := context.Background() |
|||
ctx = SetFieldCtx(ctx, "ctxKey1", "ctxVal1") |
|||
logger = logger.WithContext(ctx) |
|||
logger.Info("init test logs with ctx fields") |
|||
logger.With( |
|||
zap.String("first", "123"), |
|||
zap.Uint("second", 12), |
|||
).Info("log with fields") |
|||
} |
|||
|
|||
func testLogCaseWithCtxFieldsStorage(logger *Engine) { |
|||
ctx := context.Background() |
|||
ctx = SetFieldCtx(ctx, "ctxKey1", "ctxVal1") |
|||
|
|||
ctx = WithFieldsContext(ctx, FieldString("ctx2", "ctx2"), FieldUint64("ctx3", 123)) |
|||
|
|||
logger = logger.WithContext(ctx) |
|||
logger.Info("init test logs with ctx fields") |
|||
logger.With( |
|||
zap.String("first", "123"), |
|||
zap.Uint("second", 12), |
|||
).Info("log with fields") |
|||
} |
@ -1,108 +0,0 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"io" |
|||
"os" |
|||
"testing" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/packages/logger/types" |
|||
"go.uber.org/zap" |
|||
"go.uber.org/zap/zapcore" |
|||
) |
|||
|
|||
type TestStringCastingObjMarsh struct { |
|||
Name string |
|||
Obj *TestStringCastingObjMarsh |
|||
} |
|||
|
|||
func (o TestStringCastingObjMarsh) MarshalLogObject(encoder zapcore.ObjectEncoder) error { |
|||
encoder.AddString("name", o.Name) |
|||
|
|||
if o.Obj != nil { |
|||
_ = encoder.AddObject("obj", o.Obj) |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
type TestStringCastingJSONEncoderObjMarshArr []*TestStringCastingObjMarsh |
|||
|
|||
func (f TestStringCastingJSONEncoderObjMarshArr) MarshalLogArray(enc zapcore.ArrayEncoder) error { |
|||
for _, o := range f { |
|||
_ = enc.AppendObject(o) |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
func TestStringCastingJSONEncoder(t *testing.T) { |
|||
logFile, err := os.CreateTemp(os.TempDir(), "test-string-casting") |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
|
|||
ctx := context.Background() |
|||
ctx = SetRequestIDCtx(ctx, "test-string-casting") |
|||
obj := &TestStringCastingObjMarsh{Name: "Sam", Obj: &TestStringCastingObjMarsh{Name: "John"}} |
|||
|
|||
SetupDefaultLogger( |
|||
"test-namespace", |
|||
WithStringCasting(), |
|||
WithOutputPaths([]string{logFile.Name()}), |
|||
) |
|||
|
|||
Logger(ctx).Info("test message", |
|||
zap.Int("int", 4), |
|||
zap.Int8("int8", 4), |
|||
zap.Int16("int16", 4), |
|||
zap.Int32("int32", 4), |
|||
zap.Int64("int64", 4), |
|||
|
|||
zap.Uint("uint", 4), |
|||
zap.Uint8("uint8", 4), |
|||
zap.Uint16("uint16", 4), |
|||
zap.Uint32("uint32", 4), |
|||
zap.Uint64("uint64", 4), |
|||
|
|||
zap.Float32("float32", 4.2), |
|||
zap.Float64("float64", 4.2), |
|||
|
|||
zap.String("string", "string"), |
|||
|
|||
zap.Bool("bool_true", true), |
|||
zap.Bool("bool_false", false), |
|||
|
|||
zap.Any("anyInt", 1), |
|||
zap.Any("anyObj", obj), |
|||
|
|||
zap.Object("object", obj), |
|||
|
|||
zap.Times("times", []time.Time{time.Now(), time.Now()}), |
|||
|
|||
zap.Array("array", TestStringCastingJSONEncoderObjMarshArr{obj, obj}), |
|||
|
|||
zap.Time("time", time.Now()), |
|||
|
|||
zap.ByteString("byte_string", []byte("abc")), |
|||
|
|||
zap.Duration("duration", time.Second*10), |
|||
|
|||
types.JSON("json", `{"key": "value"}`), |
|||
|
|||
types.URL("url", `https://some.url/path?password=123`), |
|||
|
|||
types.StringMap("string_map", map[string]string{"first": "first_val", "second": "second_val"}), |
|||
) |
|||
|
|||
data, err := io.ReadAll(logFile) |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
|
|||
if !json.Valid(data) { |
|||
t.Error("result is not valid json") |
|||
} |
|||
} |
@ -1,55 +0,0 @@ |
|||
package types |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
func TestMaskSensitiveJSONFields(t *testing.T) { |
|||
for _, testCase := range []struct { |
|||
name string |
|||
input string |
|||
expected string |
|||
}{ |
|||
{ |
|||
name: "empty string", |
|||
}, |
|||
{ |
|||
name: "null", |
|||
input: "null", |
|||
expected: "null", |
|||
}, |
|||
{ |
|||
name: "empty object", |
|||
input: "{}", |
|||
expected: "{}", |
|||
}, |
|||
{ |
|||
name: "wrong json", |
|||
input: "{", |
|||
expected: "{", |
|||
}, |
|||
{ |
|||
name: "wrong json 2", |
|||
input: `{"sd":abc или <html>Internal server Error</html>`, |
|||
expected: `{"sd":abc или <html>Internal server Error</html>`, |
|||
}, |
|||
{ |
|||
name: "with password", |
|||
input: `{"password": "foo", "bar": "baz"}`, |
|||
expected: `{"password":"---","bar":"baz"}`, |
|||
}, |
|||
{ |
|||
name: "with card", |
|||
input: `{"card": "1234123412341234", "bar": "baz"}`, |
|||
expected: `{"card":"1234----1234","bar":"baz"}`, |
|||
}, |
|||
} { |
|||
t.Run(testCase.name, func(t *testing.T) { |
|||
actual, err := MaskSensitiveJSONFields(testCase.input, nil, nil, nil) |
|||
require.NoError(t, err) |
|||
require.Equal(t, testCase.expected, actual) |
|||
}) |
|||
} |
|||
} |
@ -1,38 +0,0 @@ |
|||
package types |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
func TestMask(t *testing.T) { |
|||
for _, testCase := range []struct { |
|||
name string |
|||
source string |
|||
expected string |
|||
}{ |
|||
{ |
|||
name: "empty", |
|||
}, |
|||
{ |
|||
name: "more than 16", |
|||
source: "12345678901234567", |
|||
expected: "123456---(1)---4567", |
|||
}, |
|||
{ |
|||
name: "more than 12", |
|||
source: "1234567890123456", |
|||
expected: "1234----3456", |
|||
}, |
|||
{ |
|||
name: "less than 12", |
|||
source: "123456789012", |
|||
expected: "------------", |
|||
}, |
|||
} { |
|||
t.Run(testCase.name, func(t *testing.T) { |
|||
require.Equal(t, testCase.expected, Mask(testCase.source)) |
|||
}) |
|||
} |
|||
} |
@ -1,77 +0,0 @@ |
|||
package types |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
func TestMaskSensitiveURLFields(t *testing.T) { |
|||
for _, testCase := range []struct { |
|||
name string |
|||
input string |
|||
expected string |
|||
}{ |
|||
{ |
|||
name: "empty string", |
|||
}, |
|||
{ |
|||
name: "single host", |
|||
input: "localhost", |
|||
expected: "localhost", |
|||
}, |
|||
{ |
|||
name: "query", |
|||
input: "foo=bar", |
|||
expected: "foo=bar", |
|||
}, |
|||
{ |
|||
name: "host and query", |
|||
input: "localhost?foo=bar", |
|||
expected: "localhost?foo=bar", |
|||
}, |
|||
{ |
|||
name: "host and hidden query", |
|||
input: "localhost?password=bar", |
|||
expected: "localhost?password=---", |
|||
}, |
|||
{ |
|||
name: "host and card", |
|||
input: "localhost?card=1234123412341234", |
|||
expected: "localhost?card=1234----1234", |
|||
}, |
|||
{ |
|||
name: "postgres user and pass", |
|||
input: "postgres://user:password@host:5678/path?card=1234123412341234&password=bar", |
|||
expected: "postgres://user:***@host:5678/path?card=1234----1234&password=---", |
|||
}, |
|||
} { |
|||
t.Run(testCase.name, func(t *testing.T) { |
|||
actual, err := MaskSensitiveURLFields(testCase.input, nil, nil) |
|||
require.NoError(t, err) |
|||
require.Equal(t, testCase.expected, actual) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// caseList holds origin/masked URL pairs consumed by TestMaskFQDN.
var caseList = []struct {
	Origin string
	Masked string
}{
	{
		Origin: "postgres://user:password@host:5678/path?param1=value1#ancher",
		Masked: "postgres://user:***@host:5678/path?param1=value1#ancher",
	},
	{
		Origin: "",
		Masked: "",
	},
}
|||
|
|||
func TestMaskFQDN(t *testing.T) { |
|||
for _, c := range caseList { |
|||
actual, err := MaskFQDN(c.Origin) |
|||
require.NoError(t, err) |
|||
require.Equal(t, actual, c.Masked) |
|||
} |
|||
} |
@ -1,38 +0,0 @@ |
|||
package types |
|||
|
|||
import ( |
|||
"strings" |
|||
"testing" |
|||
|
|||
"go.uber.org/zap/zapcore" |
|||
) |
|||
|
|||
// TestWhere checks that Where() captures the caller's method, project and
// file:line. Kept byte-for-byte: the expected path string encodes the exact
// line number of the Where() call below, so any reflow breaks the assertion.
func TestWhere(t *testing.T) {
	field := Where()
	if field.Type == zapcore.SkipType {
		t.Fatal("unexpected zapcore field type: got zapcore.SkipType")
	}

	w, ok := field.Interface.(where)
	if !ok {
		t.Fatal("unexpected zapcore field type: can not convert to where structure")
	}

	const expectedMethod = "packages/logger/types.TestWhere"
	if w.method() != expectedMethod {
		t.Errorf("got unexpected path of the caller method:\n\texptected: %s\n\tgot: %s",
			expectedMethod, w.method())
	}

	const expectedProject = "git.lowcodeplatform.net/packages"
	if w.project() != expectedProject && w.project() != "" {
		t.Errorf("got unexpected name of the caller project:\n\texptected: %s or empty value\n\tgot: %s",
			expectedProject, w.project())
	}

	// NOTE(review): this suffix hard-codes the source line of the Where() call
	// ("where_test.go:11") — it breaks if the file above is edited.
	const expectedPath = "wbpay-go/packages/logger/types/where_test.go:11"
	if !strings.HasSuffix(w.path(), expectedPath) {
		t.Errorf("got unexpected path of the caller:\n\texptected: %s\n\tgot: %s",
			expectedPath, w.path())
	}
}
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,18 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
// Version is the current tagged release of the library.
|
|||
const Version = "1.19.1" |
@ -0,0 +1,19 @@ |
|||
# Changes |
|||
|
|||
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) |
|||
|
|||
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) |
|||
|
|||
## [0.1.0] (2022-10-26) |
|||
|
|||
Initial release of metadata being its own module. |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,27 @@ |
|||
# Compute API |
|||
|
|||
[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) |
|||
|
|||
This is a utility library for communicating with Google Cloud metadata service |
|||
on Google Cloud. |
|||
|
|||
## Install |
|||
|
|||
```bash |
|||
go get cloud.google.com/go/compute/metadata |
|||
``` |
|||
|
|||
## Go Version Support |
|||
|
|||
See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) |
|||
section in the root directory's README. |
|||
|
|||
## Contributing |
|||
|
|||
Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) |
|||
document for details. |
|||
|
|||
Please note that this project is released with a Contributor Code of Conduct. |
|||
By participating in this project you agree to abide by its terms. See |
|||
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) |
|||
for more information. |
@ -0,0 +1,543 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package metadata provides access to Google Compute Engine (GCE)
|
|||
// metadata and API service accounts.
|
|||
//
|
|||
// This package is a wrapper around the GCE metadata service,
|
|||
// as documented at https://cloud.google.com/compute/docs/metadata/overview.
|
|||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net" |
|||
"net/http" |
|||
"net/url" |
|||
"os" |
|||
"runtime" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
const ( |
|||
// metadataIP is the documented metadata server IP address.
|
|||
metadataIP = "169.254.169.254" |
|||
|
|||
// metadataHostEnv is the environment variable specifying the
|
|||
// GCE metadata hostname. If empty, the default value of
|
|||
// metadataIP ("169.254.169.254") is used instead.
|
|||
// This is variable name is not defined by any spec, as far as
|
|||
// I know; it was made up for the Go package.
|
|||
metadataHostEnv = "GCE_METADATA_HOST" |
|||
|
|||
userAgent = "gcloud-golang/0.1" |
|||
) |
|||
|
|||
type cachedValue struct { |
|||
k string |
|||
trim bool |
|||
mu sync.Mutex |
|||
v string |
|||
} |
|||
|
|||
var ( |
|||
projID = &cachedValue{k: "project/project-id", trim: true} |
|||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true} |
|||
instID = &cachedValue{k: "instance/id", trim: true} |
|||
) |
|||
|
|||
var defaultClient = &Client{hc: newDefaultHTTPClient()} |
|||
|
|||
func newDefaultHTTPClient() *http.Client { |
|||
return &http.Client{ |
|||
Transport: &http.Transport{ |
|||
Dial: (&net.Dialer{ |
|||
Timeout: 2 * time.Second, |
|||
KeepAlive: 30 * time.Second, |
|||
}).Dial, |
|||
IdleConnTimeout: 60 * time.Second, |
|||
}, |
|||
Timeout: 5 * time.Second, |
|||
} |
|||
} |
|||
|
|||
// NotDefinedError is returned when requested metadata is not defined.
|
|||
//
|
|||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
|||
//
|
|||
// This error is not returned if the value is defined to be the empty
|
|||
// string.
|
|||
type NotDefinedError string |
|||
|
|||
func (suffix NotDefinedError) Error() string { |
|||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) |
|||
} |
|||
|
|||
func (c *cachedValue) get(cl *Client) (v string, err error) { |
|||
defer c.mu.Unlock() |
|||
c.mu.Lock() |
|||
if c.v != "" { |
|||
return c.v, nil |
|||
} |
|||
if c.trim { |
|||
v, err = cl.getTrimmed(c.k) |
|||
} else { |
|||
v, err = cl.Get(c.k) |
|||
} |
|||
if err == nil { |
|||
c.v = v |
|||
} |
|||
return |
|||
} |
|||
|
|||
var ( |
|||
onGCEOnce sync.Once |
|||
onGCE bool |
|||
) |
|||
|
|||
// OnGCE reports whether this process is running on Google Compute Engine.
|
|||
func OnGCE() bool { |
|||
onGCEOnce.Do(initOnGCE) |
|||
return onGCE |
|||
} |
|||
|
|||
func initOnGCE() { |
|||
onGCE = testOnGCE() |
|||
} |
|||
|
|||
func testOnGCE() bool { |
|||
// The user explicitly said they're on GCE, so trust them.
|
|||
if os.Getenv(metadataHostEnv) != "" { |
|||
return true |
|||
} |
|||
|
|||
ctx, cancel := context.WithCancel(context.Background()) |
|||
defer cancel() |
|||
|
|||
resc := make(chan bool, 2) |
|||
|
|||
// Try two strategies in parallel.
|
|||
// See https://github.com/googleapis/google-cloud-go/issues/194
|
|||
go func() { |
|||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) |
|||
req.Header.Set("User-Agent", userAgent) |
|||
res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) |
|||
if err != nil { |
|||
resc <- false |
|||
return |
|||
} |
|||
defer res.Body.Close() |
|||
resc <- res.Header.Get("Metadata-Flavor") == "Google" |
|||
}() |
|||
|
|||
go func() { |
|||
resolver := &net.Resolver{} |
|||
addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") |
|||
if err != nil || len(addrs) == 0 { |
|||
resc <- false |
|||
return |
|||
} |
|||
resc <- strsContains(addrs, metadataIP) |
|||
}() |
|||
|
|||
tryHarder := systemInfoSuggestsGCE() |
|||
if tryHarder { |
|||
res := <-resc |
|||
if res { |
|||
// The first strategy succeeded, so let's use it.
|
|||
return true |
|||
} |
|||
// Wait for either the DNS or metadata server probe to
|
|||
// contradict the other one and say we are running on
|
|||
// GCE. Give it a lot of time to do so, since the system
|
|||
// info already suggests we're running on a GCE BIOS.
|
|||
timer := time.NewTimer(5 * time.Second) |
|||
defer timer.Stop() |
|||
select { |
|||
case res = <-resc: |
|||
return res |
|||
case <-timer.C: |
|||
// Too slow. Who knows what this system is.
|
|||
return false |
|||
} |
|||
} |
|||
|
|||
// There's no hint from the system info that we're running on
|
|||
// GCE, so use the first probe's result as truth, whether it's
|
|||
// true or false. The goal here is to optimize for speed for
|
|||
// users who are NOT running on GCE. We can't assume that
|
|||
// either a DNS lookup or an HTTP request to a blackholed IP
|
|||
// address is fast. Worst case this should return when the
|
|||
// metaClient's Transport.ResponseHeaderTimeout or
|
|||
// Transport.Dial.Timeout fires (in two seconds).
|
|||
return <-resc |
|||
} |
|||
|
|||
// systemInfoSuggestsGCE reports whether the local system (without
|
|||
// doing network requests) suggests that we're running on GCE. If this
|
|||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
|||
// server.
|
|||
func systemInfoSuggestsGCE() bool { |
|||
if runtime.GOOS != "linux" { |
|||
// We don't have any non-Linux clues available, at least yet.
|
|||
return false |
|||
} |
|||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") |
|||
name := strings.TrimSpace(string(slurp)) |
|||
return name == "Google" || name == "Google Compute Engine" |
|||
} |
|||
|
|||
// Subscribe calls Client.Subscribe on the default client.
|
|||
func Subscribe(suffix string, fn func(v string, ok bool) error) error { |
|||
return defaultClient.Subscribe(suffix, fn) |
|||
} |
|||
|
|||
// Get calls Client.Get on the default client.
|
|||
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } |
|||
|
|||
// ProjectID returns the current instance's project ID string.
|
|||
func ProjectID() (string, error) { return defaultClient.ProjectID() } |
|||
|
|||
// NumericProjectID returns the current instance's numeric project ID.
|
|||
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } |
|||
|
|||
// InternalIP returns the instance's primary internal IP address.
|
|||
func InternalIP() (string, error) { return defaultClient.InternalIP() } |
|||
|
|||
// ExternalIP returns the instance's primary external (public) IP address.
|
|||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() } |
|||
|
|||
// Email calls Client.Email on the default client.
|
|||
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } |
|||
|
|||
// Hostname returns the instance's hostname. This will be of the form
|
|||
// "<instanceID>.c.<projID>.internal".
|
|||
func Hostname() (string, error) { return defaultClient.Hostname() } |
|||
|
|||
// InstanceTags returns the list of user-defined instance tags,
|
|||
// assigned when initially creating a GCE instance.
|
|||
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } |
|||
|
|||
// InstanceID returns the current VM's numeric instance ID.
|
|||
func InstanceID() (string, error) { return defaultClient.InstanceID() } |
|||
|
|||
// InstanceName returns the current VM's instance ID string.
|
|||
func InstanceName() (string, error) { return defaultClient.InstanceName() } |
|||
|
|||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|||
func Zone() (string, error) { return defaultClient.Zone() } |
|||
|
|||
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
|||
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } |
|||
|
|||
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
|||
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } |
|||
|
|||
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
|||
func InstanceAttributeValue(attr string) (string, error) { |
|||
return defaultClient.InstanceAttributeValue(attr) |
|||
} |
|||
|
|||
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
|||
func ProjectAttributeValue(attr string) (string, error) { |
|||
return defaultClient.ProjectAttributeValue(attr) |
|||
} |
|||
|
|||
// Scopes calls Client.Scopes on the default client.
|
|||
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } |
|||
|
|||
func strsContains(ss []string, s string) bool { |
|||
for _, v := range ss { |
|||
if v == s { |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// A Client provides metadata.
|
|||
type Client struct { |
|||
hc *http.Client |
|||
} |
|||
|
|||
// NewClient returns a Client that can be used to fetch metadata.
|
|||
// Returns the client that uses the specified http.Client for HTTP requests.
|
|||
// If nil is specified, returns the default client.
|
|||
func NewClient(c *http.Client) *Client { |
|||
if c == nil { |
|||
return defaultClient |
|||
} |
|||
|
|||
return &Client{hc: c} |
|||
} |
|||
|
|||
// getETag returns a value from the metadata service as well as the associated ETag.
|
|||
// This func is otherwise equivalent to Get.
|
|||
func (c *Client) getETag(suffix string) (value, etag string, err error) { |
|||
ctx := context.TODO() |
|||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
|||
// a container, which is an important use-case for local testing of cloud
|
|||
// deployments. To enable spoofing of the metadata service, the environment
|
|||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
|||
// requests shall go.
|
|||
host := os.Getenv(metadataHostEnv) |
|||
if host == "" { |
|||
// Using 169.254.169.254 instead of "metadata" here because Go
|
|||
// binaries built with the "netgo" tag and without cgo won't
|
|||
// know the search suffix for "metadata" is
|
|||
// ".google.internal", and this IP address is documented as
|
|||
// being stable anyway.
|
|||
host = metadataIP |
|||
} |
|||
suffix = strings.TrimLeft(suffix, "/") |
|||
u := "http://" + host + "/computeMetadata/v1/" + suffix |
|||
req, err := http.NewRequest("GET", u, nil) |
|||
if err != nil { |
|||
return "", "", err |
|||
} |
|||
req.Header.Set("Metadata-Flavor", "Google") |
|||
req.Header.Set("User-Agent", userAgent) |
|||
var res *http.Response |
|||
var reqErr error |
|||
retryer := newRetryer() |
|||
for { |
|||
res, reqErr = c.hc.Do(req) |
|||
var code int |
|||
if res != nil { |
|||
code = res.StatusCode |
|||
} |
|||
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { |
|||
if err := sleep(ctx, delay); err != nil { |
|||
return "", "", err |
|||
} |
|||
continue |
|||
} |
|||
break |
|||
} |
|||
if reqErr != nil { |
|||
return "", "", reqErr |
|||
} |
|||
defer res.Body.Close() |
|||
if res.StatusCode == http.StatusNotFound { |
|||
return "", "", NotDefinedError(suffix) |
|||
} |
|||
all, err := ioutil.ReadAll(res.Body) |
|||
if err != nil { |
|||
return "", "", err |
|||
} |
|||
if res.StatusCode != 200 { |
|||
return "", "", &Error{Code: res.StatusCode, Message: string(all)} |
|||
} |
|||
return string(all), res.Header.Get("Etag"), nil |
|||
} |
|||
|
|||
// Get returns a value from the metadata service.
|
|||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|||
//
|
|||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
|||
// 169.254.169.254 will be used instead.
|
|||
//
|
|||
// If the requested metadata is not defined, the returned error will
|
|||
// be of type NotDefinedError.
|
|||
func (c *Client) Get(suffix string) (string, error) { |
|||
val, _, err := c.getETag(suffix) |
|||
return val, err |
|||
} |
|||
|
|||
func (c *Client) getTrimmed(suffix string) (s string, err error) { |
|||
s, err = c.Get(suffix) |
|||
s = strings.TrimSpace(s) |
|||
return |
|||
} |
|||
|
|||
func (c *Client) lines(suffix string) ([]string, error) { |
|||
j, err := c.Get(suffix) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
s := strings.Split(strings.TrimSpace(j), "\n") |
|||
for i := range s { |
|||
s[i] = strings.TrimSpace(s[i]) |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// ProjectID returns the current instance's project ID string.
|
|||
func (c *Client) ProjectID() (string, error) { return projID.get(c) } |
|||
|
|||
// NumericProjectID returns the current instance's numeric project ID.
|
|||
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } |
|||
|
|||
// InstanceID returns the current VM's numeric instance ID.
|
|||
func (c *Client) InstanceID() (string, error) { return instID.get(c) } |
|||
|
|||
// InternalIP returns the instance's primary internal IP address.
|
|||
func (c *Client) InternalIP() (string, error) { |
|||
return c.getTrimmed("instance/network-interfaces/0/ip") |
|||
} |
|||
|
|||
// Email returns the email address associated with the service account.
|
|||
// The account may be empty or the string "default" to use the instance's
|
|||
// main account.
|
|||
func (c *Client) Email(serviceAccount string) (string, error) { |
|||
if serviceAccount == "" { |
|||
serviceAccount = "default" |
|||
} |
|||
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") |
|||
} |
|||
|
|||
// ExternalIP returns the instance's primary external (public) IP address.
|
|||
func (c *Client) ExternalIP() (string, error) { |
|||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") |
|||
} |
|||
|
|||
// Hostname returns the instance's hostname. This will be of the form
|
|||
// "<instanceID>.c.<projID>.internal".
|
|||
func (c *Client) Hostname() (string, error) { |
|||
return c.getTrimmed("instance/hostname") |
|||
} |
|||
|
|||
// InstanceTags returns the list of user-defined instance tags,
|
|||
// assigned when initially creating a GCE instance.
|
|||
func (c *Client) InstanceTags() ([]string, error) { |
|||
var s []string |
|||
j, err := c.Get("instance/tags") |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { |
|||
return nil, err |
|||
} |
|||
return s, nil |
|||
} |
|||
|
|||
// InstanceName returns the current VM's instance ID string.
|
|||
func (c *Client) InstanceName() (string, error) { |
|||
return c.getTrimmed("instance/name") |
|||
} |
|||
|
|||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|||
func (c *Client) Zone() (string, error) { |
|||
zone, err := c.getTrimmed("instance/zone") |
|||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
return zone[strings.LastIndex(zone, "/")+1:], nil |
|||
} |
|||
|
|||
// InstanceAttributes returns the list of user-defined attributes,
|
|||
// assigned when initially creating a GCE VM instance. The value of an
|
|||
// attribute can be obtained with InstanceAttributeValue.
|
|||
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } |
|||
|
|||
// ProjectAttributes returns the list of user-defined attributes
|
|||
// applying to the project as a whole, not just this VM. The value of
|
|||
// an attribute can be obtained with ProjectAttributeValue.
|
|||
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } |
|||
|
|||
// InstanceAttributeValue returns the value of the provided VM
|
|||
// instance attribute.
|
|||
//
|
|||
// If the requested attribute is not defined, the returned error will
|
|||
// be of type NotDefinedError.
|
|||
//
|
|||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
|||
// defined to be the empty string.
|
|||
func (c *Client) InstanceAttributeValue(attr string) (string, error) { |
|||
return c.Get("instance/attributes/" + attr) |
|||
} |
|||
|
|||
// ProjectAttributeValue returns the value of the provided
|
|||
// project attribute.
|
|||
//
|
|||
// If the requested attribute is not defined, the returned error will
|
|||
// be of type NotDefinedError.
|
|||
//
|
|||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
|||
// defined to be the empty string.
|
|||
func (c *Client) ProjectAttributeValue(attr string) (string, error) { |
|||
return c.Get("project/attributes/" + attr) |
|||
} |
|||
|
|||
// Scopes returns the service account scopes for the given account.
|
|||
// The account may be empty or the string "default" to use the instance's
|
|||
// main account.
|
|||
func (c *Client) Scopes(serviceAccount string) ([]string, error) { |
|||
if serviceAccount == "" { |
|||
serviceAccount = "default" |
|||
} |
|||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") |
|||
} |
|||
|
|||
// Subscribe subscribes to a value from the metadata service.
|
|||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|||
// The suffix may contain query parameters.
|
|||
//
|
|||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
|||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
|||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
|||
// is deleted. Subscribe returns the error value returned from the last call to
|
|||
// fn, which may be nil when ok == false.
|
|||
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { |
|||
const failedSubscribeSleep = time.Second * 5 |
|||
|
|||
// First check to see if the metadata value exists at all.
|
|||
val, lastETag, err := c.getETag(suffix) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if err := fn(val, true); err != nil { |
|||
return err |
|||
} |
|||
|
|||
ok := true |
|||
if strings.ContainsRune(suffix, '?') { |
|||
suffix += "&wait_for_change=true&last_etag=" |
|||
} else { |
|||
suffix += "?wait_for_change=true&last_etag=" |
|||
} |
|||
for { |
|||
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) |
|||
if err != nil { |
|||
if _, deleted := err.(NotDefinedError); !deleted { |
|||
time.Sleep(failedSubscribeSleep) |
|||
continue // Retry on other errors.
|
|||
} |
|||
ok = false |
|||
} |
|||
lastETag = etag |
|||
|
|||
if err := fn(val, ok); err != nil || !ok { |
|||
return err |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Error contains an error response from the server.
|
|||
type Error struct { |
|||
// Code is the HTTP response status code.
|
|||
Code int |
|||
// Message is the server response message.
|
|||
Message string |
|||
} |
|||
|
|||
func (e *Error) Error() string { |
|||
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) |
|||
} |
@ -0,0 +1,114 @@ |
|||
// Copyright 2021 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package metadata |
|||
|
|||
import ( |
|||
"context" |
|||
"io" |
|||
"math/rand" |
|||
"net/http" |
|||
"time" |
|||
) |
|||
|
|||
const ( |
|||
maxRetryAttempts = 5 |
|||
) |
|||
|
|||
var ( |
|||
syscallRetryable = func(err error) bool { return false } |
|||
) |
|||
|
|||
// defaultBackoff is basically equivalent to gax.Backoff without the need for
|
|||
// the dependency.
|
|||
type defaultBackoff struct { |
|||
max time.Duration |
|||
mul float64 |
|||
cur time.Duration |
|||
} |
|||
|
|||
func (b *defaultBackoff) Pause() time.Duration { |
|||
d := time.Duration(1 + rand.Int63n(int64(b.cur))) |
|||
b.cur = time.Duration(float64(b.cur) * b.mul) |
|||
if b.cur > b.max { |
|||
b.cur = b.max |
|||
} |
|||
return d |
|||
} |
|||
|
|||
// sleep is the equivalent of gax.Sleep without the need for the dependency.
|
|||
func sleep(ctx context.Context, d time.Duration) error { |
|||
t := time.NewTimer(d) |
|||
select { |
|||
case <-ctx.Done(): |
|||
t.Stop() |
|||
return ctx.Err() |
|||
case <-t.C: |
|||
return nil |
|||
} |
|||
} |
|||
|
|||
func newRetryer() *metadataRetryer { |
|||
return &metadataRetryer{bo: &defaultBackoff{ |
|||
cur: 100 * time.Millisecond, |
|||
max: 30 * time.Second, |
|||
mul: 2, |
|||
}} |
|||
} |
|||
|
|||
type backoff interface { |
|||
Pause() time.Duration |
|||
} |
|||
|
|||
type metadataRetryer struct { |
|||
bo backoff |
|||
attempts int |
|||
} |
|||
|
|||
func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { |
|||
if status == http.StatusOK { |
|||
return 0, false |
|||
} |
|||
retryOk := shouldRetry(status, err) |
|||
if !retryOk { |
|||
return 0, false |
|||
} |
|||
if r.attempts == maxRetryAttempts { |
|||
return 0, false |
|||
} |
|||
r.attempts++ |
|||
return r.bo.Pause(), true |
|||
} |
|||
|
|||
func shouldRetry(status int, err error) bool { |
|||
if 500 <= status && status <= 599 { |
|||
return true |
|||
} |
|||
if err == io.ErrUnexpectedEOF { |
|||
return true |
|||
} |
|||
// Transient network errors should be retried.
|
|||
if syscallRetryable(err) { |
|||
return true |
|||
} |
|||
if err, ok := err.(interface{ Temporary() bool }); ok { |
|||
if err.Temporary() { |
|||
return true |
|||
} |
|||
} |
|||
if err, ok := err.(interface{ Unwrap() error }); ok { |
|||
return shouldRetry(status, err.Unwrap()) |
|||
} |
|||
return false |
|||
} |
@ -0,0 +1,26 @@ |
|||
// Copyright 2021 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
//go:build linux
|
|||
// +build linux
|
|||
|
|||
package metadata |
|||
|
|||
import "syscall" |
|||
|
|||
func init() { |
|||
// Initialize syscallRetryable to return true on transient socket-level
|
|||
// errors. These errors are specific to Linux.
|
|||
syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } |
|||
} |
@ -0,0 +1,23 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// This file, and the {{.RootMod}} import, won't actually become part of
|
|||
// the resultant binary.
|
|||
//go:build modhack
|
|||
// +build modhack
|
|||
|
|||
package metadata |
|||
|
|||
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
|||
import _ "cloud.google.com/go/compute/internal" |
@ -0,0 +1,97 @@ |
|||
# Changes |
|||
|
|||
## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) |
|||
|
|||
## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) |
|||
|
|||
## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) |
|||
|
|||
## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) |
|||
|
|||
## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) |
|||
|
|||
## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) |
|||
|
|||
## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) |
|||
|
|||
## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) |
|||
|
|||
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) |
|||
|
|||
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) |
|||
|
|||
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) |
|||
|
|||
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) |
|||
|
|||
### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) |
|||
|
|||
## v0.1.0 |
|||
|
|||
This is the first tag to carve out iam as its own module. See |
|||
[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository). |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,40 @@ |
|||
# IAM API |
|||
|
|||
[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam) |
|||
|
|||
Go Client Library for IAM API. |
|||
|
|||
## Install |
|||
|
|||
```bash |
|||
go get cloud.google.com/go/iam |
|||
``` |
|||
|
|||
## Stability |
|||
|
|||
The stability of this module is indicated by SemVer. |
|||
|
|||
However, a `v1+` module may have breaking changes in two scenarios: |
|||
|
|||
* Packages with `alpha` or `beta` in the import path |
|||
* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature). |
|||
|
|||
## Go Version Support |
|||
|
|||
See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) |
|||
section in the root directory's README. |
|||
|
|||
## Authorization |
|||
|
|||
See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization) |
|||
section in the root directory's README. |
|||
|
|||
## Contributing |
|||
|
|||
Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) |
|||
document for details. |
|||
|
|||
Please note that this project is released with a Contributor Code of Conduct. |
|||
By participating in this project you agree to abide by its terms. See |
|||
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) |
|||
for more information. |
@ -0,0 +1,672 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// versions:
|
|||
// protoc-gen-go v1.26.0
|
|||
// protoc v3.21.9
|
|||
// source: google/iam/v1/iam_policy.proto
|
|||
|
|||
package iampb |
|||
|
|||
import ( |
|||
context "context" |
|||
reflect "reflect" |
|||
sync "sync" |
|||
|
|||
_ "google.golang.org/genproto/googleapis/api/annotations" |
|||
grpc "google.golang.org/grpc" |
|||
codes "google.golang.org/grpc/codes" |
|||
status "google.golang.org/grpc/status" |
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" |
|||
) |
|||
|
|||
const ( |
|||
// Verify that this generated code is sufficiently up-to-date.
|
|||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) |
|||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) |
|||
) |
|||
|
|||
// Request message for `SetIamPolicy` method.
|
|||
type SetIamPolicyRequest struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
// REQUIRED: The resource for which the policy is being specified.
|
|||
// See the operation documentation for the appropriate value for this field.
|
|||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` |
|||
// REQUIRED: The complete policy to be applied to the `resource`. The size of
|
|||
// the policy is limited to a few 10s of KB. An empty policy is a
|
|||
// valid policy but certain Cloud Platform services (such as Projects)
|
|||
// might reject them.
|
|||
Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` |
|||
// OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
|
|||
// the fields in the mask will be modified. If no mask is provided, the
|
|||
// following default mask is used:
|
|||
//
|
|||
// `paths: "bindings, etag"`
|
|||
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` |
|||
} |
|||
|
|||
func (x *SetIamPolicyRequest) Reset() { |
|||
*x = SetIamPolicyRequest{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *SetIamPolicyRequest) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*SetIamPolicyRequest) ProtoMessage() {} |
|||
|
|||
func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead.
|
|||
func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { |
|||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} |
|||
} |
|||
|
|||
func (x *SetIamPolicyRequest) GetResource() string { |
|||
if x != nil { |
|||
return x.Resource |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *SetIamPolicyRequest) GetPolicy() *Policy { |
|||
if x != nil { |
|||
return x.Policy |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { |
|||
if x != nil { |
|||
return x.UpdateMask |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Request message for `GetIamPolicy` method.
|
|||
type GetIamPolicyRequest struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
// REQUIRED: The resource for which the policy is being requested.
|
|||
// See the operation documentation for the appropriate value for this field.
|
|||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` |
|||
// OPTIONAL: A `GetPolicyOptions` object for specifying options to
|
|||
// `GetIamPolicy`.
|
|||
Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` |
|||
} |
|||
|
|||
func (x *GetIamPolicyRequest) Reset() { |
|||
*x = GetIamPolicyRequest{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *GetIamPolicyRequest) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*GetIamPolicyRequest) ProtoMessage() {} |
|||
|
|||
func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead.
|
|||
func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { |
|||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} |
|||
} |
|||
|
|||
func (x *GetIamPolicyRequest) GetResource() string { |
|||
if x != nil { |
|||
return x.Resource |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { |
|||
if x != nil { |
|||
return x.Options |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Request message for `TestIamPermissions` method.
|
|||
type TestIamPermissionsRequest struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
// REQUIRED: The resource for which the policy detail is being requested.
|
|||
// See the operation documentation for the appropriate value for this field.
|
|||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` |
|||
// The set of permissions to check for the `resource`. Permissions with
|
|||
// wildcards (such as '*' or 'storage.*') are not allowed. For more
|
|||
// information see
|
|||
// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
|
|||
Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` |
|||
} |
|||
|
|||
func (x *TestIamPermissionsRequest) Reset() { |
|||
*x = TestIamPermissionsRequest{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *TestIamPermissionsRequest) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*TestIamPermissionsRequest) ProtoMessage() {} |
|||
|
|||
func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead.
|
|||
func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { |
|||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} |
|||
} |
|||
|
|||
func (x *TestIamPermissionsRequest) GetResource() string { |
|||
if x != nil { |
|||
return x.Resource |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *TestIamPermissionsRequest) GetPermissions() []string { |
|||
if x != nil { |
|||
return x.Permissions |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Response message for `TestIamPermissions` method.
|
|||
type TestIamPermissionsResponse struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
// A subset of `TestPermissionsRequest.permissions` that the caller is
|
|||
// allowed.
|
|||
Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` |
|||
} |
|||
|
|||
func (x *TestIamPermissionsResponse) Reset() { |
|||
*x = TestIamPermissionsResponse{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *TestIamPermissionsResponse) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*TestIamPermissionsResponse) ProtoMessage() {} |
|||
|
|||
func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { |
|||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead.
|
|||
func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { |
|||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} |
|||
} |
|||
|
|||
func (x *TestIamPermissionsResponse) GetPermissions() []string { |
|||
if x != nil { |
|||
return x.Permissions |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor |
|||
|
|||
var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ |
|||
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, |
|||
0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, |
|||
0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, |
|||
0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, |
|||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, |
|||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, |
|||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, |
|||
0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, |
|||
0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, |
|||
0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, |
|||
0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, |
|||
0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, |
|||
0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, |
|||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, |
|||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, |
|||
0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, |
|||
0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, |
|||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, |
|||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, |
|||
0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, |
|||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, |
|||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, |
|||
0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, |
|||
0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, |
|||
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, |
|||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, |
|||
0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, |
|||
0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, |
|||
0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, |
|||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, |
|||
0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, |
|||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, |
|||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, |
|||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, |
|||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, |
|||
0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, |
|||
0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, |
|||
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, |
|||
0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, |
|||
0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, |
|||
0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, |
|||
0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, |
|||
0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, |
|||
0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, |
|||
0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, |
|||
0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, |
|||
0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, |
|||
0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, |
|||
0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, |
|||
0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, |
|||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, |
|||
0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, |
|||
0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, |
|||
0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, |
|||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, |
|||
0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, |
|||
0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, |
|||
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, |
|||
0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, |
|||
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, |
|||
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, |
|||
0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, |
|||
0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, |
|||
0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, |
|||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, |
|||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, |
|||
0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, |
|||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, |
|||
0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, |
|||
0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, |
|||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, |
|||
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, |
|||
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, |
|||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, |
|||
0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, |
|||
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, |
|||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, |
|||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, |
|||
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, |
|||
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, |
|||
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, |
|||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, |
|||
} |
|||
|
|||
// Lazily-compressed raw descriptor state for iam_policy.proto (generated bookkeeping).
var (
	file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once
	file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc
)

// file_google_iam_v1_iam_policy_proto_rawDescGZIP returns the gzip-compressed
// raw file descriptor, compressing it exactly once on first use.
func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte {
	file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() {
		file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData)
	})
	return file_google_iam_v1_iam_policy_proto_rawDescData
}

// Message-info slots and Go type / dependency index tables consumed by the
// protobuf TypeBuilder below (generated bookkeeping).
var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{
	(*SetIamPolicyRequest)(nil),        // 0: google.iam.v1.SetIamPolicyRequest
	(*GetIamPolicyRequest)(nil),        // 1: google.iam.v1.GetIamPolicyRequest
	(*TestIamPermissionsRequest)(nil),  // 2: google.iam.v1.TestIamPermissionsRequest
	(*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse
	(*Policy)(nil),                     // 4: google.iam.v1.Policy
	(*fieldmaskpb.FieldMask)(nil),      // 5: google.protobuf.FieldMask
	(*GetPolicyOptions)(nil),           // 6: google.iam.v1.GetPolicyOptions
}
var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{
	4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy
	5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
	6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions
	0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
	1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
	2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
	4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy
	4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy
	3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
	6, // [6:9] is the sub-list for method output_type
	3, // [3:6] is the sub-list for method input_type
	3, // [3:3] is the sub-list for extension type_name
	3, // [3:3] is the sub-list for extension extendee
	0, // [0:3] is the sub-list for field type_name
}
|||
|
|||
func init() { file_google_iam_v1_iam_policy_proto_init() }

// file_google_iam_v1_iam_policy_proto_init registers this file's messages and
// service with the protobuf runtime. It is idempotent: once
// File_google_iam_v1_iam_policy_proto has been built, later calls return
// immediately.
func file_google_iam_v1_iam_policy_proto_init() {
	if File_google_iam_v1_iam_policy_proto != nil {
		return
	}
	// Dependencies must be initialized first so their descriptors resolve.
	file_google_iam_v1_options_proto_init()
	file_google_iam_v1_policy_proto_init()
	if !protoimpl.UnsafeEnabled {
		// Without the unsafe package the runtime needs exporter functions to
		// reach each message's unexported bookkeeping fields.
		file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SetIamPolicyRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetIamPolicyRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TestIamPermissionsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TestIamPermissionsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_google_iam_v1_iam_policy_proto_goTypes,
		DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs,
		MessageInfos:      file_google_iam_v1_iam_policy_proto_msgTypes,
	}.Build()
	File_google_iam_v1_iam_policy_proto = out.File
	// Release the build-time tables so they can be garbage collected.
	file_google_iam_v1_iam_policy_proto_rawDesc = nil
	file_google_iam_v1_iam_policy_proto_goTypes = nil
	file_google_iam_v1_iam_policy_proto_depIdxs = nil
}
|||
|
|||
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6

// IAMPolicyClient is the client API for IAMPolicy service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type IAMPolicyClient interface {
	// Sets the access control policy on the specified resource. Replaces any
	// existing policy.
	//
	// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
	SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
	// Gets the access control policy for a resource.
	// Returns an empty policy if the resource exists and does not have a policy
	// set.
	GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
	// Returns permissions that a caller has on the specified resource.
	// If the resource does not exist, this will return an empty set of
	// permissions, not a `NOT_FOUND` error.
	//
	// Note: This operation is designed to be used for building permission-aware
	// UIs and command-line tools, not for authorization checking. This operation
	// may "fail open" without warning.
	TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
}

// iAMPolicyClient is the generated IAMPolicyClient implementation backed by a
// gRPC connection.
type iAMPolicyClient struct {
	cc grpc.ClientConnInterface
}

// NewIAMPolicyClient returns an IAMPolicyClient that issues RPCs over cc.
func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient {
	return &iAMPolicyClient{cc}
}

func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
	out := new(Policy)
	err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
	out := new(Policy)
	err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
	out := new(TestIamPermissionsResponse)
	err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
|||
|
|||
// IAMPolicyServer is the server API for IAMPolicy service.
type IAMPolicyServer interface {
	// Sets the access control policy on the specified resource. Replaces any
	// existing policy.
	//
	// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
	SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
	// Gets the access control policy for a resource.
	// Returns an empty policy if the resource exists and does not have a policy
	// set.
	GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
	// Returns permissions that a caller has on the specified resource.
	// If the resource does not exist, this will return an empty set of
	// permissions, not a `NOT_FOUND` error.
	//
	// Note: This operation is designed to be used for building permission-aware
	// UIs and command-line tools, not for authorization checking. This operation
	// may "fail open" without warning.
	TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
}

// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations.
// Every method returns codes.Unimplemented.
type UnimplementedIAMPolicyServer struct {
}

func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
}
func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
}
func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
}

// RegisterIAMPolicyServer registers srv's IAMPolicy implementation with the
// given gRPC server.
func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
	s.RegisterService(&_IAMPolicy_serviceDesc, srv)
}
|||
|
|||
// _IAMPolicy_SetIamPolicy_Handler decodes the wire request and dispatches it
// to the server implementation, routing through interceptor when one is set.
func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SetIamPolicyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _IAMPolicy_GetIamPolicy_Handler: same dispatch pattern for GetIamPolicy.
func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetIamPolicyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _IAMPolicy_TestIamPermissions_Handler: same dispatch pattern for TestIamPermissions.
func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(TestIamPermissionsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _IAMPolicy_serviceDesc wires the three unary methods to their handlers; the
// service has no streaming methods.
var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.iam.v1.IAMPolicy",
	HandlerType: (*IAMPolicyServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "SetIamPolicy",
			Handler:    _IAMPolicy_SetIamPolicy_Handler,
		},
		{
			MethodName: "GetIamPolicy",
			Handler:    _IAMPolicy_GetIamPolicy_Handler,
		},
		{
			MethodName: "TestIamPermissions",
			Handler:    _IAMPolicy_TestIamPermissions_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "google/iam/v1/iam_policy.proto",
}
@ -0,0 +1,187 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// versions:
|
|||
// protoc-gen-go v1.26.0
|
|||
// protoc v3.21.9
|
|||
// source: google/iam/v1/options.proto
|
|||
|
|||
package iampb |
|||
|
|||
import ( |
|||
reflect "reflect" |
|||
sync "sync" |
|||
|
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Encapsulates settings provided to GetIamPolicy.
type GetPolicyOptions struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Optional. The maximum policy version that will be used to format the
	// policy.
	//
	// Valid values are 0, 1, and 3. Requests specifying an invalid value will be
	// rejected.
	//
	// Requests for policies with any conditional role bindings must specify
	// version 3. Policies with no conditional role bindings may specify any valid
	// value or leave the field unset.
	//
	// The policy in the response might use the policy version that you specified,
	// or it might use a lower policy version. For example, if you specify version
	// 3, but the policy has no conditional role bindings, the response uses
	// version 1.
	//
	// To learn which resources support conditions in their IAM policies, see the
	// [IAM
	// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
	RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"`
}

// Reset restores x to its zero state, re-binding its message info when the
// unsafe fast path is enabled.
func (x *GetPolicyOptions) Reset() {
	*x = GetPolicyOptions{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_iam_v1_options_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *GetPolicyOptions) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetPolicyOptions) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message {
	mi := &file_google_iam_v1_options_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead.
func (*GetPolicyOptions) Descriptor() ([]byte, []int) {
	return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0}
}

// GetRequestedPolicyVersion returns the field, or 0 when x is nil.
func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 {
	if x != nil {
		return x.RequestedPolicyVersion
	}
	return 0
}
|||
|
|||
var File_google_iam_v1_options_proto protoreflect.FileDescriptor

// Wire-format file descriptor for google/iam/v1/options.proto (generated; do
// not edit by hand).
var file_google_iam_v1_options_proto_rawDesc = []byte{
	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10,
	0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
	0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f,
	0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c,
	0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63,
	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31,
	0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
	0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
	0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
	0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
	0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
	0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
	0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
	0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

// Lazily-compressed raw descriptor state for options.proto (generated bookkeeping).
var (
	file_google_iam_v1_options_proto_rawDescOnce sync.Once
	file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc
)

// file_google_iam_v1_options_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor, compressing it exactly once on first use.
func file_google_iam_v1_options_proto_rawDescGZIP() []byte {
	file_google_iam_v1_options_proto_rawDescOnce.Do(func() {
		file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData)
	})
	return file_google_iam_v1_options_proto_rawDescData
}

var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_iam_v1_options_proto_goTypes = []interface{}{
	(*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions
}
var file_google_iam_v1_options_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_google_iam_v1_options_proto_init() }

// file_google_iam_v1_options_proto_init registers the file's single message
// with the protobuf runtime; it is idempotent.
func file_google_iam_v1_options_proto_init() {
	if File_google_iam_v1_options_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetPolicyOptions); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_iam_v1_options_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_google_iam_v1_options_proto_goTypes,
		DependencyIndexes: file_google_iam_v1_options_proto_depIdxs,
		MessageInfos:      file_google_iam_v1_options_proto_msgTypes,
	}.Build()
	File_google_iam_v1_options_proto = out.File
	// Release build-time tables for garbage collection.
	file_google_iam_v1_options_proto_rawDesc = nil
	file_google_iam_v1_options_proto_goTypes = nil
	file_google_iam_v1_options_proto_depIdxs = nil
}
File diff suppressed because it is too large
@ -0,0 +1,387 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package iam supports the resource-specific operations of Google Cloud
|
|||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
|||
// See https://cloud.google.com/iam for more about IAM.
|
|||
//
|
|||
// Users of the Google Cloud Libraries will typically not use this package
|
|||
// directly. Instead they will begin with some resource that supports IAM, like
|
|||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
|||
package iam |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
pb "cloud.google.com/go/iam/apiv1/iampb" |
|||
gax "github.com/googleapis/gax-go/v2" |
|||
"google.golang.org/grpc" |
|||
"google.golang.org/grpc/codes" |
|||
"google.golang.org/grpc/metadata" |
|||
) |
|||
|
|||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
|||
// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	// Get fetches the policy for resource.
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	// Set replaces resource's policy with p.
	Set(ctx context.Context, resource string, p *pb.Policy) error
	// Test reports which of perms the caller holds on resource.
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
	// GetWithVersion fetches the policy formatted at requestedPolicyVersion.
	GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient
}
|||
|
|||
// withRetry is the retry policy applied to every IAMPolicy RPC in this
// package: exponential backoff (100ms initial, x1.3, capped at 60s) on
// DeadlineExceeded and Unavailable only.
var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})

// Get fetches the policy for resource at the default policy version (1).
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	return g.GetWithVersion(ctx, resource, 1)
}
|||
|
|||
func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { |
|||
var proto *pb.Policy |
|||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) |
|||
ctx = insertMetadata(ctx, md) |
|||
|
|||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { |
|||
var err error |
|||
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ |
|||
Resource: resource, |
|||
Options: &pb.GetPolicyOptions{ |
|||
RequestedPolicyVersion: requestedPolicyVersion, |
|||
}, |
|||
}) |
|||
return err |
|||
}, withRetry) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return proto, nil |
|||
} |
|||
|
|||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { |
|||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) |
|||
ctx = insertMetadata(ctx, md) |
|||
|
|||
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { |
|||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ |
|||
Resource: resource, |
|||
Policy: p, |
|||
}) |
|||
return err |
|||
}, withRetry) |
|||
} |
|||
|
|||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { |
|||
var res *pb.TestIamPermissionsResponse |
|||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) |
|||
ctx = insertMetadata(ctx, md) |
|||
|
|||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { |
|||
var err error |
|||
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ |
|||
Resource: resource, |
|||
Permissions: perms, |
|||
}) |
|||
return err |
|||
}, withRetry) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return res.Permissions, nil |
|||
} |
|||
|
|||
// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client // transport used for all IAM RPCs
	resource string // fully-qualified name of the resource the policy applies to
}

// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions).
type Handle3 struct {
	c        client
	resource string
	version  int32 // requested policy version; always 3 when created via Handle.V3
}
|||
|
|||
// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle {
	return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}

// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: c}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
	return &Handle{
		c:        c,
		resource: resource,
	}
}

// V3 returns a Handle3, which is like Handle except it sets
// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3
// when storing a policy.
func (h *Handle) V3() *Handle3 {
	return &Handle3{
		c:        h.c,
		resource: h.resource,
		version:  3,
	}
}
|||
|
|||
// Policy retrieves the IAM policy for the resource.
|
|||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) { |
|||
proto, err := h.c.Get(ctx, h.resource) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return &Policy{InternalProto: proto}, nil |
|||
} |
|||
|
|||
// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
// (The server enforces this via the policy's etag.)
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}
|||
|
|||
// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)

// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.

	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}
|||
|
|||
// Members returns the list of members with the supplied role.
|
|||
// The return value should not be modified. Use Add and Remove
|
|||
// to modify the members of a role.
|
|||
func (p *Policy) Members(r RoleName) []string { |
|||
b := p.binding(r) |
|||
if b == nil { |
|||
return nil |
|||
} |
|||
return b.Members |
|||
} |
|||
|
|||
// HasRole reports whether member has role r.
// A missing binding for r yields a nil binding, for which memberIndex
// returns -1, so the result is false.
func (p *Policy) HasRole(member string, r RoleName) bool {
	return memberIndex(member, p.binding(r)) >= 0
}
|||
|
|||
// Add adds member member to role r if it is not already present.
|
|||
// A new binding is created if there is no binding for the role.
|
|||
func (p *Policy) Add(member string, r RoleName) { |
|||
b := p.binding(r) |
|||
if b == nil { |
|||
if p.InternalProto == nil { |
|||
p.InternalProto = &pb.Policy{} |
|||
} |
|||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ |
|||
Role: string(r), |
|||
Members: []string{member}, |
|||
}) |
|||
return |
|||
} |
|||
if memberIndex(member, b) < 0 { |
|||
b.Members = append(b.Members, member) |
|||
return |
|||
} |
|||
} |
|||
|
|||
// Remove removes member from role r if it is present.
|
|||
func (p *Policy) Remove(member string, r RoleName) { |
|||
bi := p.bindingIndex(r) |
|||
if bi < 0 { |
|||
return |
|||
} |
|||
bindings := p.InternalProto.Bindings |
|||
b := bindings[bi] |
|||
mi := memberIndex(member, b) |
|||
if mi < 0 { |
|||
return |
|||
} |
|||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
|||
// into the removed spot and shrink the slice.
|
|||
if len(b.Members) == 1 { |
|||
// Remove binding.
|
|||
last := len(bindings) - 1 |
|||
bindings[bi] = bindings[last] |
|||
bindings[last] = nil |
|||
p.InternalProto.Bindings = bindings[:last] |
|||
return |
|||
} |
|||
// Remove member.
|
|||
// TODO(jba): worry about multiple copies of m?
|
|||
last := len(b.Members) - 1 |
|||
b.Members[mi] = b.Members[last] |
|||
b.Members[last] = "" |
|||
b.Members = b.Members[:last] |
|||
} |
|||
|
|||
// Roles returns the names of all the roles that appear in the Policy.
|
|||
func (p *Policy) Roles() []RoleName { |
|||
if p.InternalProto == nil { |
|||
return nil |
|||
} |
|||
var rns []RoleName |
|||
for _, b := range p.InternalProto.Bindings { |
|||
rns = append(rns, RoleName(b.Role)) |
|||
} |
|||
return rns |
|||
} |
|||
|
|||
// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}

// bindingIndex returns the index of the binding for role r, or -1 if none
// exists (including when the policy has no proto at all).
func (p *Policy) bindingIndex(r RoleName) int {
	if p.InternalProto == nil {
		return -1
	}
	for i, b := range p.InternalProto.Bindings {
		if b.Role == string(r) {
			return i
		}
	}
	return -1
}
|||
|
|||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
|||
func memberIndex(m string, b *pb.Binding) int { |
|||
if b == nil { |
|||
return -1 |
|||
} |
|||
for i, mm := range b.Members { |
|||
if mm == m { |
|||
return i |
|||
} |
|||
} |
|||
return -1 |
|||
} |
|||
|
|||
// insertMetadata inserts metadata into the given context
|
|||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { |
|||
out, _ := metadata.FromOutgoingContext(ctx) |
|||
out = out.Copy() |
|||
for _, md := range mds { |
|||
for k, v := range md { |
|||
out[k] = append(out[k], v...) |
|||
} |
|||
} |
|||
return metadata.NewOutgoingContext(ctx, out) |
|||
} |
|||
|
|||
// A Policy3 is a list of Bindings representing roles granted to members.
|
|||
//
|
|||
// The zero Policy3 is a valid policy with no bindings.
|
|||
//
|
|||
// It is similar to a Policy, except a Policy3 provides direct access to the
|
|||
// list of Bindings.
|
|||
//
|
|||
// The policy version is always set to 3.
|
|||
type Policy3 struct {
	// etag is the server-provided tag captured by Handle3.Policy and echoed
	// back by Handle3.SetPolicy, so a Set only succeeds if the policy has not
	// changed since it was read.
	etag []byte
	// Bindings is the policy's list of role/member bindings, exposed directly.
	Bindings []*pb.Binding
}
|||
|
|||
// Policy retrieves the IAM policy for the resource.
|
|||
//
|
|||
// requestedPolicyVersion is always set to 3.
|
|||
func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { |
|||
proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return &Policy3{ |
|||
Bindings: proto.Bindings, |
|||
etag: proto.Etag, |
|||
}, nil |
|||
} |
|||
|
|||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
|||
//
|
|||
// If policy was created from a prior call to Get, then the modification will
|
|||
// only succeed if the policy has not changed since the Get.
|
|||
func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { |
|||
return h.c.Set(ctx, h.resource, &pb.Policy{ |
|||
Bindings: policy.Bindings, |
|||
Etag: policy.etag, |
|||
Version: h.version, |
|||
}) |
|||
} |
|||
|
|||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
|||
func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	// Delegates directly to the underlying IAM client for this resource.
	return h.c.Test(ctx, h.resource, permissions)
}
File diff suppressed because it is too large
@ -0,0 +1,43 @@ |
|||
# Internal |
|||
|
|||
This directory contains internal code for cloud.google.com/go packages. |
|||
|
|||
## .repo-metadata-full.json |
|||
|
|||
`.repo-metadata-full.json` contains metadata about the packages in this repo. It |
|||
is generated by `internal/gapicgen/generator`. It's processed by external tools |
|||
to build lists of all of the packages. |
|||
|
|||
Don't make breaking changes to the format without consulting with the external |
|||
tools. |
|||
|
|||
One day, we may want to create individual `.repo-metadata.json` files next to |
|||
each package, which is the pattern followed by some other languages. External |
|||
tools would then talk to pkg.go.dev or some other service to get the overall |
|||
list of packages and use the `.repo-metadata.json` files to get the additional |
|||
metadata required. For now, `.repo-metadata-full.json` includes everything. |
|||
|
|||
## cloudbuild.yaml |
|||
|
|||
To kick off a build locally run from the repo root: |
|||
|
|||
```bash |
|||
gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml |
|||
``` |
|||
|
|||
### Updating OwlBot SHA |
|||
|
|||
You may want to manually update which version of the post processor will be |
|||
used -- to do this you need to update the SHA in the OwlBot lock file. Start by |
|||
running the following commands: |
|||
|
|||
```bash |
|||
docker pull gcr.io/cloud-devrel-public-resources/owlbot-go:latest |
|||
docker inspect --format='{{index .RepoDigests 0}}' gcr.io/cloud-devrel-public-resources/owlbot-go:latest |
|||
``` |
|||
|
|||
This will give you a SHA. You can use this value to update the value in |
|||
`.github/.OwlBot.lock.yaml`. |
|||
|
|||
*Note*: OwlBot will eventually open a pull request to update this value if it |
|||
discovers a new version of the container. |
@ -0,0 +1,55 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
import ( |
|||
"fmt" |
|||
|
|||
"google.golang.org/api/googleapi" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// Annotate prepends msg to the error message in err, attempting
|
|||
// to preserve other information in err, like an error code.
|
|||
//
|
|||
// Annotate panics if err is nil.
|
|||
//
|
|||
// Annotate knows about these error types:
|
|||
// - "google.golang.org/grpc/status".Status
|
|||
// - "google.golang.org/api/googleapi".Error
|
|||
// If the error is not one of these types, Annotate behaves
|
|||
// like
|
|||
//
|
|||
// fmt.Errorf("%s: %v", msg, err)
|
|||
func Annotate(err error, msg string) error { |
|||
if err == nil { |
|||
panic("Annotate called with nil") |
|||
} |
|||
if s, ok := status.FromError(err); ok { |
|||
p := s.Proto() |
|||
p.Message = msg + ": " + p.Message |
|||
return status.ErrorProto(p) |
|||
} |
|||
if g, ok := err.(*googleapi.Error); ok { |
|||
g.Message = msg + ": " + g.Message |
|||
return g |
|||
} |
|||
return fmt.Errorf("%s: %v", msg, err) |
|||
} |
|||
|
|||
// Annotatef uses format and args to format a string, then calls Annotate.
|
|||
func Annotatef(err error, format string, args ...interface{}) error {
	// Format the message first; Annotate panics if err is nil.
	return Annotate(err, fmt.Sprintf(format, args...))
}
@ -0,0 +1,25 @@ |
|||
# Copyright 2023 Google LLC |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
# note: /workspace is a special directory in the docker image where all the files in this folder |
|||
# get placed on your behalf |
|||
|
|||
timeout: 7200s # 2 hours |
|||
steps: |
|||
- name: gcr.io/cloud-builders/docker |
|||
args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.'] |
|||
dir: internal |
|||
|
|||
images: |
|||
- gcr.io/cloud-devrel-public-resources/owlbot-go:latest |
@ -0,0 +1,108 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package optional provides versions of primitive types that can
|
|||
// be nil. These are useful in methods that update some of an API object's
|
|||
// fields.
|
|||
package optional |
|||
|
|||
import ( |
|||
"fmt" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// Each alias below is interface{} so that nil can represent "not set".
// Use the corresponding ToXxx helper to extract the concrete value.
type (
	// Bool is either a bool or nil.
	Bool interface{}

	// String is either a string or nil.
	String interface{}

	// Int is either an int or nil.
	Int interface{}

	// Uint is either a uint or nil.
	Uint interface{}

	// Float64 is either a float64 or nil.
	Float64 interface{}

	// Duration is either a time.Duration or nil.
	Duration interface{}
)
|||
|
|||
// ToBool returns its argument as a bool.
|
|||
// It panics if its argument is nil or not a bool.
|
|||
func ToBool(v Bool) bool { |
|||
x, ok := v.(bool) |
|||
if !ok { |
|||
doPanic("Bool", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToString returns its argument as a string.
|
|||
// It panics if its argument is nil or not a string.
|
|||
func ToString(v String) string { |
|||
x, ok := v.(string) |
|||
if !ok { |
|||
doPanic("String", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToInt returns its argument as an int.
|
|||
// It panics if its argument is nil or not an int.
|
|||
func ToInt(v Int) int { |
|||
x, ok := v.(int) |
|||
if !ok { |
|||
doPanic("Int", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToUint returns its argument as a uint.
|
|||
// It panics if its argument is nil or not a uint.
|
|||
func ToUint(v Uint) uint { |
|||
x, ok := v.(uint) |
|||
if !ok { |
|||
doPanic("Uint", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToFloat64 returns its argument as a float64.
|
|||
// It panics if its argument is nil or not a float64.
|
|||
func ToFloat64(v Float64) float64 { |
|||
x, ok := v.(float64) |
|||
if !ok { |
|||
doPanic("Float64", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// ToDuration returns its argument as a time.Duration.
|
|||
// It panics if its argument is nil or not a time.Duration.
|
|||
func ToDuration(v Duration) time.Duration { |
|||
x, ok := v.(time.Duration) |
|||
if !ok { |
|||
doPanic("Duration", v) |
|||
} |
|||
return x |
|||
} |
|||
|
|||
// doPanic reports a type mismatch for the optional type named capType,
// e.g. "Bool", using the lowercased name for the expected dynamic type.
func doPanic(capType string, v interface{}) {
	msg := fmt.Sprintf("optional.%s value should be %s, got %T",
		capType, strings.ToLower(capType), v)
	panic(msg)
}
@ -0,0 +1,85 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
gax "github.com/googleapis/gax-go/v2" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// Retry calls the supplied function f repeatedly according to the provided
|
|||
// backoff parameters. It returns when one of the following occurs:
|
|||
// When f's first return value is true, Retry immediately returns with f's second
|
|||
// return value.
|
|||
// When the provided context is done, Retry returns with an error that
|
|||
// includes both ctx.Error() and the last error returned by f.
|
|||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	// Delegates to retry with gax.Sleep as the sleeper; the sleep function is
	// a parameter of retry so it can be substituted (presumably for tests —
	// see retry below).
	return retry(ctx, bo, f, gax.Sleep)
}
|||
|
|||
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), |
|||
sleep func(context.Context, time.Duration) error) error { |
|||
var lastErr error |
|||
for { |
|||
stop, err := f() |
|||
if stop { |
|||
return err |
|||
} |
|||
// Remember the last "real" error from f.
|
|||
if err != nil && err != context.Canceled && err != context.DeadlineExceeded { |
|||
lastErr = err |
|||
} |
|||
p := bo.Pause() |
|||
if ctxErr := sleep(ctx, p); ctxErr != nil { |
|||
if lastErr != nil { |
|||
return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr} |
|||
} |
|||
return ctxErr |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Use this error type to return an error which allows introspection of both
|
|||
// the context error and the error from the service.
|
|||
type wrappedCallErr struct {
	// ctxErr is the context error (cancellation or deadline) that ended the
	// retry loop.
	ctxErr error
	// wrappedErr is the last non-context error returned by the retried call.
	wrappedErr error
}
|||
|
|||
// Error implements error, reporting both the context error that stopped the
// retries and the last error returned by the retried call.
func (e wrappedCallErr) Error() string {
	return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
}
|||
|
|||
// Unwrap exposes the service error to errors.Unwrap/errors.As chains.
func (e wrappedCallErr) Unwrap() error {
	return e.wrappedErr
}
|||
|
|||
// Is allows errors.Is to match the error from the call as well as context
|
|||
// sentinel errors.
|
|||
func (e wrappedCallErr) Is(err error) bool { |
|||
return e.ctxErr == err || e.wrappedErr == err |
|||
} |
|||
|
|||
// GRPCStatus allows the wrapped error to be used with status.FromError.
|
|||
func (e wrappedCallErr) GRPCStatus() *status.Status { |
|||
if s, ok := status.FromError(e.wrappedErr); ok { |
|||
return s |
|||
} |
|||
return nil |
|||
} |
@ -0,0 +1,111 @@ |
|||
// Copyright 2018 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package trace |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
|
|||
"go.opencensus.io/trace" |
|||
"golang.org/x/xerrors" |
|||
"google.golang.org/api/googleapi" |
|||
"google.golang.org/genproto/googleapis/rpc/code" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// StartSpan adds a span to the trace with the given name.
|
|||
func StartSpan(ctx context.Context, name string) context.Context {
	// The span handle is discarded here; callers retrieve it later via
	// trace.FromContext (see EndSpan), so only the derived context is returned.
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}
|||
|
|||
// EndSpan ends a span with the given error.
|
|||
func EndSpan(ctx context.Context, err error) { |
|||
span := trace.FromContext(ctx) |
|||
if err != nil { |
|||
span.SetStatus(toStatus(err)) |
|||
} |
|||
span.End() |
|||
} |
|||
|
|||
// toStatus interrogates an error and converts it to an appropriate
|
|||
// OpenCensus status.
|
|||
func toStatus(err error) trace.Status { |
|||
var err2 *googleapi.Error |
|||
if ok := xerrors.As(err, &err2); ok { |
|||
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} |
|||
} else if s, ok := status.FromError(err); ok { |
|||
return trace.Status{Code: int32(s.Code()), Message: s.Message()} |
|||
} else { |
|||
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} |
|||
} |
|||
} |
|||
|
|||
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
|
|||
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
|
|||
func httpStatusCodeToOCCode(httpStatusCode int) int32 { |
|||
switch httpStatusCode { |
|||
case 200: |
|||
return int32(code.Code_OK) |
|||
case 499: |
|||
return int32(code.Code_CANCELLED) |
|||
case 500: |
|||
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
|
|||
case 400: |
|||
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
|
|||
case 504: |
|||
return int32(code.Code_DEADLINE_EXCEEDED) |
|||
case 404: |
|||
return int32(code.Code_NOT_FOUND) |
|||
case 409: |
|||
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
|
|||
case 403: |
|||
return int32(code.Code_PERMISSION_DENIED) |
|||
case 401: |
|||
return int32(code.Code_UNAUTHENTICATED) |
|||
case 429: |
|||
return int32(code.Code_RESOURCE_EXHAUSTED) |
|||
case 501: |
|||
return int32(code.Code_UNIMPLEMENTED) |
|||
case 503: |
|||
return int32(code.Code_UNAVAILABLE) |
|||
default: |
|||
return int32(code.Code_UNKNOWN) |
|||
} |
|||
} |
|||
|
|||
// TODO: (odeke-em): perhaps just pass around spans due to the cost
|
|||
// incurred from using trace.FromContext(ctx) yet we could avoid
|
|||
// throwing away the work done by ctx, span := trace.StartSpan.
|
|||
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { |
|||
var attrs []trace.Attribute |
|||
for k, v := range attrMap { |
|||
var a trace.Attribute |
|||
switch v := v.(type) { |
|||
case string: |
|||
a = trace.StringAttribute(k, v) |
|||
case bool: |
|||
a = trace.BoolAttribute(k, v) |
|||
case int: |
|||
a = trace.Int64Attribute(k, int64(v)) |
|||
case int64: |
|||
a = trace.Int64Attribute(k, v) |
|||
default: |
|||
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) |
|||
} |
|||
attrs = append(attrs, a) |
|||
} |
|||
trace.FromContext(ctx).Annotatef(attrs, format, args...) |
|||
} |
@ -0,0 +1,19 @@ |
|||
#!/bin/bash |
|||
# Copyright 2019 Google LLC |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
# Today's date in YYYYMMDD form, matching the format of the Repo constant.
today=$(date +%Y%m%d)

# Rewrite the Repo constant in place. $GOFILE is supplied by `go generate`.
# Quote the expansions so filenames with spaces or glob characters, and the
# substituted date, survive word splitting intact.
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'"$today"'"/' "$GOFILE"
|||
|
@ -0,0 +1,71 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
//go:generate ./update_version.sh
|
|||
|
|||
// Package version contains version information for Google Cloud Client
|
|||
// Libraries for Go, as reported in request headers.
|
|||
package version |
|||
|
|||
import ( |
|||
"runtime" |
|||
"strings" |
|||
"unicode" |
|||
) |
|||
|
|||
// Repo is the current version of the client libraries in this
|
|||
// repo. It should be a date in YYYYMMDD format.
|
|||
const Repo = "20201104" // rewritten to today's date by update_version.sh via the go:generate directive above
|||
|
|||
// Go returns the Go runtime version. The returned string
|
|||
// has no whitespace.
|
|||
func Go() string {
	// goVersion is computed once at package init from runtime.Version().
	return goVersion
}
|||
|
|||
// goVersion caches the normalized runtime version, computed once at init.
var goVersion = goVer(runtime.Version())

// develPrefix marks development builds of the Go toolchain.
const develPrefix = "devel +"

// goVer normalizes the string reported by runtime.Version:
// "devel +<token> ..." yields "<token>"; "go1.X[.Y][pre]" yields a
// semver-like "1.X[.Y|.0][-pre]"; anything else yields "".
func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		// Development build: report only the token following "devel +".
		rest := s[len(develPrefix):]
		if sp := strings.IndexFunc(rest, unicode.IsSpace); sp >= 0 {
			rest = rest[:sp]
		}
		return rest
	}
	if !strings.HasPrefix(s, "go1") {
		return ""
	}
	// Drop the "go" prefix and split off any pre-release suffix
	// (the first rune that is not a digit or dot starts it).
	ver := s[2:]
	pre := ""
	if p := strings.IndexFunc(ver, notSemverRune); p >= 0 {
		ver, pre = ver[:p], ver[p:]
	}
	// Pad the numeric part: a trailing dot gets a "0" appended,
	// and a two-component version like "1.9" becomes "1.9.0".
	if strings.HasSuffix(ver, ".") {
		ver += "0"
	} else if strings.Count(ver, ".") < 2 {
		ver += ".0"
	}
	if pre != "" {
		ver += "-" + pre
	}
	return ver
}

// notSemverRune reports whether r is not part of a dotted numeric version.
func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}
@ -0,0 +1,330 @@ |
|||
# Changes |
|||
|
|||
|
|||
## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) |
|||
|
|||
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) |
|||
* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) |
|||
* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) |
|||
|
|||
|
|||
### Documentation |
|||
|
|||
* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) |
|||
|
|||
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) |
|||
|
|||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) |
|||
|
|||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) |
|||
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) |
|||
|
|||
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) |
|||
|
|||
## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) |
|||
* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) |
|||
|
|||
### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) |
|||
* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) |
|||
|
|||
## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) |
|||
* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) |
|||
|
|||
## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) |
|||
* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) |
|||
|
|||
## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) |
|||
|
|||
## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) |
|||
* This release contains changes to fully align this library's retry strategy |
|||
with best practices as described in the |
|||
Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). |
|||
* The library will now retry only idempotent operations by default. This means |
|||
that for certain operations, including object upload, compose, rewrite, |
|||
update, and delete, requests will not be retried by default unless |
|||
[idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) |
|||
for the request have been met. |
|||
* The library now has methods to configure aspects of retry policy for |
|||
API calls, including which errors are retried, the timing of the |
|||
exponential backoff, and how idempotency is taken into account. |
|||
* If you wish to re-enable retries for a non-idempotent request, use the |
|||
[RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) |
|||
policy. |
|||
* For full details on how to configure retries, see the |
|||
[package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) |
|||
and the |
|||
[Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) |
|||
* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) |
|||
* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) |
|||
* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) |
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) |
|||
|
|||
### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) |
|||
|
|||
### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) |
|||
|
|||
## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) |
|||
* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) |
|||
|
|||
## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add projectNumber field to bucketAttrs. ([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) |
|||
|
|||
### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) |
|||
* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) |
|||
* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) |
|||
* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) |
|||
* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) |
|||
* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) |
|||
|
|||
## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) |
|||
* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) |
|||
|
|||
## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 |
|||
config updates (see [googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). |
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) |
|||
|
|||
|
|||
## v1.14.0 |
|||
|
|||
- Updates to various dependencies. |
|||
|
|||
## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) |
|||
|
|||
|
|||
### Features |
|||
|
|||
* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) |
|||
* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) |
|||
|
|||
|
|||
### Bug Fixes |
|||
|
|||
* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) |
|||
|
|||
## v1.12.0 |
|||
- V4 signed URL fixes: |
|||
- Fix encoding of spaces in query parameters. |
|||
- Add fields that were missing from PostPolicyV4 policy conditions. |
|||
- Fix Query to correctly list prefixes as well as objects when SetAttrSelection |
|||
is used. |
|||
|
|||
## v1.11.0 |
|||
- Add support for CustomTime and NoncurrentTime object lifecycle management |
|||
features. |
|||
|
|||
## v1.10.0 |
|||
- Bump dependency on google.golang.org/api to capture changes to retry logic |
|||
which will make retries on writes more resilient. |
|||
- Improve documentation for Writer.ChunkSize. |
|||
- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. |
|||
|
|||
## v1.9.0 |
|||
- Add retry for transient network errors on most operations (with the exception |
|||
of writes). |
|||
- Bump dependency for google.golang.org/api to capture a change in the default |
|||
HTTP transport which will improve performance for reads under heavy load. |
|||
- Add CRC32C checksum validation option to Composer. |
|||
|
|||
## v1.8.0 |
|||
- Add support for V4 signed post policies. |
|||
|
|||
## v1.7.0 |
|||
- V4 signed URL support: |
|||
- Add support for bucket-bound domains and virtual hosted style URLs. |
|||
- Add support for query parameters in the signature. |
|||
- Fix text encoding to align with standards. |
|||
- Add the object name to query parameters for write calls. |
|||
- Fix retry behavior when reading files with Content-Encoding gzip. |
|||
- Fix response header in reader. |
|||
- New code examples: |
|||
- Error handling for `ObjectHandle` preconditions. |
|||
- Existence checks for buckets and objects. |
|||
|
|||
## v1.6.0 |
|||
|
|||
- Updated option handling: |
|||
- Don't drop custom scopes (#1756) |
|||
- Don't drop port in provided endpoint (#1737) |
|||
|
|||
## v1.5.0 |
|||
|
|||
- Honor WithEndpoint client option for reads as well as writes. |
|||
- Add archive storage class to docs. |
|||
- Make fixes to storage benchwrapper. |
|||
|
|||
## v1.4.0 |
|||
|
|||
- When listing objects in a bucket, allow callers to specify which attributes |
|||
are queried. This allows for performance optimization. |
|||
|
|||
## v1.3.0 |
|||
|
|||
- Use `storage.googleapis.com/storage/v1` by default for GCS requests |
|||
instead of `www.googleapis.com/storage/v1`. |
|||
|
|||
## v1.2.1 |
|||
|
|||
- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not |
|||
being sent in all cases. |
|||
|
|||
## v1.2.0 |
|||
|
|||
- Add support for UniformBucketLevelAccess. This configures access checks |
|||
to use only bucket-level IAM policies. |
|||
See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. |
|||
- Fix userAgent to use correct version. |
|||
|
|||
## v1.1.2 |
|||
|
|||
- Fix memory leak in BucketIterator and ObjectIterator. |
|||
|
|||
## v1.1.1 |
|||
|
|||
- Send BucketPolicyOnly even when it's disabled. |
|||
|
|||
## v1.1.0 |
|||
|
|||
- Performance improvements for ObjectIterator and BucketIterator. |
|||
- Fix Bucket.ObjectIterator size calculation checks. |
|||
- Added HMACKeyOptions to all the methods which allows for options such as |
|||
UserProject to be set per invocation and optionally be used. |
|||
|
|||
## v1.0.0 |
|||
|
|||
This is the first tag to carve out storage as its own module. See: |
|||
https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,32 @@ |
|||
## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) |
|||
|
|||
- [About Cloud Storage](https://cloud.google.com/storage/) |
|||
- [API documentation](https://cloud.google.com/storage/docs) |
|||
- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) |
|||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) |
|||
|
|||
### Example Usage |
|||
|
|||
First create a `storage.Client` to use throughout your application: |
|||
|
|||
[snip]:# (storage-1) |
|||
```go |
|||
client, err := storage.NewClient(ctx) |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
``` |
|||
|
|||
[snip]:# (storage-2) |
|||
```go |
|||
// Read object1 from the bucket.
|||
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
defer rc.Close() |
|||
body, err := io.ReadAll(rc) |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
``` |
@ -0,0 +1,356 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"net/http" |
|||
"reflect" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// ACLRole is the level of access to grant.
type ACLRole string

const (
	// RoleOwner corresponds to the Cloud Storage "OWNER" ACL role.
	RoleOwner ACLRole = "OWNER"
	// RoleReader corresponds to the "READER" ACL role.
	RoleReader ACLRole = "READER"
	// RoleWriter corresponds to the "WRITER" ACL role.
	RoleWriter ACLRole = "WRITER"
)
|||
|
|||
// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

const (
	// AllUsers is the predefined entity covering all users.
	AllUsers ACLEntity = "allUsers"
	// AllAuthenticatedUsers is the predefined entity covering all
	// authenticated users.
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
|||
|
|||
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
	// Entity is the user, group, domain or project team the rule applies to.
	Entity ACLEntity
	// EntityID is the ID of the entity, when the API provides one.
	EntityID string
	// Role is the level of access granted to the entity.
	Role ACLRole
	// Domain and Email are populated depending on the entity type —
	// presumably for domain- and user-/group-style entities respectively;
	// verify against the Cloud Storage ACL resource documentation.
	Domain string
	Email  string
	// ProjectTeam is set when the entity refers to a project team.
	ProjectTeam *ProjectTeam
}
|||
|
|||
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
	// ProjectNumber identifies the project.
	ProjectNumber string
	// Team is the team name — presumably owners/editors/viewers; confirm
	// against the Cloud Storage ACL resource documentation.
	Team string
}
|||
|
|||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
// ACLHandle on an object operates on the latest generation of that object by default.
// Selecting a specific generation of an object is not currently supported by the client.
type ACLHandle struct {
	c           *Client      // parent client used to issue the underlying API calls
	bucket      string       // bucket name
	object      string       // object name; empty when the handle addresses a bucket ACL
	isDefault   bool         // true when the handle addresses the bucket's default object ACL
	userProject string       // for requester-pays buckets
	retry       *retryConfig // retry configuration applied to ACL operations
}
|||
|
|||
// Delete permanently deletes the ACL entry for the given entity.
|
|||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectDelete(ctx, entity) |
|||
} |
|||
if a.isDefault { |
|||
return a.bucketDefaultDelete(ctx, entity) |
|||
} |
|||
return a.bucketDelete(ctx, entity) |
|||
} |
|||
|
|||
// Set sets the role for the given entity.
|
|||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectSet(ctx, entity, role, false) |
|||
} |
|||
if a.isDefault { |
|||
return a.objectSet(ctx, entity, role, true) |
|||
} |
|||
return a.bucketSet(ctx, entity, role) |
|||
} |
|||
|
|||
// List retrieves ACL entries.
|
|||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if a.object != "" { |
|||
return a.objectList(ctx) |
|||
} |
|||
if a.isDefault { |
|||
return a.bucketDefaultList(ctx) |
|||
} |
|||
return a.bucketList(ctx) |
|||
} |
|||
|
|||
// bucketDefaultList lists the entries of the bucket's default object ACL.
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	opts := makeStorageOpts(true, a.retry, a.userProject) // read-only, hence idempotent
	return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
}

// bucketDefaultDelete removes an entity from the bucket's default object ACL.
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	opts := makeStorageOpts(false, a.retry, a.userProject)
	return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
}

// bucketList lists the bucket's own ACL entries.
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
	opts := makeStorageOpts(true, a.retry, a.userProject) // read-only, hence idempotent
	return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
}

// bucketSet grants role to entity on the bucket's ACL.
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
	opts := makeStorageOpts(false, a.retry, a.userProject)
	return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
}

// bucketDelete removes an entity from the bucket's ACL.
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	opts := makeStorageOpts(false, a.retry, a.userProject)
	return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
}

// objectList lists the object's ACL entries.
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	opts := makeStorageOpts(true, a.retry, a.userProject) // read-only, hence idempotent
	return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
}

// objectSet grants role to entity on the object's ACL or, when
// isBucketDefault is true, on the bucket's default object ACL.
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
	opts := makeStorageOpts(false, a.retry, a.userProject)
	if isBucketDefault {
		return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
	}
	return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
}

// objectDelete removes an entity from the object's ACL.
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	opts := makeStorageOpts(false, a.retry, a.userProject)
	return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
}
|||
|
|||
// configureCall attaches the context and, when set, the user project to an
// apiary call object, then sets the client header. Reflection is used because
// the generated call types share no common interface beyond Header().
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		// Bill the configured project for requester-pays buckets.
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}
|||
|
|||
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toObjectACLRule(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toObjectACLRuleFromProto(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toBucketACLRule(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { |
|||
var rs []ACLRule |
|||
for _, item := range items { |
|||
rs = append(rs, toBucketACLRuleFromProto(item)) |
|||
} |
|||
return rs |
|||
} |
|||
|
|||
// toObjectACLRule converts a raw (apiary) object access control to an ACLRule.
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
	return ACLRule{
		Entity:      ACLEntity(a.Entity),
		EntityID:    a.EntityId,
		Role:        ACLRole(a.Role),
		Domain:      a.Domain,
		Email:       a.Email,
		ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
	}
}

// toObjectACLRuleFromProto converts a protobuf object access control to an ACLRule.
func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule {
	return ACLRule{
		Entity:      ACLEntity(a.GetEntity()),
		EntityID:    a.GetEntityId(),
		Role:        ACLRole(a.GetRole()),
		Domain:      a.GetDomain(),
		Email:       a.GetEmail(),
		ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
	}
}

// toBucketACLRule converts a raw (apiary) bucket access control to an ACLRule.
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
	return ACLRule{
		Entity:      ACLEntity(a.Entity),
		EntityID:    a.EntityId,
		Role:        ACLRole(a.Role),
		Domain:      a.Domain,
		Email:       a.Email,
		ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
	}
}

// toBucketACLRuleFromProto converts a protobuf bucket access control to an ACLRule.
func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule {
	return ACLRule{
		Entity:      ACLEntity(a.GetEntity()),
		EntityID:    a.GetEntityId(),
		Role:        ACLRole(a.GetRole()),
		Domain:      a.GetDomain(),
		Email:       a.GetEmail(),
		ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
	}
}
|||
|
|||
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*raw.ObjectAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
|||
} |
|||
return r |
|||
} |
|||
|
|||
func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary
|
|||
} |
|||
return r |
|||
} |
|||
|
|||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*raw.BucketAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
|||
} |
|||
return r |
|||
} |
|||
|
|||
func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { |
|||
if len(rules) == 0 { |
|||
return nil |
|||
} |
|||
r := make([]*storagepb.BucketAccessControl, 0, len(rules)) |
|||
for _, rule := range rules { |
|||
r = append(r, rule.toProtoBucketAccessControl()) |
|||
} |
|||
return r |
|||
} |
|||
|
|||
// toRawBucketAccessControl converts the rule to its raw (apiary) form for the
// given bucket; only Entity and Role are populated from the rule.
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
	return &raw.BucketAccessControl{
		Bucket: bucket,
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}

// toRawObjectAccessControl converts the rule to its raw (apiary) form for the
// given bucket; only Entity and Role are populated from the rule.
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
	return &raw.ObjectAccessControl{
		Bucket: bucket,
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}

// toProtoObjectAccessControl converts the rule to its protobuf form. The
// bucket parameter is accepted for symmetry with the raw variant but unused.
func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl {
	return &storagepb.ObjectAccessControl{
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}

// toProtoBucketAccessControl converts the rule to its protobuf form.
func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl {
	return &storagepb.BucketAccessControl{
		Entity: string(r.Entity),
		Role:   string(r.Role),
		// The other fields are not settable.
	}
}
|||
|
|||
// toBucketProjectTeam converts a raw bucket-ACL project team; a nil input
// yields nil.
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
	if p == nil {
		return nil
	}
	return &ProjectTeam{
		ProjectNumber: p.ProjectNumber,
		Team:          p.Team,
	}
}

// toProjectTeamFromProto converts a protobuf project team; a nil input yields
// nil.
func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam {
	if p == nil {
		return nil
	}
	return &ProjectTeam{
		ProjectNumber: p.GetProjectNumber(),
		Team:          p.GetTeam(),
	}
}

// toObjectProjectTeam converts a raw object-ACL project team; a nil input
// yields nil.
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
	if p == nil {
		return nil
	}
	return &ProjectTeam{
		ProjectNumber: p.ProjectNumber,
		Team:          p.Team,
	}
}
File diff suppressed because it is too large
@ -0,0 +1,333 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"io" |
|||
"time" |
|||
|
|||
gax "github.com/googleapis/gax-go/v2" |
|||
"google.golang.org/api/option" |
|||
iampb "google.golang.org/genproto/googleapis/iam/v1" |
|||
) |
|||
|
|||
// TODO(noahdietz): Move existing factory methods to this file.
|
|||
|
|||
// storageClient is an internal-only interface designed to separate the
// transport-specific logic of making Storage API calls from the logic of the
// client library.
//
// Implementation requirements beyond implementing the interface include:
// * factory method(s) must accept a `userProject string` param
// * `settings` must be retained per instance
// * `storageOption`s must be resolved in the order they are received
// * all API errors must be wrapped in the gax-go APIError type
// * any unimplemented interface methods must return a StorageUnimplementedErr
//
// TODO(noahdietz): This interface is currently not used in the production code
// paths
type storageClient interface {

	// Top-level methods.

	GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
	ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
	// Close releases any resources held by the client.
	Close() error

	// Bucket methods.

	DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
	GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
	UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
	LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
	ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator

	// Object metadata methods.

	DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
	GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
	UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)

	// Default Object ACL methods.

	DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
	ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
	UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Bucket ACL methods.

	DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
	ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
	UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Object ACL methods.

	DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
	ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
	UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Media operations.

	ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error)
	RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)

	NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error)
	// OpenWriter starts an upload; data written to the returned PipeWriter is
	// streamed to the service, with results reported via the params callbacks.
	OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error)

	// IAM methods.

	GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error)
	SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error
	TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error)

	// HMAC Key methods.

	GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
	ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
	UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
	CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
	DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error

	// Notification methods.
	ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
	CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
	DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
}
|||
|
|||
// settings contains transport-agnostic configuration for API calls made via
// the storageClient interface. All implementations must utilize settings
// and respect those that are applicable.
type settings struct {
	// retry is the complete retry configuration to use when evaluating if an
	// API call should be retried.
	retry *retryConfig

	// gax is a set of gax.CallOption to be conveyed to gax.Invoke.
	// Note: Not all storageClient implementations will use gax.Invoke.
	gax []gax.CallOption

	// idempotent indicates if the call is idempotent or not when considering
	// if the call should be retried or not.
	idempotent bool

	// clientOption is a set of option.ClientOption to be used during client
	// transport initialization. See https://pkg.go.dev/google.golang.org/api/option
	// for a list of supported options.
	clientOption []option.ClientOption

	// userProject is the user project that should be billed for the request.
	userProject string
}
|||
|
|||
func initSettings(opts ...storageOption) *settings { |
|||
s := &settings{} |
|||
resolveOptions(s, opts...) |
|||
return s |
|||
} |
|||
|
|||
func resolveOptions(s *settings, opts ...storageOption) { |
|||
for _, o := range opts { |
|||
o.Apply(s) |
|||
} |
|||
} |
|||
|
|||
// callSettings is a helper for resolving storage options against the settings
|
|||
// in the context of an individual call. This is to ensure that client-level
|
|||
// default settings are not mutated by two different calls getting options.
|
|||
//
|
|||
// Example: s := callSettings(c.settings, opts...)
|
|||
func callSettings(defaults *settings, opts ...storageOption) *settings { |
|||
if defaults == nil { |
|||
return nil |
|||
} |
|||
// This does not make a deep copy of the pointer/slice fields, but all
|
|||
// options replace the settings fields rather than modify their values in
|
|||
// place.
|
|||
cs := *defaults |
|||
resolveOptions(&cs, opts...) |
|||
return &cs |
|||
} |
|||
|
|||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
|||
// idempotency, retryConfig, and userProject. All top-level client operations
|
|||
// will generally have to pass these options through the interface.
|
|||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { |
|||
opts := []storageOption{idempotent(isIdempotent)} |
|||
if retry != nil { |
|||
opts = append(opts, withRetryConfig(retry)) |
|||
} |
|||
if userProject != "" { |
|||
opts = append(opts, withUserProject(userProject)) |
|||
} |
|||
return opts |
|||
} |
|||
|
|||
// storageOption is the transport-agnostic call option for the storageClient
// interface.
type storageOption interface {
	Apply(s *settings)
}

// withGAXOptions returns a storageOption carrying gax.CallOptions to be
// conveyed to gax.Invoke.
func withGAXOptions(opts ...gax.CallOption) storageOption {
	return &gaxOption{opts}
}

// gaxOption stores gax.CallOptions into settings.gax.
type gaxOption struct {
	opts []gax.CallOption
}

func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }

// withRetryConfig returns a storageOption carrying the retry configuration for
// the call.
func withRetryConfig(rc *retryConfig) storageOption {
	return &retryOption{rc}
}

// retryOption stores a retryConfig into settings.retry.
type retryOption struct {
	rc *retryConfig
}

func (o *retryOption) Apply(s *settings) { s.retry = o.rc }

// idempotent returns a storageOption marking the call as idempotent (or not),
// which feeds into the decision of whether the call may be retried.
func idempotent(i bool) storageOption {
	return &idempotentOption{i}
}

// idempotentOption stores an idempotency flag into settings.idempotent.
type idempotentOption struct {
	idempotency bool
}

func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency }

// withClientOptions returns a storageOption carrying option.ClientOptions to
// be used during client transport initialization.
func withClientOptions(opts ...option.ClientOption) storageOption {
	return &clientOption{opts: opts}
}

// clientOption stores option.ClientOptions into settings.clientOption.
type clientOption struct {
	opts []option.ClientOption
}

func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts }

// withUserProject returns a storageOption that sets the project to bill for
// the request (requester-pays).
func withUserProject(project string) storageOption {
	return &userProjectOption{project}
}

// userProjectOption stores a billing project into settings.userProject.
type userProjectOption struct {
	project string
}

func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project }
|||
|
|||
// openWriterParams bundles everything a transport needs to begin an object
// write: writer configuration, object/request properties, and the callbacks
// used to report progress and results back to the Writer.
type openWriterParams struct {
	// Writer configuration

	// ctx is the context used by the writer routine to make all network calls
	// and to manage the writer routine - see `Writer.ctx`.
	// Required.
	ctx context.Context
	// chunkSize - see `Writer.ChunkSize`.
	// Optional.
	chunkSize int
	// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
	// Optional.
	chunkRetryDeadline time.Duration

	// Object/request properties

	// bucket - see `Writer.o.bucket`.
	// Required.
	bucket string
	// attrs - see `Writer.ObjectAttrs`.
	// Required.
	attrs *ObjectAttrs
	// conds - see `Writer.o.conds`.
	// Optional.
	conds *Conditions
	// encryptionKey - see `Writer.o.encryptionKey`
	// Optional.
	encryptionKey []byte
	// sendCRC32C - see `Writer.SendCRC32C`.
	// Optional.
	sendCRC32C bool

	// Writer callbacks

	// donec - see `Writer.donec`.
	// Required.
	donec chan struct{}
	// setError callback for reporting errors - see `Writer.error`.
	// Required.
	setError func(error)
	// progress callback for reporting upload progress - see `Writer.progress`.
	// Required.
	progress func(int64)
	// setObj callback for reporting the resulting object - see `Writer.obj`.
	// Required.
	setObj func(*ObjectAttrs)
}
|||
|
|||
// newRangeReaderParams bundles the transport-agnostic arguments for opening a
// reader over a byte range of an object.
type newRangeReaderParams struct {
	bucket         string
	conds          *Conditions
	encryptionKey  []byte
	gen            int64 // object generation to read
	length         int64 // number of bytes to read — sentinel semantics defined by callers; TODO confirm against Reader
	object         string
	offset         int64 // starting byte offset within the object
	readCompressed bool  // Use accept-encoding: gzip. Only works for HTTP currently.
}
|||
|
|||
// composeObjectRequest is the transport-agnostic form of a compose call:
// combine srcs into dstObject within dstBucket.
type composeObjectRequest struct {
	dstBucket     string
	dstObject     destinationObject
	srcs          []sourceObject
	predefinedACL string
	sendCRC32C    bool
}

// sourceObject identifies a source object, with its generation, conditions and
// encryption key, for a compose or rewrite request.
type sourceObject struct {
	name          string
	bucket        string
	gen           int64
	conds         *Conditions
	encryptionKey []byte
}

// destinationObject identifies the target of a compose or rewrite request and
// carries the attributes and encryption settings to apply to it.
type destinationObject struct {
	name          string
	bucket        string
	conds         *Conditions
	attrs         *ObjectAttrs // attrs to set on the destination object.
	encryptionKey []byte
	keyName       string // Cloud KMS key name; set from Copier.DestinationKMSKeyName.
}

// rewriteObjectRequest is the transport-agnostic form of one rewrite RPC,
// including the token used to resume a multi-call rewrite.
type rewriteObjectRequest struct {
	srcObject                sourceObject
	dstObject                destinationObject
	predefinedACL            string
	token                    string
	maxBytesRewrittenPerCall int64
}

// rewriteObjectResponse is the transport-agnostic result of one rewrite RPC.
type rewriteObjectResponse struct {
	resource *ObjectAttrs
	done     bool   // true once the rewrite has fully completed
	written  int64  // bytes rewritten so far
	size     int64  // total size of the source object, in bytes
	token    string // token to pass on the next call to continue the rewrite
}
@ -0,0 +1,233 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
) |
|||
|
|||
// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
//
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
// in which case the user project of src is billed.
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
	// No RPCs are issued here; the copy is performed by Run.
	return &Copier{dst: dst, src: src}
}
|||
|
|||
// A Copier copies a source object to a destination.
type Copier struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Copier. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// RewriteToken can be set before calling Run to resume a copy
	// operation. After Run returns a non-nil error, RewriteToken will
	// have been updated to contain the value needed to resume the copy.
	RewriteToken string

	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
	// operation. If ProgressFunc is not nil and copying requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
	// ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far and the total size in bytes of the source object.
	//
	// ProgressFunc is intended to make upload progress available to the
	// application. For example, the implementation of ProgressFunc may update
	// a progress bar in the application's UI, or log the result of
	// float64(copiedBytes)/float64(totalBytes).
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(copiedBytes, totalBytes uint64)

	// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
	// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
	// any.
	//
	// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
	// (via ObjectHandle.Key) on the destination object will result in an error when
	// Run is called.
	DestinationKMSKeyName string

	// dst and src are the destination and source handles, as given to
	// CopierFrom.
	dst, src *ObjectHandle

	// The maximum number of bytes that will be rewritten per rewrite request.
	// Most callers shouldn't need to specify this parameter - it is primarily
	// in place to support testing. If specified the value must be an integral
	// multiple of 1 MiB (1048576). Also, this only applies to requests where
	// the source and destination span locations and/or storage classes. Finally,
	// this value must not change across rewrite calls else you'll get an error
	// that the `rewriteToken` is invalid.
	maxBytesRewrittenPerCall int64
}
|||
|
|||
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	// Validate both handles and reject configurations the service cannot
	// accept before issuing any RPCs.
	if err := c.src.validate(); err != nil {
		return nil, err
	}
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
		return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
	}
	if c.dst.gen != defaultGen {
		return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
	}
	// Convert destination attributes to raw form, omitting the bucket.
	// If the bucket is included but name or content-type aren't, the service
	// returns a 400 with "Required" as the only message. Omitting the bucket
	// does not cause any problems.
	req := &rewriteObjectRequest{
		srcObject: sourceObject{
			name:          c.src.object,
			bucket:        c.src.bucket,
			gen:           c.src.gen,
			conds:         c.src.conds,
			encryptionKey: c.src.encryptionKey,
		},
		dstObject: destinationObject{
			name:          c.dst.object,
			bucket:        c.dst.bucket,
			conds:         c.dst.conds,
			attrs:         &c.ObjectAttrs,
			encryptionKey: c.dst.encryptionKey,
			keyName:       c.DestinationKMSKeyName,
		},
		predefinedACL:            c.PredefinedACL,
		token:                    c.RewriteToken,
		maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall,
	}

	// The call is only safe to retry when the destination is pinned by a
	// generation-match or does-not-exist precondition.
	isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
	// The destination's user project takes precedence over the source's.
	var userProject string
	if c.dst.userProject != "" {
		userProject = c.dst.userProject
	} else if c.src.userProject != "" {
		userProject = c.src.userProject
	}
	opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)

	// A rewrite may require multiple RPCs; loop until the service reports
	// completion, carrying the rewrite token between calls.
	for {
		res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
		if err != nil {
			return nil, err
		}
		// Record the token on the Copier as well, so the caller can resume
		// after a later failure.
		c.RewriteToken = res.token
		req.token = res.token
		if c.ProgressFunc != nil {
			c.ProgressFunc(uint64(res.written), uint64(res.size))
		}
		if res.done { // Finished successfully.
			return res.resource, nil
		}
	}
}
|||
|
|||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
|||
// You can immediately call Run on the returned Composer, or you can
|
|||
// configure it first.
|
|||
//
|
|||
// The encryption key for the destination object will be used to decrypt all
|
|||
// source objects and encrypt the destination object. It is an error
|
|||
// to specify an encryption key for any of the source objects.
|
|||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { |
|||
return &Composer{dst: dst, srcs: srcs} |
|||
} |
|||
|
|||
// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Composer. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Composer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data in the destination object does not match
	// the checksum, the compose will be rejected.
	SendCRC32C bool

	// dst is the handle of the object the sources are composed into.
	dst *ObjectHandle
	// srcs are the objects to concatenate, in order.
	srcs []*ObjectHandle
}
|||
|
|||
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if c.dst.gen != defaultGen {
		return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
	}
	if len(c.srcs) == 0 {
		return nil, errors.New("storage: at least one source object must be specified")
	}

	// All sources must live in the destination's bucket, and none may carry
	// its own encryption key.
	for _, src := range c.srcs {
		if err := src.validate(); err != nil {
			return nil, err
		}
		if src.bucket != c.dst.bucket {
			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
		}
		if src.encryptionKey != nil {
			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
		}
	}

	req := &composeObjectRequest{
		dstBucket:     c.dst.bucket,
		predefinedACL: c.PredefinedACL,
		sendCRC32C:    c.SendCRC32C,
	}
	req.dstObject = destinationObject{
		name:          c.dst.object,
		bucket:        c.dst.bucket,
		conds:         c.dst.conds,
		attrs:         &c.ObjectAttrs,
		encryptionKey: c.dst.encryptionKey,
	}
	// Convert each source handle to its wire form, preserving order.
	for _, src := range c.srcs {
		s := sourceObject{
			name:   src.object,
			bucket: src.bucket,
			gen:    src.gen,
			conds:  src.conds,
		}
		req.srcs = append(req.srcs, s)
	}

	// Only retry when the destination is pinned by a generation precondition,
	// so a repeated call cannot clobber a concurrent write.
	isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
	opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
	return c.dst.c.tc.ComposeObject(ctx, req, opts...)
}
@ -0,0 +1,328 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
/* |
|||
Package storage provides an easy way to work with Google Cloud Storage. |
|||
Google Cloud Storage stores data in named objects, which are grouped into buckets. |
|||
|
|||
More information about Google Cloud Storage is available at |
|||
https://cloud.google.com/storage/docs.
|
|||
|
|||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
|||
connection pooling and similar aspects of this package. |
|||
|
|||
# Creating a Client |
|||
|
|||
To start working with this package, create a [Client]: |
|||
|
|||
ctx := context.Background() |
|||
client, err := storage.NewClient(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
The client will use your default application credentials. Clients should be |
|||
reused instead of created as needed. The methods of [Client] are safe for |
|||
concurrent use by multiple goroutines. |
|||
|
|||
If you only wish to access public data, you can create |
|||
an unauthenticated client with |
|||
|
|||
client, err := storage.NewClient(ctx, option.WithoutAuthentication()) |
|||
|
|||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST |
|||
environment variable to the address at which your emulator is running. This will |
|||
send requests to that address instead of to Cloud Storage. You can then create |
|||
and use a client as usual: |
|||
|
|||
// Set STORAGE_EMULATOR_HOST environment variable.
|
|||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
// Create client as usual.
|
|||
client, err := storage.NewClient(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
// This request is now directed to http://localhost:9000/storage/v1/b
|
|||
// instead of https://storage.googleapis.com/storage/v1/b
|
|||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
Please note that there is no official emulator for Cloud Storage. |
|||
|
|||
# Buckets |
|||
|
|||
A Google Cloud Storage bucket is a collection of objects. To work with a |
|||
bucket, make a bucket handle: |
|||
|
|||
bkt := client.Bucket(bucketName) |
|||
|
|||
A handle is a reference to a bucket. You can have a handle even if the |
|||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage, |
|||
call [BucketHandle.Create]: |
|||
|
|||
if err := bkt.Create(ctx, projectID, nil); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
Note that although buckets are associated with projects, bucket names are |
|||
global across all projects. |
|||
|
|||
Each bucket has associated metadata, represented in this package by |
|||
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set |
|||
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use |
|||
[BucketHandle.Attrs]: |
|||
|
|||
attrs, err := bkt.Attrs(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", |
|||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) |
|||
|
|||
# Objects |
|||
|
|||
An object holds arbitrary data as a sequence of bytes, like a file. You |
|||
refer to objects using a handle, just as with buckets, but unlike buckets |
|||
you don't explicitly create an object. Instead, the first time you write |
|||
to an object it will be created. You can use the standard Go [io.Reader] |
|||
and [io.Writer] interfaces to read and write object data: |
|||
|
|||
obj := bkt.Object("data") |
|||
// Write something to obj.
|
|||
// w implements io.Writer.
|
|||
w := obj.NewWriter(ctx) |
|||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
|||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
// Close, just like writing a file.
|
|||
if err := w.Close(); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
|
|||
// Read it back.
|
|||
r, err := obj.NewReader(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
defer r.Close() |
|||
if _, err := io.Copy(os.Stdout, r); err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
// Prints "This object contains text."
|
|||
|
|||
Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: |
|||
|
|||
objAttrs, err := obj.Attrs(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Printf("object %s has size %d and can be read using %s\n", |
|||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) |
|||
|
|||
# Listing objects |
|||
|
|||
Listing objects in a bucket is done with the [BucketHandle.Objects] method: |
|||
|
|||
query := &storage.Query{Prefix: ""} |
|||
|
|||
var names []string |
|||
it := bkt.Objects(ctx, query) |
|||
for { |
|||
attrs, err := it.Next() |
|||
if err == iterator.Done { |
|||
break |
|||
} |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
names = append(names, attrs.Name) |
|||
} |
|||
|
|||
Objects are listed lexicographically by name. To filter objects |
|||
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: |
|||
|
|||
query := &storage.Query{ |
|||
Prefix: "", |
|||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
|||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
|||
} |
|||
|
|||
// ... as before
|
|||
|
|||
If only a subset of object attributes is needed when listing, specifying this |
|||
subset using [Query.SetAttrSelection] may speed up the listing process: |
|||
|
|||
query := &storage.Query{Prefix: ""} |
|||
query.SetAttrSelection([]string{"Name"}) |
|||
|
|||
// ... as before
|
|||
|
|||
# ACLs |
|||
|
|||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of |
|||
ACLRules, each of which specifies the role of a user, group or project. ACLs |
|||
are suitable for fine-grained control, but you may prefer using IAM to control |
|||
access at the project level (see [Cloud Storage IAM docs]).
|||
|
|||
To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: |
|||
|
|||
acls, err := obj.ACL().List(ctx) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
for _, rule := range acls { |
|||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) |
|||
} |
|||
|
|||
You can also set and delete ACLs. |
|||
|
|||
# Conditions |
|||
|
|||
Every object has a generation and a metageneration. The generation changes |
|||
whenever the content changes, and the metageneration changes whenever the |
|||
metadata changes. [Conditions] let you check these values before an operation; |
|||
the operation only executes if the conditions match. You can use conditions to |
|||
prevent race conditions in read-modify-write operations. |
|||
|
|||
For example, say you've read an object's metadata into objAttrs. Now |
|||
you want to write to that object, but only if its contents haven't changed |
|||
since you read it. Here is how to express that: |
|||
|
|||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) |
|||
// Proceed with writing as above.
|
|||
|
|||
# Signed URLs |
|||
|
|||
You can obtain a URL that lets anyone read or write an object for a limited time. |
|||
Signing a URL requires credentials authorized to sign a URL. To use the same |
|||
authentication that was used when instantiating the Storage client, use |
|||
[BucketHandle.SignedURL]. |
|||
|
|||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Println(url) |
|||
|
|||
You can also sign a URL without creating a client. See the documentation of |
|||
[SignedURL] for details. |
|||
|
|||
url, err := storage.SignedURL(bucketName, "shared-object", opts) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
fmt.Println(url) |
|||
|
|||
# Post Policy V4 Signed Request |
|||
|
|||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with |
|||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised |
|||
by a user. |
|||
|
|||
For more information, please see the [XML POST Object docs] as well |
|||
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. |
|||
|
|||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) |
|||
if err != nil { |
|||
// TODO: Handle error.
|
|||
} |
|||
	fmt.Printf("URL: %s\nFields: %v\n", pv4.URL, pv4.Fields)
|||
|
|||
# Credential requirements for signing |
|||
|
|||
If the GoogleAccessID and PrivateKey option fields are not provided, they will |
|||
be automatically detected by [BucketHandle.SignedURL] and |
|||
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: |
|||
- you are authenticated to the Storage Client with a service account's |
|||
downloaded private key, either directly in code or by setting the |
|||
GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), |
|||
- your application is running on Google Compute Engine (GCE), or |
|||
- you are logged into [gcloud using application default credentials] |
|||
with [impersonation enabled]. |
|||
|
|||
Detecting GoogleAccessID may not be possible if you are authenticated using a |
|||
token source or using [option.WithHTTPClient]. In this case, you can provide a |
|||
service account email for GoogleAccessID and the client will attempt to sign |
|||
the URL or Post Policy using that service account. |
|||
|
|||
To generate the signature, you must have: |
|||
- iam.serviceAccounts.signBlob permissions on the GoogleAccessID service |
|||
account, and |
|||
- the [IAM Service Account Credentials API] enabled (unless authenticating |
|||
with a downloaded private key). |
|||
|
|||
# Errors |
|||
|
|||
Errors returned by this client are often of the type [googleapi.Error]. |
|||
These errors can be introspected for more information by using [errors.As] |
|||
with the richer [googleapi.Error] type. For example: |
|||
|
|||
var e *googleapi.Error |
|||
if ok := errors.As(err, &e); ok { |
|||
if e.Code == 409 { ... } |
|||
} |
|||
|
|||
# Retrying failed requests |
|||
|
|||
Methods in this package may retry calls that fail with transient errors. |
|||
Retrying continues indefinitely unless the controlling context is canceled, the |
|||
client is closed, or a non-transient error is received. To stop retries from |
|||
continuing, use context timeouts or cancellation. |
|||
|
|||
The retry strategy in this library follows best practices for Cloud Storage. By |
|||
default, operations are retried only if they are idempotent, and exponential |
|||
backoff with jitter is employed. In addition, errors are only retried if they |
|||
are defined as transient by the service. See the [Cloud Storage retry docs] |
|||
for more information. |
|||
|
|||
Users can configure non-default retry behavior for a single library call (using |
|||
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a |
|||
client (using [Client.SetRetry]). For example: |
|||
|
|||
o := client.Bucket(bucket).Object(object).Retryer( |
|||
// Use WithBackoff to change the timing of the exponential backoff.
|
|||
storage.WithBackoff(gax.Backoff{ |
|||
Initial: 2 * time.Second, |
|||
}), |
|||
// Use WithPolicy to configure the idempotency policy. RetryAlways will
|
|||
// retry the operation even if it is non-idempotent.
|
|||
storage.WithPolicy(storage.RetryAlways), |
|||
) |
|||
|
|||
// Use a context timeout to set an overall deadline on the call, including all
|
|||
// potential retries.
|
|||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) |
|||
defer cancel() |
|||
|
|||
// Delete an object using the specified strategy and timeout.
|
|||
if err := o.Delete(ctx); err != nil { |
|||
// Handle err.
|
|||
} |
|||
|
|||
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
|
|||
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
|
|||
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
|
|||
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
|
|||
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
|
|||
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
|
|||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
|||
*/ |
|||
package storage // import "cloud.google.com/go/storage"
|
@ -0,0 +1,92 @@ |
|||
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs the storage-testbench emulator in Docker and executes the retry
# conformance and emulator-backed test suites against it.

# Fail on any error
set -eo pipefail

# Display commands being run
set -x

# Only run on Go 1.17+
min_minor_ver=17

# Extract the minor component from `go version` output (e.g. "go1.19.3" -> 19).
v=`go version | { read _ _ v _; echo ${v#go}; }`
comps=(${v//./ })
minor_ver=${comps[1]}

if [ "$minor_ver" -lt "$min_minor_ver" ]; then
    echo minor version $minor_ver, skipping
    exit 0
fi

# Point the Go tests at the local testbench (HTTP on 9000, gRPC on 8888).
export STORAGE_EMULATOR_HOST="http://localhost:9000"
export STORAGE_EMULATOR_HOST_GRPC="localhost:8888"

DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
DEFAULT_IMAGE_TAG='latest'
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
CONTAINER_NAME=storage_testbench

# Note: --net=host makes the container bind directly to the Docker host's network,
# with no network isolation. If we were to use port-mapping instead, reset connection errors
# would be captured differently and cause unexpected test behaviour.
# The host networking driver works only on Linux hosts.
# See more about using host networking: https://docs.docker.com/network/host/
DOCKER_NETWORK="--net=host"
# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to
# differences in the network errors emitted by the system.
if [ `go env GOOS` == 'darwin' ]; then
    DOCKER_NETWORK="-p 9000:9000 -p 8888:8888"
fi

# Get the docker image for the testbench
docker pull $DOCKER_IMAGE

# Start the testbench (detached; --rm removes the container on stop)
docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"
sleep 1

# Stop the testbench & cleanup environment variables
function cleanup() {
    echo "Cleanup testbench"
    docker stop $CONTAINER_NAME
    unset STORAGE_EMULATOR_HOST;
    unset STORAGE_EMULATOR_HOST_GRPC;
}
# Ensure cleanup runs however the script exits (success, failure, or set -e).
trap cleanup EXIT

# Check that the server is running - retry several times to allow for start-up time
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)

if [[ $response != 200 ]]
then
    echo "Testbench server did not start correctly"
    exit 1
fi

# Start the gRPC server on port 8888.
echo "Starting the gRPC server on port 8888"
response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888")

if [[ $response != 200 ]]
then
    echo "Testbench gRPC server did not start correctly"
    exit 1
fi

# Run tests (retry conformance + emulated suites), teeing output for the CI log.
go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
File diff suppressed because it is too large
@ -0,0 +1,392 @@ |
|||
// Copyright 2019 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"time" |
|||
|
|||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" |
|||
"google.golang.org/api/iterator" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// HMACState is the state of the HMAC key.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACState string

const (
	// Active is the status for an active key that can be used to sign
	// requests.
	Active HMACState = "ACTIVE"

	// Inactive is the status for an inactive key thus requests signed by
	// this key will be denied.
	Inactive HMACState = "INACTIVE"

	// Deleted is the status for a key that is deleted.
	// Once in this state the key cannot be recovered
	// and does not count towards key limits. Deleted keys will be cleaned
	// up later.
	Deleted HMACState = "DELETED"
)
|||
|
|||
// HMACKey is the representation of a Google Cloud Storage HMAC key.
//
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKey struct {
	// Secret is the HMAC's secret key.
	Secret string

	// AccessID is the ID of the HMAC key.
	AccessID string

	// Etag is the HTTP/1.1 Entity tag.
	Etag string

	// ID is the ID of the HMAC key, including the ProjectID and AccessID.
	ID string

	// ProjectID is the ID of the project that owns the
	// service account to which the key authenticates.
	ProjectID string

	// ServiceAccountEmail is the email address
	// of the key's associated service account.
	ServiceAccountEmail string

	// CreatedTime is the creation time of the HMAC key.
	CreatedTime time.Time

	// UpdatedTime is the last modification time of the HMAC key metadata.
	UpdatedTime time.Time

	// State is the state of the HMAC key.
	// It can be one of StateActive, StateInactive or StateDeleted.
	State HMACState
}
|||
|
|||
// HMACKeyHandle helps provide access and management for HMAC keys.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyHandle struct {
	// projectID is the ID of the project that owns the service account
	// the key authenticates as.
	projectID string
	// accessID identifies the HMAC key this handle refers to.
	accessID string
	// retry configures how the handle's operations are retried.
	retry *retryConfig
	// tc issues the underlying API calls (HTTP or gRPC transport).
	tc storageClient
}
|||
|
|||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { |
|||
return &HMACKeyHandle{ |
|||
projectID: projectID, |
|||
accessID: accessID, |
|||
retry: c.retry, |
|||
tc: c.tc, |
|||
} |
|||
} |
|||
|
|||
// Get invokes an RPC to retrieve the HMAC key referenced by the
|
|||
// HMACKeyHandle's accessID.
|
|||
//
|
|||
// Options such as UserProjectForHMACKeys can be used to set the
|
|||
// userProject to be billed against for operations.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { |
|||
desc := new(hmacKeyDesc) |
|||
for _, opt := range opts { |
|||
opt.withHMACKeyDesc(desc) |
|||
} |
|||
|
|||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID) |
|||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) |
|||
|
|||
return hk, err |
|||
} |
|||
|
|||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
|||
// Only inactive HMAC keys can be deleted.
|
|||
// After deletion, a key cannot be used to authenticate requests.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { |
|||
desc := new(hmacKeyDesc) |
|||
for _, opt := range opts { |
|||
opt.withHMACKeyDesc(desc) |
|||
} |
|||
|
|||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID) |
|||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) |
|||
} |
|||
|
|||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { |
|||
hkmd := hk.Metadata |
|||
if hkmd == nil { |
|||
return nil, errors.New("field Metadata cannot be nil") |
|||
} |
|||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("field CreatedTime: %w", err) |
|||
} |
|||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) |
|||
if err != nil && !updatedTimeCanBeNil { |
|||
return nil, fmt.Errorf("field UpdatedTime: %w", err) |
|||
} |
|||
|
|||
hmKey := &HMACKey{ |
|||
AccessID: hkmd.AccessId, |
|||
Secret: hk.Secret, |
|||
Etag: hkmd.Etag, |
|||
ID: hkmd.Id, |
|||
State: HMACState(hkmd.State), |
|||
ProjectID: hkmd.ProjectId, |
|||
CreatedTime: createdTime, |
|||
UpdatedTime: updatedTime, |
|||
|
|||
ServiceAccountEmail: hkmd.ServiceAccountEmail, |
|||
} |
|||
|
|||
return hmKey, nil |
|||
} |
|||
|
|||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { |
|||
if pbmd == nil { |
|||
return nil |
|||
} |
|||
|
|||
return &HMACKey{ |
|||
AccessID: pbmd.GetAccessId(), |
|||
ID: pbmd.GetId(), |
|||
State: HMACState(pbmd.GetState()), |
|||
ProjectID: pbmd.GetProject(), |
|||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()), |
|||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), |
|||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(), |
|||
} |
|||
} |
|||
|
|||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { |
|||
if projectID == "" { |
|||
return nil, errors.New("storage: expecting a non-blank projectID") |
|||
} |
|||
if serviceAccountEmail == "" { |
|||
return nil, errors.New("storage: expecting a non-blank service account email") |
|||
} |
|||
|
|||
desc := new(hmacKeyDesc) |
|||
for _, opt := range opts { |
|||
opt.withHMACKeyDesc(desc) |
|||
} |
|||
|
|||
o := makeStorageOpts(false, c.retry, desc.userProjectID) |
|||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) |
|||
return hk, err |
|||
} |
|||
|
|||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyAttrsToUpdate struct {
	// State is required and must be either StateActive or StateInactive.
	// Update rejects any other value.
	State HMACState

	// Etag is an optional field and it is the HTTP/1.1 Entity tag.
	Etag string
}
|||
|
|||
// Update mutates the HMACKey referred to by accessID.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { |
|||
if au.State != Active && au.State != Inactive { |
|||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) |
|||
} |
|||
|
|||
desc := new(hmacKeyDesc) |
|||
for _, opt := range opts { |
|||
opt.withHMACKeyDesc(desc) |
|||
} |
|||
|
|||
isIdempotent := len(au.Etag) > 0 |
|||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) |
|||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) |
|||
return hk, err |
|||
} |
|||
|
|||
// An HMACKeysIterator is an iterator over HMACKeys.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeysIterator struct {
	ctx       context.Context
	raw       *raw.ProjectsHmacKeysService
	projectID string
	hmacKeys  []*HMACKey // keys buffered from fetched pages
	pageInfo  *iterator.PageInfo
	nextFunc  func() error // advances the iterator, fetching pages as needed
	index     int          // position in hmacKeys of the next key to return
	desc      hmacKeyDesc  // options accumulated from HMACKeyOption values
	retry     *retryConfig
}
|||
|
|||
// ListHMACKeys returns an iterator for listing HMACKeys.
|
|||
//
|
|||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
|||
//
|
|||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { |
|||
desc := new(hmacKeyDesc) |
|||
for _, opt := range opts { |
|||
opt.withHMACKeyDesc(desc) |
|||
} |
|||
|
|||
o := makeStorageOpts(true, c.retry, desc.userProjectID) |
|||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) |
|||
} |
|||
|
|||
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
	// nextFunc fetches more pages into it.hmacKeys as needed and returns
	// iterator.Done (or a fetch error) when nothing is left.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}

	key := it.hmacKeys[it.index]
	it.index++

	return key, nil
}
|||
|
|||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
// The returned PageInfo can be used to set page size and token before iterating.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|||
|
|||
// fetch retrieves one page of HMAC key metadata from the JSON API, converts
// each entry and appends it to it.hmacKeys, and returns the token for the
// next page (empty when there are no further pages).
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
	// TODO: Remove fetch method upon integration. This method is internalized into
	// httpStorageClient.ListHMACKeys() as it is the only caller.
	call := it.raw.List(it.projectID)
	setClientHeader(call.Header())
	// Apply pagination and the filters accumulated in it.desc.
	if pageToken != "" {
		call = call.PageToken(pageToken)
	}
	if it.desc.showDeletedKeys {
		call = call.ShowDeletedKeys(true)
	}
	if it.desc.userProjectID != "" {
		call = call.UserProject(it.desc.userProjectID)
	}
	if it.desc.forServiceAccountEmail != "" {
		call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail)
	}
	if pageSize > 0 {
		call = call.MaxResults(int64(pageSize))
	}

	ctx := it.ctx
	var resp *raw.HmacKeysMetadata
	// Listing is read-only, so the call is always treated as idempotent (true).
	err = run(it.ctx, func() error {
		resp, err = call.Context(ctx).Do()
		return err
	}, it.retry, true, setRetryHeaderHTTP(call))
	if err != nil {
		return "", err
	}

	for _, metadata := range resp.Items {
		hk := &raw.HmacKey{
			Metadata: metadata,
		}
		hkey, err := toHMACKeyFromRaw(hk, true)
		if err != nil {
			return "", err
		}
		it.hmacKeys = append(it.hmacKeys, hkey)
	}
	return resp.NextPageToken, nil
}
|||
|
|||
// hmacKeyDesc collects the listing filters configured via HMACKeyOptions.
type hmacKeyDesc struct {
	forServiceAccountEmail string // restrict results to this service account; "" means all
	showDeletedKeys        bool   // also include keys in state "DELETED"
	userProjectID          string // project to bill, if non-empty
}
|||
|
|||
// HMACKeyOption configures the behavior of HMACKey related methods and actions.
//
// This interface is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyOption interface {
	// withHMACKeyDesc applies this option's setting to the given descriptor.
	withHMACKeyDesc(*hmacKeyDesc)
}
|||
|
|||
// hmacKeyDescFunc adapts a plain function to the HMACKeyOption interface.
type hmacKeyDescFunc func(*hmacKeyDesc)

// withHMACKeyDesc implements HMACKeyOption by invoking the wrapped function.
func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) {
	hkdf(hkd)
}
|||
|
|||
// ForHMACKeyServiceAccountEmail returns HMAC Keys that are
|
|||
// associated with the email address of a service account in the project.
|
|||
//
|
|||
// Only one service account email can be used as a filter, so if multiple
|
|||
// of these options are applied, the last email to be set will be used.
|
|||
//
|
|||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { |
|||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { |
|||
hkd.forServiceAccountEmail = serviceAccountEmail |
|||
}) |
|||
} |
|||
|
|||
// ShowDeletedHMACKeys will also list keys whose state is "DELETED".
|
|||
//
|
|||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func ShowDeletedHMACKeys() HMACKeyOption { |
|||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { |
|||
hkd.showDeletedKeys = true |
|||
}) |
|||
} |
|||
|
|||
// UserProjectForHMACKeys will bill the request against userProjectID
|
|||
// if userProjectID is non-empty.
|
|||
//
|
|||
// Note: This is a noop right now and only provided for API compatibility.
|
|||
//
|
|||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|||
func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { |
|||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { |
|||
hkd.userProjectID = userProjectID |
|||
}) |
|||
} |
File diff suppressed because it is too large
@ -0,0 +1,133 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
|
|||
"cloud.google.com/go/iam" |
|||
"cloud.google.com/go/internal/trace" |
|||
raw "google.golang.org/api/storage/v1" |
|||
iampb "google.golang.org/genproto/googleapis/iam/v1" |
|||
"google.golang.org/genproto/googleapis/type/expr" |
|||
) |
|||
|
|||
// IAM provides access to IAM access control for the bucket.
|
|||
func (b *BucketHandle) IAM() *iam.Handle { |
|||
return iam.InternalNewHandleClient(&iamClient{ |
|||
userProject: b.userProject, |
|||
retry: b.retry, |
|||
client: b.c, |
|||
}, b.name) |
|||
} |
|||
|
|||
// iamClient implements the iam.client interface.
type iamClient struct {
	userProject string       // project to bill, if non-empty
	retry       *retryConfig // retry settings applied to IAM calls
	client      *Client      // storage client whose transport (tc) executes the calls
}
|||
|
|||
// Get returns the bucket's IAM policy, requesting policy version 1.
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
	return c.GetWithVersion(ctx, resource, 1)
}
|||
|
|||
func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
o := makeStorageOpts(true, c.retry, c.userProject) |
|||
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) |
|||
} |
|||
|
|||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
isIdempotent := len(p.Etag) > 0 |
|||
o := makeStorageOpts(isIdempotent, c.retry, c.userProject) |
|||
return c.client.tc.SetIamPolicy(ctx, resource, p, o...) |
|||
} |
|||
|
|||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
o := makeStorageOpts(true, c.retry, c.userProject) |
|||
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) |
|||
} |
|||
|
|||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { |
|||
return &raw.Policy{ |
|||
Bindings: iamToStorageBindings(ip.Bindings), |
|||
Etag: string(ip.Etag), |
|||
Version: int64(ip.Version), |
|||
} |
|||
} |
|||
|
|||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { |
|||
var rbs []*raw.PolicyBindings |
|||
for _, ib := range ibs { |
|||
rbs = append(rbs, &raw.PolicyBindings{ |
|||
Role: ib.Role, |
|||
Members: ib.Members, |
|||
Condition: iamToStorageCondition(ib.Condition), |
|||
}) |
|||
} |
|||
return rbs |
|||
} |
|||
|
|||
func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { |
|||
if exprpb == nil { |
|||
return nil |
|||
} |
|||
return &raw.Expr{ |
|||
Expression: exprpb.Expression, |
|||
Description: exprpb.Description, |
|||
Location: exprpb.Location, |
|||
Title: exprpb.Title, |
|||
} |
|||
} |
|||
|
|||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { |
|||
return &iampb.Policy{ |
|||
Bindings: iamFromStorageBindings(rp.Bindings), |
|||
Etag: []byte(rp.Etag), |
|||
} |
|||
} |
|||
|
|||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { |
|||
var ibs []*iampb.Binding |
|||
for _, rb := range rbs { |
|||
ibs = append(ibs, &iampb.Binding{ |
|||
Role: rb.Role, |
|||
Members: rb.Members, |
|||
Condition: iamFromStorageCondition(rb.Condition), |
|||
}) |
|||
} |
|||
return ibs |
|||
} |
|||
|
|||
func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { |
|||
if rawexpr == nil { |
|||
return nil |
|||
} |
|||
return &expr.Expr{ |
|||
Expression: rawexpr.Expression, |
|||
Description: rawexpr.Description, |
|||
Location: rawexpr.Location, |
|||
Title: rawexpr.Title, |
|||
} |
|||
} |
@ -0,0 +1,174 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// https://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
|||
|
|||
// Package storage is an auto-generated package for the
|
|||
// Cloud Storage API.
|
|||
//
|
|||
// Lets you store and retrieve potentially-large, immutable data objects.
|
|||
//
|
|||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
|||
//
|
|||
// # Example usage
|
|||
//
|
|||
// To get started with this package, create a client.
|
|||
//
|
|||
// ctx := context.Background()
|
|||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|||
// // It will require modifications to work:
|
|||
// // - It may require correct/in-range values for request initialization.
|
|||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|||
// c, err := storage.NewClient(ctx)
|
|||
// if err != nil {
|
|||
// // TODO: Handle error.
|
|||
// }
|
|||
// defer c.Close()
|
|||
//
|
|||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
|||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
|||
// The returned client must be Closed when it is done being used.
|
|||
//
|
|||
// # Using the Client
|
|||
//
|
|||
// The following is an example of making an API call with the newly created client.
|
|||
//
|
|||
// ctx := context.Background()
|
|||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|||
// // It will require modifications to work:
|
|||
// // - It may require correct/in-range values for request initialization.
|
|||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|||
// c, err := storage.NewClient(ctx)
|
|||
// if err != nil {
|
|||
// // TODO: Handle error.
|
|||
// }
|
|||
// defer c.Close()
|
|||
//
|
|||
// req := &storagepb.DeleteBucketRequest{
|
|||
// // TODO: Fill request struct fields.
|
|||
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
|
|||
// }
|
|||
// err = c.DeleteBucket(ctx, req)
|
|||
// if err != nil {
|
|||
// // TODO: Handle error.
|
|||
// }
|
|||
//
|
|||
// # Use of Context
|
|||
//
|
|||
// The ctx passed to NewClient is used for authentication requests and
|
|||
// for creating the underlying connection, but is not used for subsequent calls.
|
|||
// Individual methods on the client use the ctx given to them.
|
|||
//
|
|||
// To close the open connection, use the Close() method.
|
|||
//
|
|||
// For information about setting deadlines, reusing contexts, and more
|
|||
// please visit https://pkg.go.dev/cloud.google.com/go.
|
|||
package storage // import "cloud.google.com/go/storage/internal/apiv2"
|
|||
|
|||
import ( |
|||
"context" |
|||
"os" |
|||
"runtime" |
|||
"strconv" |
|||
"strings" |
|||
"unicode" |
|||
|
|||
"google.golang.org/api/option" |
|||
"google.golang.org/grpc/metadata" |
|||
) |
|||
|
|||
// For more information on implementing a client constructor hook, see
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.

// clientHookParams carries arguments for a clientHook; currently empty.
type clientHookParams struct{}

// clientHook allows wrappers to inject additional ClientOptions while a
// client is being constructed.
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|||
|
|||
// versionClient holds the client library version; it is populated elsewhere
// (e.g. by an init in a generated version file) and may be empty.
var versionClient string

// getVersionClient reports the recorded client library version, or "UNKNOWN"
// when no version has been set.
func getVersionClient() string {
	if versionClient != "" {
		return versionClient
	}
	return "UNKNOWN"
}
|||
|
|||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { |
|||
out, _ := metadata.FromOutgoingContext(ctx) |
|||
out = out.Copy() |
|||
for _, md := range mds { |
|||
for k, v := range md { |
|||
out[k] = append(out[k], v...) |
|||
} |
|||
} |
|||
return metadata.NewOutgoingContext(ctx, out) |
|||
} |
|||
|
|||
func checkDisableDeadlines() (bool, error) { |
|||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") |
|||
if !ok { |
|||
return false, nil |
|||
} |
|||
|
|||
b, err := strconv.ParseBool(raw) |
|||
return b, err |
|||
} |
|||
|
|||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 5)
	scopes = append(scopes,
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/cloud-platform.read-only",
		"https://www.googleapis.com/auth/devstorage.full_control",
		"https://www.googleapis.com/auth/devstorage.read_only",
		"https://www.googleapis.com/auth/devstorage.read_write",
	)
	return scopes
}
|||
|
|||
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
	const develPrefix = "devel +"

	s := runtime.Version()
	if strings.HasPrefix(s, develPrefix) {
		// Development builds look like "devel +<hash> <date>"; keep only the
		// token immediately after the prefix.
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	// notSemverRune reports whether r cannot appear in a semver numeric core
	// (i.e. anything other than digits and dots).
	notSemverRune := func(r rune) bool {
		return !strings.ContainsRune("0123456789.", r)
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:] // drop the "go" prefix, keeping "1..."
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			// Split off a suffix such as "beta1" as the prerelease part.
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			// Pad "major.minor" out to "major.minor.patch".
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return "UNKNOWN"
}
@ -0,0 +1,168 @@ |
|||
{ |
|||
"schema": "1.0", |
|||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", |
|||
"language": "go", |
|||
"protoPackage": "google.storage.v2", |
|||
"libraryPackage": "cloud.google.com/go/storage/internal/apiv2", |
|||
"services": { |
|||
"Storage": { |
|||
"clients": { |
|||
"grpc": { |
|||
"libraryClient": "Client", |
|||
"rpcs": { |
|||
"CancelResumableWrite": { |
|||
"methods": [ |
|||
"CancelResumableWrite" |
|||
] |
|||
}, |
|||
"ComposeObject": { |
|||
"methods": [ |
|||
"ComposeObject" |
|||
] |
|||
}, |
|||
"CreateBucket": { |
|||
"methods": [ |
|||
"CreateBucket" |
|||
] |
|||
}, |
|||
"CreateHmacKey": { |
|||
"methods": [ |
|||
"CreateHmacKey" |
|||
] |
|||
}, |
|||
"CreateNotification": { |
|||
"methods": [ |
|||
"CreateNotification" |
|||
] |
|||
}, |
|||
"DeleteBucket": { |
|||
"methods": [ |
|||
"DeleteBucket" |
|||
] |
|||
}, |
|||
"DeleteHmacKey": { |
|||
"methods": [ |
|||
"DeleteHmacKey" |
|||
] |
|||
}, |
|||
"DeleteNotification": { |
|||
"methods": [ |
|||
"DeleteNotification" |
|||
] |
|||
}, |
|||
"DeleteObject": { |
|||
"methods": [ |
|||
"DeleteObject" |
|||
] |
|||
}, |
|||
"GetBucket": { |
|||
"methods": [ |
|||
"GetBucket" |
|||
] |
|||
}, |
|||
"GetHmacKey": { |
|||
"methods": [ |
|||
"GetHmacKey" |
|||
] |
|||
}, |
|||
"GetIamPolicy": { |
|||
"methods": [ |
|||
"GetIamPolicy" |
|||
] |
|||
}, |
|||
"GetNotification": { |
|||
"methods": [ |
|||
"GetNotification" |
|||
] |
|||
}, |
|||
"GetObject": { |
|||
"methods": [ |
|||
"GetObject" |
|||
] |
|||
}, |
|||
"GetServiceAccount": { |
|||
"methods": [ |
|||
"GetServiceAccount" |
|||
] |
|||
}, |
|||
"ListBuckets": { |
|||
"methods": [ |
|||
"ListBuckets" |
|||
] |
|||
}, |
|||
"ListHmacKeys": { |
|||
"methods": [ |
|||
"ListHmacKeys" |
|||
] |
|||
}, |
|||
"ListNotifications": { |
|||
"methods": [ |
|||
"ListNotifications" |
|||
] |
|||
}, |
|||
"ListObjects": { |
|||
"methods": [ |
|||
"ListObjects" |
|||
] |
|||
}, |
|||
"LockBucketRetentionPolicy": { |
|||
"methods": [ |
|||
"LockBucketRetentionPolicy" |
|||
] |
|||
}, |
|||
"QueryWriteStatus": { |
|||
"methods": [ |
|||
"QueryWriteStatus" |
|||
] |
|||
}, |
|||
"ReadObject": { |
|||
"methods": [ |
|||
"ReadObject" |
|||
] |
|||
}, |
|||
"RewriteObject": { |
|||
"methods": [ |
|||
"RewriteObject" |
|||
] |
|||
}, |
|||
"SetIamPolicy": { |
|||
"methods": [ |
|||
"SetIamPolicy" |
|||
] |
|||
}, |
|||
"StartResumableWrite": { |
|||
"methods": [ |
|||
"StartResumableWrite" |
|||
] |
|||
}, |
|||
"TestIamPermissions": { |
|||
"methods": [ |
|||
"TestIamPermissions" |
|||
] |
|||
}, |
|||
"UpdateBucket": { |
|||
"methods": [ |
|||
"UpdateBucket" |
|||
] |
|||
}, |
|||
"UpdateHmacKey": { |
|||
"methods": [ |
|||
"UpdateHmacKey" |
|||
] |
|||
}, |
|||
"UpdateObject": { |
|||
"methods": [ |
|||
"UpdateObject" |
|||
] |
|||
}, |
|||
"WriteObject": { |
|||
"methods": [ |
|||
"WriteObject" |
|||
] |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,26 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// https://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
|
|||
"google.golang.org/grpc/metadata" |
|||
) |
|||
|
|||
// InsertMetadata inserts the given gRPC metadata into the outgoing context.
|
|||
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { |
|||
return insertMetadata(ctx, mds...) |
|||
} |
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,23 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Code generated by gapicgen. DO NOT EDIT.
|
|||
|
|||
package storage |
|||
|
|||
import "cloud.google.com/go/storage/internal" |
|||
|
|||
// init records the storage module's tagged release version in versionClient
// so it can be reported by the generated client (see getVersionClient).
func init() {
	versionClient = internal.Version
}
@ -0,0 +1,18 @@ |
|||
// Copyright 2022 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package internal |
|||
|
|||
// Version is the current tagged release of the library.
// NOTE(review): presumably consumed elsewhere (e.g. version-stamping inits and
// request headers) — confirm against importers before changing its format.
const Version = "1.28.1"
@ -0,0 +1,146 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"net" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
|
|||
"cloud.google.com/go/internal" |
|||
"cloud.google.com/go/internal/version" |
|||
sinternal "cloud.google.com/go/storage/internal" |
|||
"github.com/google/uuid" |
|||
gax "github.com/googleapis/gax-go/v2" |
|||
"google.golang.org/api/googleapi" |
|||
"google.golang.org/grpc/codes" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// defaultRetry is substituted by run when a call supplies no retryConfig; its
// zero value selects the default policy, backoff, and retry predicate.
var defaultRetry *retryConfig = &retryConfig{}

// xGoogDefaultHeader is the static portion of the x-goog-api-client header,
// reporting the Go version and the storage client library version.
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)
|||
|
|||
// run determines whether a retry is necessary based on the config and
// idempotency information. It then calls the function with or without retries
// as appropriate, using the configured settings.
//
// setHeader is invoked before every attempt with a per-call invocation ID and
// the 1-based attempt count, so the transport can report retry telemetry.
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error {
	attempts := 1
	invocationID := uuid.New().String()

	if retry == nil {
		retry = defaultRetry
	}
	// Make exactly one attempt when the policy forbids retries, or when it
	// only permits retrying idempotent calls and this call is not idempotent.
	if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
		setHeader(invocationID, attempts)
		return call()
	}
	bo := gax.Backoff{}
	if retry.backoff != nil {
		bo.Multiplier = retry.backoff.Multiplier
		bo.Initial = retry.backoff.Initial
		bo.Max = retry.backoff.Max
	}
	// A custom predicate, if configured, overrides the default ShouldRetry.
	var errorFunc func(err error) bool = ShouldRetry
	if retry.shouldRetry != nil {
		errorFunc = retry.shouldRetry
	}

	return internal.Retry(ctx, bo, func() (stop bool, err error) {
		setHeader(invocationID, attempts)
		err = call()
		attempts++
		// Stop as soon as the error is nil or not considered retryable.
		return !errorFunc(err), err
	})
}
|||
|
|||
func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { |
|||
return func(invocationID string, attempts int) { |
|||
if req == nil { |
|||
return |
|||
} |
|||
header := req.Header() |
|||
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) |
|||
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") |
|||
header.Set("x-goog-api-client", xGoogHeader) |
|||
} |
|||
} |
|||
|
|||
// TODO: Implement method setting header via context for gRPC
|
|||
func setRetryHeaderGRPC(_ context.Context) func(string, int) { |
|||
return func(_ string, _ int) { |
|||
return |
|||
} |
|||
} |
|||
|
|||
// ShouldRetry returns true if an error is retryable, based on best practice
// guidance from GCS. See
// https://cloud.google.com/storage/docs/retry-strategy#go for more information
// on what errors are considered retryable.
//
// If you would like to customize retryable errors, use the WithErrorFunc to
// supply a RetryOption to your library calls. For example, to retry additional
// errors, you can write a custom func that wraps ShouldRetry and also specifies
// additional errors that should return true.
func ShouldRetry(err error) bool {
	if err == nil {
		return false
	}
	// A truncated response body is worth retrying in full.
	if errors.Is(err, io.ErrUnexpectedEOF) {
		return true
	}

	switch e := err.(type) {
	case *net.OpError:
		if strings.Contains(e.Error(), "use of closed network connection") {
			// TODO: check against net.ErrClosed (go 1.16+) instead of string
			return true
		}
	case *googleapi.Error:
		// Retry on 408, 429, and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		retriable := []string{"connection refused", "connection reset"}
		for _, s := range retriable {
			if strings.Contains(e.Error(), s) {
				return true
			}
		}
	case interface{ Temporary() bool }:
		if e.Temporary() {
			return true
		}
	}
	// HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per
	// https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html.
	//
	// This is only necessary for the experimental gRPC-based media operations.
	if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable {
		return true
	}
	// Unwrap is only supported in go1.13.x+
	if e, ok := err.(interface{ Unwrap() error }); ok {
		// Recurse so wrapped retryable errors are still recognized.
		return ShouldRetry(e.Unwrap())
	}
	return false
}
@ -0,0 +1,200 @@ |
|||
// Copyright 2017 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"regexp" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" |
|||
raw "google.golang.org/api/storage/v1" |
|||
) |
|||
|
|||
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}
|||
|
|||
// Values for Notification.PayloadFormat.
const (
	// NoPayload sends no payload with notification messages.
	NoPayload = "NONE"

	// JSONPayload sends object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)
|||
|
|||
// Values for Notification.EventTypes.
const (
	// ObjectFinalizeEvent is the event that occurs when an object is
	// successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// ObjectMetadataUpdateEvent is the event that occurs when the metadata of
	// an existing object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// ObjectDeleteEvent is the event that occurs when an object is
	// permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// ObjectArchiveEvent is the event that occurs when the live version of an
	// object becomes an archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
|||
|
|||
func toNotification(rn *raw.Notification) *Notification { |
|||
n := &Notification{ |
|||
ID: rn.Id, |
|||
EventTypes: rn.EventTypes, |
|||
ObjectNamePrefix: rn.ObjectNamePrefix, |
|||
CustomAttributes: rn.CustomAttributes, |
|||
PayloadFormat: rn.PayloadFormat, |
|||
} |
|||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) |
|||
return n |
|||
} |
|||
|
|||
func toNotificationFromProto(pbn *storagepb.Notification) *Notification { |
|||
n := &Notification{ |
|||
ID: pbn.GetName(), |
|||
EventTypes: pbn.GetEventTypes(), |
|||
ObjectNamePrefix: pbn.GetObjectNamePrefix(), |
|||
CustomAttributes: pbn.GetCustomAttributes(), |
|||
PayloadFormat: pbn.GetPayloadFormat(), |
|||
} |
|||
n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) |
|||
return n |
|||
} |
|||
|
|||
func toProtoNotification(n *Notification) *storagepb.Notification { |
|||
return &storagepb.Notification{ |
|||
Name: n.ID, |
|||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", |
|||
n.TopicProjectID, n.TopicID), |
|||
EventTypes: n.EventTypes, |
|||
ObjectNamePrefix: n.ObjectNamePrefix, |
|||
CustomAttributes: n.CustomAttributes, |
|||
PayloadFormat: n.PayloadFormat, |
|||
} |
|||
} |
|||
|
|||
// topicRE matches a full PubSub topic resource name, capturing the project ID
// and the topic ID. The dots are escaped so that only the literal host
// "pubsub.googleapis.com" matches (previously "." matched any character).
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	matches := topicRE.FindStringSubmatch(nt)
	if matches == nil {
		return "?", "?"
	}
	return matches[1], matches[2]
}
|||
|
|||
func toRawNotification(n *Notification) *raw.Notification { |
|||
return &raw.Notification{ |
|||
Id: n.ID, |
|||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", |
|||
n.TopicProjectID, n.TopicID), |
|||
EventTypes: n.EventTypes, |
|||
ObjectNamePrefix: n.ObjectNamePrefix, |
|||
CustomAttributes: n.CustomAttributes, |
|||
PayloadFormat: string(n.PayloadFormat), |
|||
} |
|||
} |
|||
|
|||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
|||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
|||
// returned Notification's ID can be used to refer to it.
|
|||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { |
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") |
|||
defer func() { trace.EndSpan(ctx, err) }() |
|||
|
|||
if n.ID != "" { |
|||
return nil, errors.New("storage: AddNotification: ID must not be set") |
|||
} |
|||
if n.TopicProjectID == "" { |
|||
return nil, errors.New("storage: AddNotification: missing TopicProjectID") |
|||
} |
|||
if n.TopicID == "" { |
|||
return nil, errors.New("storage: AddNotification: missing TopicID") |
|||
} |
|||
|
|||
opts := makeStorageOpts(false, b.retry, b.userProject) |
|||
ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) |
|||
return ret, err |
|||
} |
|||
|
|||
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
	// err is a named result so the deferred EndSpan records the final outcome.
	defer func() { trace.EndSpan(ctx, err) }()

	opts := makeStorageOpts(true, b.retry, b.userProject)
	n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
	return n, err
}
|||
|
|||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification { |
|||
m := map[string]*Notification{} |
|||
for _, rn := range rns { |
|||
m[rn.Id] = toNotification(rn) |
|||
} |
|||
return m |
|||
} |
|||
|
|||
func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { |
|||
m := map[string]*Notification{} |
|||
for _, n := range ns { |
|||
m[n.Name] = toNotificationFromProto(n) |
|||
} |
|||
return m |
|||
} |
|||
|
|||
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
	// err is a named result so the deferred EndSpan records the final outcome.
	defer func() { trace.EndSpan(ctx, err) }()

	opts := makeStorageOpts(true, b.retry, b.userProject)
	return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
}
@ -0,0 +1,436 @@ |
|||
// Copyright 2020 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"crypto" |
|||
"crypto/rand" |
|||
"crypto/rsa" |
|||
"crypto/sha256" |
|||
"encoding/base64" |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// PostPolicyV4Options are used to construct a signed post policy.
// Please see https://cloud.google.com/storage/docs/xml-api/post-object
// for reference about the fields.
type PostPolicyV4Options struct {
	// GoogleAccessID represents the authorizer of the signed URL generation.
	// It is typically the Google service account client email address from
	// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
	// Required.
	GoogleAccessID string

	// PrivateKey is the Google service account private key. It is obtainable
	// from the Google Developers Console.
	// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
	// create a service account client ID or reuse one of your existing service account
	// credentials. Click on the "Generate new P12 key" to generate and download
	// a new private key. Once you download the P12 file, use the following command
	// to convert it into a PEM file.
	//
	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
	//
	// Provide the contents of the PEM file as a byte slice.
	// Exactly one of PrivateKey or SignBytes must be non-nil.
	PrivateKey []byte

	// SignBytes is a function for implementing custom signing.
	//
	// Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined,
	// SignBytes will be ignored.
	// This SignBytes function expects the bytes it receives to be hashed, while
	// SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
	// Add the following to the top of your signing function to hash the bytes
	// to use SignRawBytes instead:
	//		shaSum := sha256.Sum256(bytes)
	//		bytes = shaSum[:]
	//
	SignBytes func(hashBytes []byte) (signature []byte, err error)

	// SignRawBytes is a function for implementing custom signing. For example, if
	// your application is running on Google App Engine, you can use
	// appengine's internal signing function:
	//		ctx := appengine.NewContext(request)
	//     	acc, _ := appengine.ServiceAccount(ctx)
	//     	&PostPolicyV4Options{
	//     		GoogleAccessID: acc,
	//     		SignRawBytes: func(b []byte) ([]byte, error) {
	//     			_, signedBytes, err := appengine.SignBytes(ctx, b)
	//     			return signedBytes, err
	//     		},
	//     		// etc.
	//     	})
	//
	// SignRawBytes is equivalent to the SignBytes field on SignedURLOptions;
	// that is, you may use the same signing function for the two.
	//
	// Exactly one of PrivateKey or SignRawBytes must be non-nil.
	SignRawBytes func(bytes []byte) (signature []byte, err error)

	// Expires is the expiration time on the signed URL.
	// It must be a time in the future.
	// Required.
	Expires time.Time

	// Style provides options for the type of URL to use. Options are
	// PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See
	// https://cloud.google.com/storage/docs/request-endpoints for details.
	// Optional.
	Style URLStyle

	// Insecure when set indicates that the generated URL's scheme
	// will use "http" instead of "https" (default).
	// Optional.
	Insecure bool

	// Fields specifies the attributes of a PostPolicyV4 request.
	// When Fields is non-nil, its attributes must match those that will be
	// passed into field Conditions.
	// Optional.
	Fields *PolicyV4Fields

	// The conditions that the uploaded file will be expected to conform to.
	// When used, the failure of an upload to satisfy a condition will result in
	// a 4XX status code, back with the message describing the problem.
	// Optional.
	Conditions []PostPolicyV4Condition

	// shouldHashSignBytes indicates that the deprecated SignBytes callback is
	// in use and therefore expects a SHA-256 digest rather than the raw policy
	// bytes. It is set by validatePostPolicyV4Options.
	shouldHashSignBytes bool
}
|||
|
|||
func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { |
|||
return &PostPolicyV4Options{ |
|||
GoogleAccessID: opts.GoogleAccessID, |
|||
PrivateKey: opts.PrivateKey, |
|||
SignBytes: opts.SignBytes, |
|||
SignRawBytes: opts.SignRawBytes, |
|||
Expires: opts.Expires, |
|||
Style: opts.Style, |
|||
Insecure: opts.Insecure, |
|||
Fields: opts.Fields, |
|||
Conditions: opts.Conditions, |
|||
shouldHashSignBytes: opts.shouldHashSignBytes, |
|||
} |
|||
} |
|||
|
|||
// PolicyV4Fields describes the attributes for a PostPolicyV4 request.
type PolicyV4Fields struct {
	// ACL specifies the access control permissions for the object.
	// Optional.
	ACL string
	// CacheControl specifies the caching directives for the object.
	// Optional.
	CacheControl string
	// ContentType specifies the media type of the object.
	// Optional.
	ContentType string
	// ContentDisposition specifies how the file will be served back to requesters.
	// Optional.
	ContentDisposition string
	// ContentEncoding specifies the content encoding of the object, enabling
	// decompressive transcoding on download.
	// This field is complementary to ContentType in that the file could be
	// compressed but ContentType specifies the file's original media type.
	// Optional.
	ContentEncoding string
	// Metadata specifies custom metadata for the object.
	// If any key doesn't begin with "x-goog-meta-", an error will be returned.
	// Optional.
	Metadata map[string]string
	// StatusCodeOnSuccess when set, specifies the status code that Cloud Storage
	// will serve back on successful upload of the object.
	// Optional.
	StatusCodeOnSuccess int
	// RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage
	// will serve back on successful upload of the object.
	// Optional.
	RedirectToURLOnSuccess string
}
|||
|
|||
// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request.
type PostPolicyV4 struct {
	// URL is the generated URL that the file upload will be made to.
	URL string
	// Fields specifies the generated key-values that the file uploader
	// must include in their multipart upload form.
	Fields map[string]string
}
|||
|
|||
// PostPolicyV4Condition describes the constraints that the subsequent
// object upload's multipart form fields will be expected to conform to.
type PostPolicyV4Condition interface {
	// isEmpty reports whether the condition carries no value and should be
	// omitted from the serialized policy document.
	isEmpty() bool
	json.Marshaler
}
|||
|
|||
// startsWith is a policy condition requiring that the form field named key
// begin with value.
type startsWith struct {
	key, value string
}

// MarshalJSON encodes the condition in the policy document's
// ["starts-with", "<key>", "<value>"] array form.
func (sw *startsWith) MarshalJSON() ([]byte, error) {
	triple := []string{"starts-with", sw.key, sw.value}
	return json.Marshal(triple)
}

// isEmpty reports whether the condition has no value and can be dropped.
func (sw *startsWith) isEmpty() bool {
	return len(sw.value) == 0
}
|||
|
|||
// ConditionStartsWith checks that an attributes starts with value.
|
|||
// An empty value will cause this condition to be ignored.
|
|||
func ConditionStartsWith(key, value string) PostPolicyV4Condition { |
|||
return &startsWith{key, value} |
|||
} |
|||
|
|||
// contentLengthRangeCondition constrains the uploaded object's byte length
// to the inclusive range [start, end].
type contentLengthRangeCondition struct {
	start, end uint64
}

// MarshalJSON encodes the condition as ["content-length-range", start, end].
func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) {
	payload := []interface{}{"content-length-range", clr.start, clr.end}
	return json.Marshal(payload)
}

// isEmpty reports whether both bounds are zero, i.e. the range was never set.
func (clr *contentLengthRangeCondition) isEmpty() bool {
	return clr.start == 0 && clr.end == 0
}
|||
|
|||
// singleValueCondition requires the form field name to equal value exactly.
type singleValueCondition struct {
	name, value string
}

// MarshalJSON encodes the condition as a one-entry {"<name>": "<value>"} object.
func (svc *singleValueCondition) MarshalJSON() ([]byte, error) {
	kv := map[string]string{svc.name: svc.value}
	return json.Marshal(kv)
}

// isEmpty reports whether the condition has no value and can be dropped.
func (svc *singleValueCondition) isEmpty() bool {
	return len(svc.value) == 0
}
|||
|
|||
// ConditionContentLengthRange constraints the limits that the
|
|||
// multipart upload's range header will be expected to be within.
|
|||
func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { |
|||
return &contentLengthRangeCondition{start, end} |
|||
} |
|||
|
|||
func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { |
|||
return &singleValueCondition{"success_action_redirect", redirectURL} |
|||
} |
|||
|
|||
func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { |
|||
svc := &singleValueCondition{name: "success_action_status"} |
|||
if statusCode > 0 { |
|||
svc.value = fmt.Sprintf("%d", statusCode) |
|||
} |
|||
return svc |
|||
} |
|||
|
|||
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4
// method which uses the Client's credentials to handle authentication.
func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
	if bucket == "" {
		return nil, errors.New("storage: bucket must be non-empty")
	}
	if object == "" {
		return nil, errors.New("storage: object must be non-empty")
	}
	now := utcNow()
	if err := validatePostPolicyV4Options(opts, now); err != nil {
		return nil, err
	}

	// Pick the signing callback in priority order: the raw-byte signer,
	// then the deprecated hashed-byte signer, then an RSA key parsed from
	// the caller's PEM-encoded PrivateKey.
	var signingFn func(hashedBytes []byte) ([]byte, error)
	switch {
	case opts.SignRawBytes != nil:
		signingFn = opts.SignRawBytes
	case opts.shouldHashSignBytes:
		signingFn = opts.SignBytes
	case len(opts.PrivateKey) != 0:
		parsedRSAPrivKey, err := parseKey(opts.PrivateKey)
		if err != nil {
			return nil, err
		}
		signingFn = func(b []byte) ([]byte, error) {
			sum := sha256.Sum256(b)
			return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:])
		}

	default:
		return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
	}

	var descFields PolicyV4Fields
	if opts.Fields != nil {
		descFields = *opts.Fields
	}

	if err := validateMetadata(descFields.Metadata); err != nil {
		return nil, err
	}

	// Build the policy.
	conds := make([]PostPolicyV4Condition, len(opts.Conditions))
	copy(conds, opts.Conditions)
	conds = append(conds,
		// These are ordered lexicographically. Technically the order doesn't matter
		// for creating the policy, but we use this order to match the
		// cross-language conformance tests for this feature.
		&singleValueCondition{"acl", descFields.ACL},
		&singleValueCondition{"cache-control", descFields.CacheControl},
		&singleValueCondition{"content-disposition", descFields.ContentDisposition},
		&singleValueCondition{"content-encoding", descFields.ContentEncoding},
		&singleValueCondition{"content-type", descFields.ContentType},
		conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess),
		conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess),
	)

	YYYYMMDD := now.Format(yearMonthDay)
	policyFields := map[string]string{
		"key":                     object,
		"x-goog-date":             now.Format(iso8601),
		"x-goog-credential":       opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
		"x-goog-algorithm":        "GOOG4-RSA-SHA256",
		"acl":                     descFields.ACL,
		"cache-control":           descFields.CacheControl,
		"content-disposition":     descFields.ContentDisposition,
		"content-encoding":        descFields.ContentEncoding,
		"content-type":            descFields.ContentType,
		"success_action_redirect": descFields.RedirectToURLOnSuccess,
	}
	// Custom metadata entries appear both as policy conditions and as form fields.
	for key, value := range descFields.Metadata {
		conds = append(conds, &singleValueCondition{key, value})
		policyFields[key] = value
	}

	// Following from the order expected by the conformance test cases,
	// hence manually inserting these fields in a specific order.
	conds = append(conds,
		&singleValueCondition{"bucket", bucket},
		&singleValueCondition{"key", object},
		&singleValueCondition{"x-goog-date", now.Format(iso8601)},
		&singleValueCondition{
			name:  "x-goog-credential",
			value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
		},
		&singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"},
	)

	// Drop conditions whose value was never set so they don't appear in the
	// serialized policy document.
	nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions))
	for _, cond := range conds {
		if cond == nil || !cond.isEmpty() {
			nonEmptyConds = append(nonEmptyConds, cond)
		}
	}
	condsAsJSON, err := json.Marshal(map[string]interface{}{
		"conditions": nonEmptyConds,
		"expiration": opts.Expires.Format(time.RFC3339),
	})
	if err != nil {
		return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err)
	}

	// The policy document is signed in its base64-encoded form.
	b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
	var signature []byte
	var signErr error

	if opts.shouldHashSignBytes {
		// SignBytes expects hashed bytes as input instead of raw bytes, so we hash them
		shaSum := sha256.Sum256([]byte(b64Policy))
		signature, signErr = signingFn(shaSum[:])
	} else {
		signature, signErr = signingFn([]byte(b64Policy))
	}
	if signErr != nil {
		return nil, signErr
	}

	policyFields["policy"] = b64Policy
	policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature)

	// Construct the URL.
	scheme := "https"
	if opts.Insecure {
		scheme = "http"
	}
	path := opts.Style.path(bucket, "") + "/"
	u := &url.URL{
		Path:    path,
		RawPath: pathEncodeV4(path),
		Host:    opts.Style.host(bucket),
		Scheme:  scheme,
	}

	if descFields.StatusCodeOnSuccess > 0 {
		policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess)
	}

	// Clear out fields with blank values.
	for key, value := range policyFields {
		if value == "" {
			delete(policyFields, key)
		}
	}
	pp4 := &PostPolicyV4{
		Fields: policyFields,
		URL:    u.String(),
	}
	return pp4, nil
}
|||
|
|||
// validatePostPolicyV4Options checks that:
|
|||
// * GoogleAccessID is set
|
|||
// * either PrivateKey or SignRawBytes/SignBytes is set, but not both
|
|||
// * the deadline set in Expires is not in the past
|
|||
// * if Style is not set, it'll use PathStyle
|
|||
// * sets shouldHashSignBytes to true if opts.SignBytes should be used
|
|||
func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { |
|||
if opts == nil || opts.GoogleAccessID == "" { |
|||
return errors.New("storage: missing required GoogleAccessID") |
|||
} |
|||
if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { |
|||
return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") |
|||
} |
|||
if opts.Expires.Before(now) { |
|||
return errors.New("storage: expecting Expires to be in the future") |
|||
} |
|||
if opts.Style == nil { |
|||
opts.Style = PathStyle() |
|||
} |
|||
if opts.SignRawBytes == nil && opts.SignBytes != nil { |
|||
opts.shouldHashSignBytes = true |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-",
|
|||
// otherwise it will return an error.
|
|||
func validateMetadata(hdrs map[string]string) (err error) { |
|||
if len(hdrs) == 0 { |
|||
return nil |
|||
} |
|||
|
|||
badKeys := make([]string, 0, len(hdrs)) |
|||
for key := range hdrs { |
|||
if !strings.HasPrefix(key, "x-goog-meta-") { |
|||
badKeys = append(badKeys, key) |
|||
} |
|||
} |
|||
if len(badKeys) != 0 { |
|||
err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) |
|||
} |
|||
return |
|||
} |
@ -0,0 +1,266 @@ |
|||
// Copyright 2016 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"hash/crc32" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"strings" |
|||
"time" |
|||
|
|||
"cloud.google.com/go/internal/trace" |
|||
) |
|||
|
|||
// crc32cTable is the Castagnoli polynomial table used to compute CRC32C
// checksums of downloaded object content.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|||
|
|||
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
	// Size is the length of the object's content.
	Size int64

	// StartOffset is the byte offset within the object
	// from which reading begins.
	// This value is only non-zero for range requests.
	StartOffset int64

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// CacheControl specifies whether and for how long browser and Internet
	// caches are allowed to cache your objects.
	CacheControl string

	// LastModified is the time that the object was last modified.
	LastModified time.Time

	// Generation is the generation number of the object's content.
	Generation int64

	// Metageneration is the version of the metadata for this object at
	// this generation. This field is used for preconditions and for
	// detecting changes in metadata. A metageneration number is only
	// meaningful in the context of a particular generation of a
	// particular object.
	Metageneration int64
}
|||
|
|||
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
	// Offset 0 with length -1 reads the whole object (see NewRangeReader).
	return o.NewRangeReader(ctx, 0, -1)
}
|||
|
|||
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end. If offset is negative, the object is read abs(offset) bytes
// from the end, and length must also be negative to indicate all remaining
// bytes will be read.
//
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
// Google Cloud Storage dictates.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
	// r and err are named results so the deferred EndSpan records the outcome.
	defer func() { trace.EndSpan(ctx, err) }()

	if err := o.validate(); err != nil {
		return nil, err
	}
	// A negative offset means "from the end", which is only meaningful when
	// the length is also negative (read all remaining bytes).
	if offset < 0 && length >= 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}

	opts := makeStorageOpts(true, o.retry, o.userProject)

	// Bundle everything the transport layer needs to issue the ranged read.
	params := &newRangeReaderParams{
		bucket:         o.bucket,
		object:         o.object,
		gen:            o.gen,
		offset:         offset,
		length:         length,
		encryptionKey:  o.encryptionKey,
		conds:          o.conds,
		readCompressed: o.readCompressed,
	}

	r, err = o.c.tc.NewRangeReader(ctx, params, opts...)

	return r, err
}
|||
|
|||
// decompressiveTranscoding returns true if the request was served decompressed
|
|||
// and different than its original storage form. This happens when the "Content-Encoding"
|
|||
// header is "gzip".
|
|||
// See:
|
|||
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
|||
// - https://github.com/googleapis/google-cloud-go/issues/1800
|
|||
func decompressiveTranscoding(res *http.Response) bool { |
|||
// Decompressive Transcoding.
|
|||
return res.Header.Get("Content-Encoding") == "gzip" || |
|||
res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" |
|||
} |
|||
|
|||
func uncompressedByServer(res *http.Response) bool { |
|||
// If the data is stored as gzip but is not encoded as gzip, then it
|
|||
// was uncompressed by the server.
|
|||
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && |
|||
res.Header.Get("Content-Encoding") != "gzip" |
|||
} |
|||
|
|||
func parseCRC32c(res *http.Response) (uint32, bool) { |
|||
const prefix = "crc32c=" |
|||
for _, spec := range res.Header["X-Goog-Hash"] { |
|||
if strings.HasPrefix(spec, prefix) { |
|||
c, err := decodeUint32(spec[len(prefix):]) |
|||
if err == nil { |
|||
return c, true |
|||
} |
|||
} |
|||
} |
|||
return 0, false |
|||
} |
|||
|
|||
// setConditionsHeaders sets precondition request headers for downloads
|
|||
// using the XML API. It assumes that the conditions have been validated.
|
|||
func setConditionsHeaders(headers http.Header, conds *Conditions) error { |
|||
if conds == nil { |
|||
return nil |
|||
} |
|||
if conds.MetagenerationMatch != 0 { |
|||
headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) |
|||
} |
|||
switch { |
|||
case conds.GenerationMatch != 0: |
|||
headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) |
|||
case conds.DoesNotExist: |
|||
headers.Set("x-goog-if-generation-match", "0") |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// readerRequestWrapper wraps an *http.Request to look similar to an apiary
// library request, in order to be used by run().
type readerRequestWrapper struct {
	req *http.Request
}

// Header exposes the wrapped request's headers.
func (w *readerRequestWrapper) Header() http.Header {
	return w.req.Header
}
|||
|
|||
// emptyBody is a reusable, always-empty response body.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|||
|
|||
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	Attrs ReaderObjectAttrs
	// NOTE(review): seen and size are not touched in this file's visible
	// methods — presumably maintained by the transport layer; confirm there.
	seen, remain, size int64  // remain is -1 when the count left is unknown (see Remain)
	checkCRC           bool   // should we check the CRC?
	wantCRC            uint32 // the CRC32c value the server sent in the header
	gotCRC             uint32 // running crc

	// reader is the underlying transport-provided stream.
	reader io.ReadCloser
}
|
|||
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
	return r.reader.Close()
}
|||
|
|||
// Read implements io.Reader. It folds each chunk into the running CRC32C
// and, when the stream ends (io.EOF) with checkCRC set, verifies the total
// against the server-provided checksum.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	// remain == -1 means the byte count is unknown; otherwise track it.
	if r.remain != -1 {
		r.remain -= int64(n)
	}
	if r.checkCRC {
		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
		// Check CRC here. It would be natural to check it in Close, but
		// everybody defers Close on the assumption that it doesn't return
		// anything worth looking at.
		if err == io.EOF {
			if r.gotCRC != r.wantCRC {
				return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
					r.gotCRC, r.wantCRC)
			}
		}
	}
	return n, err
}
|||
|
|||
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
	return r.Attrs.Size
}
|||
|
|||
// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}
|||
|
|||
// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
	return r.Attrs.ContentType
}
|||
|
|||
// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
	return r.Attrs.ContentEncoding
}
|||
|
|||
// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
	return r.Attrs.CacheControl
}
|||
|
|||
// LastModified returns the value of the Last-Modified header.
// The error result is always nil; it is kept for backward compatibility.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
	return r.Attrs.LastModified, nil
}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -0,0 +1,273 @@ |
|||
// Copyright 2014 Google LLC
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package storage |
|||
|
|||
import ( |
|||
"context" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"sync" |
|||
"time" |
|||
"unicode/utf8" |
|||
) |
|||
|
|||
// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	//
	// Note: SendCRC32C must be set to true BEFORE the first call to
	// Writer.Write() in order to send the checksum. If it is set after that
	// point, the checksum will be ignored.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The value will be rounded up
	// to the nearest multiple of 256K. The default ChunkSize is 16MiB.
	//
	// Each Writer will internally allocate a buffer of size ChunkSize. This is
	// used to buffer input data and allow for the input to be sent again if a
	// request must be retried.
	//
	// If you upload small objects (< 16MiB), you should set ChunkSize
	// to a value slightly larger than the objects' sizes to avoid memory bloat.
	// This is especially important if you are uploading many small objects
	// concurrently. See
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size
	// for more information about performance trade-offs related to ChunkSize.
	//
	// If ChunkSize is set to zero, chunking will be disabled and the object will
	// be uploaded in a single request without the use of a buffer. This will
	// further reduce memory used during uploads, but will also prevent the writer
	// from retrying in case of a transient error from the server or resuming an
	// upload that fails midway through, since the buffer is required in order to
	// retry the failed request.
	//
	// ChunkSize must be set before the first Write call.
	ChunkSize int

	// ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk
	// resumable uploads.
	//
	// For uploads of larger files, the Writer will attempt to retry if the
	// request to upload a particular chunk fails with a transient error.
	// If a single chunk has been attempting to upload for longer than this
	// deadline and the request fails, it will no longer be retried, and the error
	// will be returned to the caller. This is only applicable for files which are
	// large enough to require a multi-chunk resumable upload. The default value
	// is 32s. Users may want to pick a longer deadline if they are using larger
	// values for ChunkSize or if they expect to have a slow or unreliable
	// internet connection.
	//
	// To set a deadline on the entire upload, use context timeout or
	// cancellation.
	ChunkRetryDeadline time.Duration

	// ProgressFunc can be used to monitor the progress of a large write.
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx context.Context
	o   *ObjectHandle

	opened bool
	pw     *io.PipeWriter

	donec chan struct{} // closed after err and obj are set.
	obj   *ObjectAttrs

	// mu guards err, which Write reads and which may be set by other code
	// (e.g. the upload goroutine) concurrently.
	mu  sync.Mutex
	err error
}
|||
|
|||
// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
//
// Writes will be retried on transient errors from the server, unless
// Writer.ChunkSize has been set to zero.
func (w *Writer) Write(p []byte) (n int, err error) {
	// Fail fast if the background upload already recorded an error.
	w.mu.Lock()
	werr := w.err
	w.mu.Unlock()
	if werr != nil {
		return 0, werr
	}
	// Lazily open the transport writer on the first Write call.
	if !w.opened {
		if err := w.openWriter(); err != nil {
			return 0, err
		}
	}
	n, err = w.pw.Write(p)
	if err != nil {
		// Re-read the recorded error under the lock: the background goroutine
		// may have stored a more meaningful cause than the pipe error.
		w.mu.Lock()
		werr := w.err
		w.mu.Unlock()
		// Preserve existing functionality that when context is canceled, Write will return
		// context.Canceled instead of "io: read/write on closed pipe". This hides the
		// pipe implementation detail from users and makes Write seem as though it's an RPC.
		if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) {
			return n, werr
		}
	}
	return n, err
}
|||
|
|||
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
	// Ensure the transport writer exists even if Write was never called,
	// so Close alone still performs the upload.
	if !w.opened {
		if err := w.openWriter(); err != nil {
			return err
		}
	}

	// Closing either the read or write causes the entire pipe to close.
	if err := w.pw.Close(); err != nil {
		return err
	}

	// donec is closed after err and obj are set, so after this receive the
	// final result is safe to read under the lock.
	<-w.donec
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.err
}
|||
|
|||
// openWriter validates the write attributes, builds the transport parameters
// and opens the underlying transport writer, wiring the Writer's callbacks
// (setError, progress, setObj, donec) into it. On success it marks the Writer
// opened and starts the goroutine that watches for context cancellation.
func (w *Writer) openWriter() (err error) {
	if err := w.validateWriteAttrs(); err != nil {
		return err
	}
	// Writes to a specific generation are not supported.
	if w.o.gen != defaultGen {
		return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
	}

	// The upload is treated as idempotent (safe to retry) only when the
	// preconditions pin a generation or require the object not to exist.
	isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
	opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
	params := &openWriterParams{
		ctx:                w.ctx,
		chunkSize:          w.ChunkSize,
		chunkRetryDeadline: w.ChunkRetryDeadline,
		bucket:             w.o.bucket,
		attrs:              &w.ObjectAttrs,
		conds:              w.o.conds,
		encryptionKey:      w.o.encryptionKey,
		sendCRC32C:         w.SendCRC32C,
		donec:              w.donec,
		setError:           w.error,
		progress:           w.progress,
		setObj:             func(o *ObjectAttrs) { w.obj = o },
	}
	// Don't bother opening the transport if the context is already dead.
	if err := w.ctx.Err(); err != nil {
		return err // short-circuit
	}
	w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
	if err != nil {
		return err
	}
	w.opened = true
	go w.monitorCancel()

	return nil
}
|||
|
|||
// monitorCancel is intended to be used as a background goroutine. It monitors the
// context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.
func (w *Writer) monitorCancel() {
	select {
	case <-w.ctx.Done():
		// Record the context error so later Write/Close calls report it
		// instead of a raw pipe error.
		w.mu.Lock()
		werr := w.ctx.Err()
		w.err = werr
		w.mu.Unlock()

		// Closing either the read or write causes the entire pipe to close.
		w.CloseWithError(werr)
	case <-w.donec:
		// Upload finished on its own; nothing to clean up.
	}
}
|||
|
|||
// CloseWithError aborts the write operation with the provided error.
|
|||
// CloseWithError always returns nil.
|
|||
//
|
|||
// Deprecated: cancel the context passed to NewWriter instead.
|
|||
func (w *Writer) CloseWithError(err error) error { |
|||
if !w.opened { |
|||
return nil |
|||
} |
|||
return w.pw.CloseWithError(err) |
|||
} |
|||
|
|||
// Attrs returns metadata about a successfully-written object.
|
|||
// It's only valid to call it after Close returns nil.
|
|||
func (w *Writer) Attrs() *ObjectAttrs { |
|||
return w.obj |
|||
} |
|||
|
|||
func (w *Writer) validateWriteAttrs() error { |
|||
attrs := w.ObjectAttrs |
|||
// Check the developer didn't change the object Name (this is unfortunate, but
|
|||
// we don't want to store an object under the wrong name).
|
|||
if attrs.Name != w.o.object { |
|||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) |
|||
} |
|||
if !utf8.ValidString(attrs.Name) { |
|||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) |
|||
} |
|||
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { |
|||
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") |
|||
} |
|||
if w.ChunkSize < 0 { |
|||
return errors.New("storage: Writer.ChunkSize must be non-negative") |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// progress is a convenience wrapper that reports write progress to the Writer
|
|||
// ProgressFunc if it is set and progress is non-zero.
|
|||
func (w *Writer) progress(p int64) { |
|||
if w.ProgressFunc != nil && p != 0 { |
|||
w.ProgressFunc(p) |
|||
} |
|||
} |
|||
|
|||
// error acquires the Writer's lock, sets the Writer's err to the given error,
|
|||
// then relinquishes the lock.
|
|||
func (w *Writer) error(err error) { |
|||
w.mu.Lock() |
|||
w.err = err |
|||
w.mu.Unlock() |
|||
} |
@ -0,0 +1,2 @@ |
|||
[url "ssh://git@git.lowcodeplatform.net/"] |
|||
insteadOf = https://git.lowcodeplatform.net/ |
@ -0,0 +1,9 @@ |
|||
.history |
|||
.idea |
|||
.vscode |
|||
.DS_Store |
|||
*~merged* |
|||
*~merged |
|||
/public |
|||
.env |
|||
local |
@ -0,0 +1,3 @@ |
|||
# lib |
|||
|
|||
Библиотека общих компонентов для сервисов Buildbox Fabric |
@ -0,0 +1,28 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"runtime" |
|||
"runtime/debug" |
|||
) |
|||
|
|||
func RunAsync(ctx context.Context, fn func()) { |
|||
go func() { |
|||
defer Recover(ctx) |
|||
fn() |
|||
}() |
|||
} |
|||
|
|||
func Recover(ctx context.Context) (flag bool, msg string) { |
|||
recoverErr := recover() |
|||
if recoverErr == nil { |
|||
return false, "" |
|||
} |
|||
|
|||
stack := debug.Stack() |
|||
pc, file, line, _ := runtime.Caller(2) |
|||
msg = fmt.Sprintf("Recovered panic. file: %s, line: %d, function: %s, error: %s, stack: %s", file, line, runtime.FuncForPC(pc).Name(), recoverErr, stack) |
|||
|
|||
return true, msg |
|||
} |
@ -0,0 +1,184 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"fmt" |
|||
"sync" |
|||
"time" |
|||
|
|||
"github.com/ReneKroon/ttlcache" |
|||
|
|||
"github.com/pkg/errors" |
|||
) |
|||
|
|||
// cacheKeyPrefix namespaces every key stored in the underlying ttlcache instances.
const cacheKeyPrefix = "cache."

var (
	// cacheCollection is the package-level registry of cache items;
	// it is populated by CacheRegister() and accessed via Cache().
	cacheCollection cache
)

// cache is a registry of named cache items guarded by a read-write mutex.
type cache struct {
	items map[string]*cacheItem
	mx    sync.RWMutex
}

// cacheItem bundles a data source with its TTL-bound cache, a persistent
// fallback cache (used while a refresh is in progress) and per-key refresh locks.
type cacheItem struct {
	// reader defines the mechanism for fetching data from any source that
	// implements the Reader interface
	reader          Reader
	cache           *ttlcache.Cache
	persistentCache *ttlcache.Cache
	locks           locks
	cacheTTL        time.Duration
}

// Reader is implemented by data sources able to produce the raw bytes to cache.
type Reader interface {
	ReadSource() (res []byte, err error)
}
|||
|
|||
func Cache() *cache { |
|||
if &cacheCollection == nil { |
|||
panic("cache has not been initialized, call CacheRegister() before use") |
|||
} |
|||
|
|||
return &cacheCollection |
|||
} |
|||
|
|||
// Register регистрируем новый кеш (указываем фукнцию, кр будет возвращать нужное значение)
|
|||
func (c *cache) Register(key string, source Reader, ttl time.Duration) (err error) { |
|||
c.mx.Lock() |
|||
defer c.mx.Unlock() |
|||
|
|||
cache := ttlcache.NewCache() |
|||
cache.SkipTtlExtensionOnHit(true) |
|||
|
|||
ci := cacheItem{ |
|||
cache: cache, |
|||
persistentCache: ttlcache.NewCache(), |
|||
locks: locks{keys: map[string]bool{}}, |
|||
reader: source, |
|||
cacheTTL: ttl, |
|||
} |
|||
c.items[key] = &ci |
|||
return err |
|||
} |
|||
|
|||
// Unregister
|
|||
func (c *cache) Unregister(key string) (err error) { |
|||
c.mx.Lock() |
|||
defer c.mx.Unlock() |
|||
|
|||
delete(c.items, key) |
|||
return err |
|||
} |
|||
|
|||
// Get возвращает текущее значение параметра в сервисе keeper.
|
|||
// Нужно учитывать, что значения на время кешируются и обновляются с заданной периодичностью.
|
|||
func (c *cache) Get(key string) (value interface{}, err error) { |
|||
var item *cacheItem |
|||
var found bool |
|||
|
|||
item, found = c.items[key] |
|||
if !found { |
|||
return nil, fmt.Errorf("error. key is not found") |
|||
} |
|||
|
|||
if item.cache == nil { |
|||
return nil, fmt.Errorf("cache is not inited") |
|||
} |
|||
|
|||
if item.persistentCache == nil { |
|||
return nil, fmt.Errorf("persistent cache is not inited") |
|||
} |
|||
|
|||
if cachedValue, ok := item.cache.Get(cacheKeyPrefix + key); ok { |
|||
return cachedValue, nil |
|||
} |
|||
|
|||
// Если стоит блокировка, значит кто-то уже обновляет кеш. В этом случае
|
|||
// пытаемся отдать предыдущее значение.
|
|||
if item.locks.Get(key) { |
|||
return c.tryToGetOldValue(key) |
|||
} |
|||
|
|||
// Значение не найдено. Первый из запросов блокирует за собой обновление (на самом деле
|
|||
// может возникнуть ситуация когда несколько запросов поставят блокировку и начнут
|
|||
// обновлять кеш - пока считаем это некритичным).
|
|||
item.locks.Set(key, true) |
|||
defer item.locks.Set(key, false) |
|||
|
|||
var values []byte |
|||
values, err = item.reader.ReadSource() |
|||
if err != nil { |
|||
return nil, errors.Wrap(err, "could not get value from getter") |
|||
} |
|||
|
|||
value = values |
|||
|
|||
item.cache.SetWithTTL(cacheKeyPrefix+key, value, item.cacheTTL) |
|||
item.persistentCache.Set(cacheKeyPrefix+key, value) |
|||
|
|||
return value, nil |
|||
} |
|||
|
|||
// tryToGetOldValue пытается получить старое значение, если в момент запроса на актуальном стоит блокировка.
|
|||
func (c *cache) tryToGetOldValue(key string) (interface{}, error) { |
|||
var item *cacheItem |
|||
var found bool |
|||
|
|||
item, found = c.items[key] |
|||
if !found { |
|||
return nil, fmt.Errorf("error. key is not found") |
|||
} |
|||
|
|||
fnGetPersistentCacheValue := func() (interface{}, error) { |
|||
if cachedValue, ok := item.persistentCache.Get(cacheKeyPrefix + key); ok { |
|||
return cachedValue, nil |
|||
} |
|||
|
|||
return nil, fmt.Errorf("persinstent cache is empty") |
|||
} |
|||
|
|||
oldValue, err := fnGetPersistentCacheValue() |
|||
|
|||
// Повторяем попытку получить значение. При старте сервиса может возникнуть блокировка
|
|||
// обновления ключа, но при этом в постоянном кеше еще может не быть значения.
|
|||
if err != nil { |
|||
time.Sleep(100 * time.Millisecond) |
|||
|
|||
oldValue, err = fnGetPersistentCacheValue() |
|||
} |
|||
|
|||
return oldValue, err |
|||
} |
|||
|
|||
// CacheInit инициализировали глобальную переменную defaultCache
|
|||
// source - источник, откуда мы получаем значения для кеширования
|
|||
func CacheRegister() { |
|||
d := cache{ |
|||
items: map[string]*cacheItem{}, |
|||
mx: sync.RWMutex{}, |
|||
} |
|||
cacheCollection = d |
|||
} |
|||
|
|||
// locks provides per-key flags used to serialize concurrent cache refreshes.
type locks struct {
	// keys maps a cache key to its refresh flag; true means one of the
	// goroutines currently owns the refresh of that key.
	keys map[string]bool
	mx   sync.RWMutex
}

// Get reports whether the given key is currently being refreshed.
func (l *locks) Get(key string) bool {
	l.mx.RLock()
	v := l.keys[key]
	l.mx.RUnlock()
	return v
}

// Set marks (true) or clears (false) the refresh flag for the given key.
func (l *locks) Set(key string, value bool) {
	l.mx.Lock()
	defer l.mx.Unlock()
	l.keys[key] = value
}
@ -0,0 +1,328 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"os" |
|||
|
|||
"github.com/urfave/cli" |
|||
) |
|||
|
|||
// sep is the OS-specific path separator as a string, used for manual path concatenation.
const sep = string(os.PathSeparator)
|||
|
|||
// RunServiceFuncCLI parses the command-line arguments and dispatches them to
// the supplied callback funcCLI. Every recognized flag is passed through as a
// plain string; the "action" argument tells the callback which subcommand was
// invoked (webinit/update/stop/start/init), and slots that a subcommand does
// not use are passed as empty strings.
func RunServiceFuncCLI(funcCLI func(configfile, dir, port, mode, service, param1, param2, param3, sourcedb, action, version string) error) error {
	var err error

	appCLI := cli.NewApp()
	appCLI.Usage = "Demon Buildbox Proxy started"
	appCLI.Commands = []cli.Command{
		{
			// webinit: start the web UI for initial infrastructure setup.
			Name: "webinit", ShortName: "",
			Usage: "Start Web-UI from init infractractire LowCodePlatform-service",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "port, c",
					Usage: "Порт запуска UI",
					Value: "8088",
				},
			},
			Action: func(c *cli.Context) error {
				port := c.String("port")

				err = funcCLI("", "", port, "", "", "", "", "", "", "webinit", "")
				return err
			},
		},
		{
			// update: update a named service to the given version.
			Name: "update", ShortName: "",
			Usage: "Update service",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Обновить сервис",
					Value: "lowcodebox",
				},
				cli.StringFlag{
					Name:  "version, v",
					Usage: "Версия, до которой обновляем",
					Value: "latest",
				},
			},
			Action: func(c *cli.Context) error {
				service := c.String("service")
				version := c.String("version")

				err = funcCLI("", "", "", "", service, "", "", "", "", "update", version)
				return err
			},
		},
		{
			// stop: stop one or more services (comma-separated).
			Name: "stop", ShortName: "",
			Usage: "Stop service",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Остановить сервисы (через запятую). '-s systems' - остановить системные сервисы; '-s custom' - остановить рабочие пользовательские сервисы ",
					Value: "all",
				},
			},
			Action: func(c *cli.Context) error {
				service := c.String("service")

				err = funcCLI("", "", "", "", service, "", "", "", "", "stop", "")
				return err
			},
		},
		{
			// start: start a single service process with a given config/port/mode.
			Name: "start", ShortName: "",
			Usage: "Start single Buildbox-service process",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "config, c",
					Usage: "Название файла конфигурации, с которым будет запущен сервис",
					Value: "lowcodebox",
				},
				cli.StringFlag{
					Name:  "dir, d",
					Usage: "Путь к шаблонам",
					Value: "",
				},
				cli.StringFlag{
					Name:  "port, p",
					Usage: "Порт, на котором запустить процесс",
					Value: "",
				},
				cli.StringFlag{
					Name:  "mode, m",
					Usage: "Доп.режимы запуска: debug (логирования stdout в файл)",
					Value: "",
				},

				cli.StringFlag{
					Name:  "service, s",
					Usage: "Запуск сервиса (для запуска нескольких сервисов укажите их через запятую)",
					Value: "systems",
				},
			},
			Action: func(c *cli.Context) error {
				configfile := c.String("config")
				port := c.String("port")
				dir := c.String("dir")
				mode := c.String("mode")

				service := c.String("service")

				// "default" resolves to the root directory of the installation.
				if dir == "default" {
					dir, err = RootDir()
				}

				err = funcCLI(configfile, dir, port, mode, service, "", "", "", "", "start", "")
				return err
			},
		},
		{
			// init: initialize a service/project (optionally creating the DB).
			Name: "init", ShortName: "",
			Usage: "Init single LowCodePlatform-service process",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "service, s",
					Usage: "Инициализация сервиса",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "version, v",
					Usage: "До какой версии обновить выбранный сервис",
					Value: "latest",
				},
				cli.StringFlag{
					Name:  "param1, p1",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "param2, p2",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "param3, p3",
					Usage: "Зарезервировано",
					Value: "false",
				},
				cli.StringFlag{
					Name:  "dir, d",
					Usage: "Директория создания проекта (по-умолчанию - текущая директория)",
					Value: "",
				},
				cli.StringFlag{
					Name:  "sourcedb, db",
					Usage: "База данных, где будет развернута фабрика (поддерживается SQLite, MySQL, Postgres, CocckroachDB) (по-умолчанию: SQLite)",
					Value: "./default.db",
				},
			},
			Action: func(c *cli.Context) error {
				service := c.String("service")
				param1 := c.String("param1")
				param2 := c.String("param2")
				param3 := c.String("param3")
				dir := c.String("dir")
				version := c.String("version")
				sourcedb := c.String("sourcedb")

				// "default" resolves to the root directory of the installation.
				if dir == "default" {
					dir, err = RootDir()
				}

				err = funcCLI("", dir, "", "", service, param1, param2, param3, sourcedb, "init", version)
				return err
			},
		},
	}
	err = appCLI.Run(os.Args)

	return err
}
|||
|
|||
// Stop terminates the process with the given pid by sending it a kill signal.
func Stop(pid int) (err error) {
	p, err := os.FindProcess(pid)
	if err != nil {
		return err
	}
	return p.Signal(os.Kill)
}
|||
|
|||
// завершение всех процессов для текущей конфигурации
|
|||
// config - ид-конфигурации
|
|||
//func PidsByConfig(config, portProxy string) (result []string, err error) {
|
|||
// _, fullresult, _, _ := Ps("full", portProxy)
|
|||
//
|
|||
// // получаем pid для переданной конфигурации
|
|||
// for _, v1 := range fullresult {
|
|||
// for _, v := range v1 {
|
|||
// configfile := v[1] // файл
|
|||
// idProcess := v[0] // pid
|
|||
//
|
|||
// if config == configfile {
|
|||
// result = append(result, idProcess)
|
|||
// }
|
|||
//
|
|||
// if err != nil {
|
|||
// fmt.Println("Error stopped process config:", config, ", err:", err)
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
//
|
|||
// return
|
|||
//}
|
|||
|
|||
// получаем строки пидов подходящих под условия, в котором:
|
|||
// domain - название проекта (домен)
|
|||
// alias - название алиас-сервиса (gui/api/proxy и тд - то, что в мап-прокси идет второй частью адреса)
|
|||
// если алиас явно не задан, то он может быть получен из домена
|
|||
//func PidsByAlias(domain, alias, portProxy string) (result []string, err error) {
|
|||
//
|
|||
// if domain == "" {
|
|||
// domain = "all"
|
|||
// }
|
|||
// if alias == "" {
|
|||
// alias = "all"
|
|||
// }
|
|||
//
|
|||
// // можем в домене передать полный путь с учетом алиаса типа buildbox/gui
|
|||
// // в этом случае алиас если он явно не задан заполним значением алиаса полученного из домена
|
|||
// splitDomain := strings.Split(domain, "/")
|
|||
// if len(splitDomain) == 2 {
|
|||
// domain = splitDomain[0]
|
|||
// alias = splitDomain[1]
|
|||
// }
|
|||
// _, _, raw, _ := Ps("full", portProxy)
|
|||
//
|
|||
// // получаем pid для переданной конфигурации
|
|||
// for _, pidRegistry := range raw {
|
|||
// for d, v1 := range pidRegistry {
|
|||
// // пропускаем если точное сравнение и не подоходит
|
|||
// if domain != "all" && d != domain {
|
|||
// continue
|
|||
// }
|
|||
//
|
|||
// for a, v2 := range v1 {
|
|||
// // пропускаем если точное сравнение и не подоходит
|
|||
// if alias != "all" && a != alias {
|
|||
// continue
|
|||
// }
|
|||
//
|
|||
// for _, v3 := range v2 {
|
|||
// k3 := strings.Split(v3, ":")
|
|||
// idProcess := k3[0] // pid
|
|||
// // дополняем результат значениями домена и алиаса (для возврата их при остановке если не переданы алиас явно)
|
|||
// // бывают значения, когда мы останавлитваем процесс тошько по домену и тогда мы не можем возврашить алиас остановленного процесса
|
|||
// // а алиас нужен для поиска в прокси в картах /Pid и /Мар для удаления из активных сервисов по домену и алиасу
|
|||
// // если алиаса нет (не приходит в ответе от лоадера, то не находим и прибитые процессы залипают в мапах)
|
|||
// result = append(result, v3+":"+ d + ":" + a)
|
|||
//
|
|||
// if err != nil {
|
|||
// fmt.Println("Error stopped process: pid:", idProcess, ", err:", err)
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
// }
|
|||
//
|
|||
// return
|
|||
//}
|
|||
|
|||
// уничтожить все процессы
|
|||
//func Destroy(portProxy string) (err error) {
|
|||
// pids, _, _, _ := Ps("pid", portProxy)
|
|||
// for _, v := range pids {
|
|||
// pi, err := strconv.Atoi(v)
|
|||
// if err == nil {
|
|||
// Stop(pi)
|
|||
// }
|
|||
// }
|
|||
// return err
|
|||
//}
|
|||
|
|||
// инициализация приложения
|
|||
//func Install() (err error) {
|
|||
//
|
|||
// // 1. задание переменных окружения
|
|||
// currentDir, err := CurrentDir()
|
|||
// if err != nil {
|
|||
// return
|
|||
// }
|
|||
// os.Setenv("BBPATH", currentDir)
|
|||
//
|
|||
// //var rootPath = os.Getenv("BBPATH")
|
|||
//
|
|||
// //fmt.Println(rootPath)
|
|||
// //path, _ := os.LookupEnv("BBPATH")
|
|||
// //fmt.Print("BBPATH: ", path)
|
|||
//
|
|||
// // 2. копирование файла запуска в /etc/bin
|
|||
// //src := "./buildbox"
|
|||
// //dst := "/usr/bin/buildbox"
|
|||
// //
|
|||
// //in, err := os.Open(src)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //defer in.Close()
|
|||
// //
|
|||
// //out, err := os.Create(dst)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //defer out.Close()
|
|||
// //
|
|||
// //_, err = io.Copy(out, in)
|
|||
// //if err != nil {
|
|||
// // return err
|
|||
// //}
|
|||
// //return out.Close()
|
|||
//
|
|||
// return err
|
|||
//}
|
@ -0,0 +1,107 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"encoding/base64" |
|||
"fmt" |
|||
"os" |
|||
"strings" |
|||
|
|||
"github.com/BurntSushi/toml" |
|||
"github.com/kelseyhightower/envconfig" |
|||
"github.com/labstack/gommon/color" |
|||
) |
|||
|
|||
// warning is a preformatted, red-colored "[Fail]" tag used as a prefix in console error output.
var warning = color.Red("[Fail]")
|||
|
|||
// ConfigLoad читаем конфигурации
|
|||
// получаем только название конфигурации
|
|||
// 1. поднимаемся до корневой директории
|
|||
// 2. от нее ищем полный путь до конфига
|
|||
// 3. читаем по этому пути
|
|||
func ConfigLoad(config string, pointToCfg interface{}) (err error) { |
|||
var payload string |
|||
|
|||
if err := envconfig.Process("", pointToCfg); err != nil { |
|||
fmt.Printf("%s Error load default enviroment: %s\n", warning, err) |
|||
err = fmt.Errorf("Error load default enviroment: %s", err) |
|||
return err |
|||
} |
|||
|
|||
// проверка на длину конфигурационного файла
|
|||
// если он больше 100, то скорее всего передали конфигурацию в base64
|
|||
if len(config) < 200 { |
|||
// 3.
|
|||
if len(config) == 0 { |
|||
return fmt.Errorf("%s", "Error. Configfile is empty.") |
|||
} |
|||
if !strings.Contains(config, "."){ |
|||
config = config + ".cfg" |
|||
} |
|||
|
|||
// 4. читаем из файла
|
|||
payload, err = ReadFile(config) |
|||
if err != nil { |
|||
return fmt.Errorf("Error raed configfile: (%s), err: %s", config, err) |
|||
} |
|||
|
|||
} else { |
|||
// пробуем расшифровать из base64
|
|||
debase, err := base64.StdEncoding.DecodeString(config) |
|||
if err != nil { |
|||
return fmt.Errorf("Error decode to string from base64 configfile. err: %s", err) |
|||
} |
|||
payload = string(debase) |
|||
} |
|||
err = decodeConfig(payload, pointToCfg) |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Читаем конфигурация по заданному полному пути
|
|||
func decodeConfig(configfile string, cfg interface{}) (err error) { |
|||
if _, err = toml.Decode(configfile, cfg); err != nil { |
|||
fmt.Printf("%s Error: %s (configfile: %s)\n", warning, err, configfile) |
|||
} |
|||
|
|||
return err |
|||
} |
|||
|
|||
// searchConfigDir walks the tree under startDir looking for a file whose name
// contains "<configuration>.cfg" and returns the full path of the first match
// (depth-first order). Hidden directories (name starting with ".") and paths
// containing "/." are skipped. An empty configPath with a nil error means the
// configuration was not found anywhere under startDir.
func searchConfigDir(startDir, configuration string) (configPath string, err error) {
	var nextPath string
	directory, err := os.Open(startDir)
	if err != nil {
		return "", err
	}
	defer directory.Close()

	// Read all entries of the current directory at once.
	objects, err := directory.Readdir(-1)
	if err != nil {
		return "", err
	}

	// Scan the current directory, recursing into subdirectories.
	for _, obj := range objects {
		// NOTE(review): manual concatenation with sep; filepath.Join would
		// also clean the path — confirm callers don't rely on the raw form.
		nextPath = startDir + sep + obj.Name()
		if obj.IsDir() {
			dirName := obj.Name()

			// Skip hidden directories (dirName is non-empty here: it comes
			// from Readdir, so dirName[:1] cannot panic).
			if dirName[:1] != "." {
				configPath, err = searchConfigDir(nextPath, configuration)
				if configPath != "" {
					return configPath, err // propagate the first match upward
				}
			}
		} else {
			if !strings.Contains(nextPath, "/.") {
				// Only consider configuration files (e.g. ignore .json files).
				if strings.Contains(obj.Name(), configuration + ".cfg") {
					return nextPath, err
				}
			}
		}
	}

	return configPath, err
}
@ -0,0 +1,99 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/aes" |
|||
"crypto/cipher" |
|||
"crypto/rand" |
|||
"encoding/base64" |
|||
"errors" |
|||
"io" |
|||
"strings" |
|||
) |
|||
|
|||
// Пример использования
|
|||
//func main() {
|
|||
// key := []byte("LKHlhb899Y09olUi")
|
|||
// encryptMsg, _ := encrypt(key, "Hello World")
|
|||
// msg, _ := decrypt(key, encryptMsg)
|
|||
// fmt.Println(msg) // Hello World
|
|||
//}
|
|||
|
|||
|
|||
// addBase64Padding restores the "=" padding previously stripped from a base64
// string so that its length is a multiple of four again.
func addBase64Padding(value string) string {
	if rem := len(value) % 4; rem != 0 {
		return value + strings.Repeat("=", 4-rem)
	}
	return value
}
|||
|
|||
// removeBase64Padding strips every "=" padding character from a base64 string.
func removeBase64Padding(value string) string {
	return strings.ReplaceAll(value, "=", "")
}
|||
|
|||
// unpad removes the PKCS#7-style padding appended by Pad and returns the
// unpadded payload. It returns an error when the padding is inconsistent,
// which typically means the data was decrypted with the wrong key.
//
// Fixes: the original indexed src[length-1] without checking for an empty
// slice (panic on empty input) and silently accepted a zero padding byte,
// which Pad never produces.
func unpad(src []byte) ([]byte, error) {
	length := len(src)
	if length == 0 {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}

	unpadding := int(src[length-1])
	if unpadding == 0 || unpadding > length {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}

	return src[:length-unpadding], nil
}
|||
|
|||
// Pad appends PKCS#7-style padding so the result length is a multiple of
// aes.BlockSize. A full extra block of padding is added when the input is
// already block-aligned, so unpad can always recover the original length.
func Pad(src []byte) []byte {
	n := aes.BlockSize - len(src)%aes.BlockSize
	for i := 0; i < n; i++ {
		src = append(src, byte(n))
	}
	return src
}
|||
|
|||
func Encrypt(key []byte, text string) (string, error) { |
|||
block, err := aes.NewCipher(key) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
msg := Pad([]byte(text)) |
|||
ciphertext := make([]byte, aes.BlockSize+len(msg)) |
|||
iv := ciphertext[:aes.BlockSize] |
|||
if _, err := io.ReadFull(rand.Reader, iv); err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
cfb := cipher.NewCFBEncrypter(block, iv) |
|||
cfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(msg)) |
|||
finalMsg := removeBase64Padding(base64.URLEncoding.EncodeToString(ciphertext)) |
|||
return finalMsg, nil |
|||
} |
|||
|
|||
func Decrypt(key []byte, text string) (string, error) { |
|||
block, err := aes.NewCipher(key) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
decodedMsg, err := base64.URLEncoding.DecodeString(addBase64Padding(text)) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
if (len(decodedMsg) % aes.BlockSize) != 0 { |
|||
return "", errors.New("blocksize must be multipe of decoded message length") |
|||
} |
|||
|
|||
iv := decodedMsg[:aes.BlockSize] |
|||
msg := decodedMsg[aes.BlockSize:] |
|||
|
|||
cfb := cipher.NewCFBDecrypter(block, iv) |
|||
cfb.XORKeyStream(msg, msg) |
|||
|
|||
unpadMsg, err := unpad(msg) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
return string(unpadMsg), nil |
|||
} |
@ -0,0 +1,306 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"archive/zip" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"os" |
|||
"path/filepath" |
|||
"strings" |
|||
) |
|||
|
|||
// CreateFile Создаем файл по указанному пути если его нет
|
|||
func CreateFile(path string) (err error) { |
|||
|
|||
// detect if file exists
|
|||
_, err = os.Stat(path) |
|||
var file *os.File |
|||
|
|||
// delete old file if exists
|
|||
if !os.IsNotExist(err) { |
|||
os.RemoveAll(path) |
|||
} |
|||
|
|||
// create file
|
|||
file, err = os.Create(path) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer file.Close() |
|||
|
|||
return err |
|||
} |
|||
|
|||
// WriteFile пишем в файл по указанному пути
|
|||
func WriteFile(path string, data []byte) (err error) { |
|||
|
|||
// detect if file exists and create
|
|||
err = CreateFile(path) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
// open file using READ & WRITE permission
|
|||
file, err := os.OpenFile(path, os.O_RDWR, 0644) |
|||
if err != nil { |
|||
return |
|||
} |
|||
defer file.Close() |
|||
|
|||
// write into file
|
|||
_, err = file.Write(data) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
// save changes
|
|||
err = file.Sync() |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// ReadFile читаем файл. (отключил: всегда в рамках рабочей диретории)
|
|||
func ReadFile(path string) (result string, err error) { |
|||
// если не от корня, то подставляем текущую директорию
|
|||
//if path[:1] != "/" {
|
|||
// path = CurrentDir() + "/" + path
|
|||
//} else {
|
|||
// path = CurrentDir() + path
|
|||
//}
|
|||
|
|||
file, err := os.Open(path) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
defer file.Close() |
|||
|
|||
b, err := ioutil.ReadAll(file) |
|||
if err == nil { |
|||
result = string(b) |
|||
} |
|||
|
|||
return result, err |
|||
} |
|||
|
|||
// CopyFolder recursively copies the directory tree at source to dest,
// creating dest with the source directory's mode.
//
// NOTE(review): per-entry copy failures are only printed to stdout, not
// propagated — the named return holds whatever the LAST iteration produced,
// so earlier failures can be silently lost. The error from Readdir is also
// ignored. Callers may rely on this best-effort behavior; confirm before
// tightening it.
func CopyFolder(source string, dest string) (err error) {

	sourceinfo, err := os.Stat(source)
	if err != nil {
		return err
	}

	// Create the destination with the same permission bits as the source.
	err = os.MkdirAll(dest, sourceinfo.Mode())
	if err != nil {
		return err
	}

	// Open error is ignored here; a nil directory would make Readdir fail below.
	directory, _ := os.Open(source)
	defer directory.Close()
	objects, err := directory.Readdir(-1)

	for _, obj := range objects {
		sourcefilepointer := source + "/" + obj.Name()
		destinationfilepointer := dest + "/" + obj.Name()

		if obj.IsDir() {
			// Recurse into subdirectories.
			err = CopyFolder(sourcefilepointer, destinationfilepointer)
			if err != nil {
				fmt.Println(err)
			}
		} else {
			err = CopyFile(sourcefilepointer, destinationfilepointer)
			if err != nil {
				fmt.Println(err)
			}
		}

	}
	return
}
|||
|
|||
// CopyFile копирование файла
|
|||
func CopyFile(source string, dest string) (err error) { |
|||
sourcefile, err := os.Open(source) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer sourcefile.Close() |
|||
|
|||
destfile, err := os.Create(dest) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer destfile.Close() |
|||
|
|||
_, err = io.Copy(destfile, sourcefile) |
|||
if err == nil { |
|||
sourceinfo, err := os.Stat(source) |
|||
if err != nil { |
|||
err = os.Chmod(dest, sourceinfo.Mode()) |
|||
} |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// IsExist reports whether a file or directory exists at path.
func IsExist(path string) (exist bool) {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
|||
|
|||
// CreateDir creates the directory path, including any missing parents.
// A zero mode defaults to 0711.
func CreateDir(path string, mode os.FileMode) (err error) {
	if mode == 0 {
		mode = 0711
	}
	return os.MkdirAll(path, mode)
}
|||
|
|||
// DeleteFile removes the file (or empty directory) at path.
func DeleteFile(path string) (err error) {
	return os.Remove(path)
}
|||
|
|||
func MoveFile(source string, dest string) (err error) { |
|||
err = CopyFile(source, dest) |
|||
if err != nil { |
|||
return |
|||
} |
|||
err = DeleteFile(source) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// Zip archives source (a file or a directory) into the zip file target.
// For a directory source, entries are stored under its base name.
//
// Example: Zip("/tmp/documents", "/tmp/backup.zip")
func Zip(source, target string) (err error) {
	zipfile, err := os.Create(target)
	if err != nil {
		return err
	}
	defer zipfile.Close()

	archive := zip.NewWriter(zipfile)
	defer archive.Close()

	info, err := os.Stat(source)
	if err != nil {
		// BUG FIX: this used to `return nil`, silently hiding the failure
		return err
	}

	var baseDir string
	if info.IsDir() {
		baseDir = filepath.Base(source)
	}

	// BUG FIX: the error returned by filepath.Walk was previously discarded
	return filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		header, err := zip.FileInfoHeader(info)
		if err != nil {
			return err
		}

		if baseDir != "" {
			header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
		}

		if info.IsDir() {
			// directory entries are named with a trailing slash and stored empty
			header.Name += "/"
		} else {
			header.Method = zip.Deflate
		}

		writer, err := archive.CreateHeader(header)
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		_, err = io.Copy(writer, file)
		return err
	})
}
|||
|
|||
// Unzip extracts the zip file archive into the directory target.
//
// Example: Unzip("/tmp/report-2015.zip", "/tmp/reports/")
func Unzip(archive, target string) (err error) {
	reader, err := zip.OpenReader(archive)
	if err != nil {
		return err
	}
	// BUG FIX: the reader was never closed before
	defer reader.Close()

	if err := os.MkdirAll(target, 0755); err != nil {
		return err
	}

	for _, file := range reader.File {
		path := filepath.Join(target, file.Name)

		// guard against "zip slip": refuse entries escaping the target dir
		if path != filepath.Clean(target) &&
			!strings.HasPrefix(path, filepath.Clean(target)+string(os.PathSeparator)) {
			return fmt.Errorf("illegal file path in archive: %s", file.Name)
		}

		if file.FileInfo().IsDir() {
			// BUG FIX: MkdirAll error used to be ignored
			if err := os.MkdirAll(path, file.Mode()); err != nil {
				return err
			}
			continue
		}

		// extract a single entry inside a closure so that its defers fire
		// per entry instead of piling up until Unzip returns (old leak)
		if err := func() error {
			fileReader, err := file.Open()
			if err != nil {
				return err
			}
			defer fileReader.Close()

			targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
			if err != nil {
				return err
			}
			defer targetFile.Close()

			_, err = io.Copy(targetFile, fileReader)
			return err
		}(); err != nil {
			return err
		}
	}

	return nil
}
|||
|
|||
// Chmod sets the permission bits of the file at path to mode.
func Chmod(path string, mode os.FileMode) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return f.Chmod(mode)
}
@ -0,0 +1,239 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"crypto/sha1" |
|||
"encoding/hex" |
|||
"encoding/json" |
|||
"fmt" |
|||
"net/http" |
|||
"os" |
|||
"os/exec" |
|||
"path" |
|||
"path/filepath" |
|||
"strings" |
|||
"syscall" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/models" |
|||
uuid "github.com/satori/go.uuid" |
|||
) |
|||
|
|||
// ResponseJSON если status не из списка, то вставляем статус - 501 и Descraption из статуса
|
|||
func ResponseJSON(w http.ResponseWriter, objResponse interface{}, status string, error error, metrics interface{}) (err error) { |
|||
|
|||
if w == nil { |
|||
return |
|||
} |
|||
|
|||
errMessage := models.RestStatus{} |
|||
st, found := models.StatusCode[status] |
|||
if found { |
|||
errMessage = st |
|||
} else { |
|||
errMessage = models.StatusCode["NotStatus"] |
|||
} |
|||
|
|||
objResp := &models.Response{} |
|||
if error != nil { |
|||
errMessage.Error = error |
|||
} |
|||
|
|||
// Metrics
|
|||
b1, _ := json.Marshal(metrics) |
|||
var metricsR models.Metrics |
|||
json.Unmarshal(b1, &metricsR) |
|||
if metrics != nil { |
|||
objResp.Metrics = metricsR |
|||
} |
|||
|
|||
objResp.Status = errMessage |
|||
objResp.Data = objResponse |
|||
|
|||
// формируем ответ
|
|||
out, err := json.Marshal(objResp) |
|||
if err != nil { |
|||
out = []byte(fmt.Sprintf("%s", err)) |
|||
} |
|||
|
|||
//WriteFile("./dump.json", out)
|
|||
|
|||
w.WriteHeader(errMessage.Status) |
|||
w.Header().Set("Content-Type", "application/json; charset=UTF-8") |
|||
w.Write(out) |
|||
|
|||
return |
|||
} |
|||
|
|||
// RunProcess стартуем сервис из конфига
|
|||
func RunProcess(path, config, command, mode string) (pid int, err error) { |
|||
var cmd *exec.Cmd |
|||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) |
|||
|
|||
if config == "" { |
|||
return 0, fmt.Errorf("%s", "Configuration file is not found") |
|||
} |
|||
if command == "" { |
|||
command = "start" |
|||
} |
|||
|
|||
path = strings.Replace(path, "//", "/", -1) |
|||
|
|||
cmd = exec.Command(path, command, "--config", config, "--mode", mode) |
|||
if mode == "debug" { |
|||
t := time.Now().Format("2006.01.02-15-04-05") |
|||
s := strings.Split(path, sep) |
|||
srv := s[len(s)-1] |
|||
|
|||
dirPath := "debug" + sep + srv |
|||
err = CreateDir(dirPath, 0777) |
|||
if err != nil { |
|||
return 0, fmt.Errorf("error create directory for debug-file. path: %s, err: %s", dirPath, err) |
|||
} |
|||
|
|||
filePath := "debug" + sep + srv + sep + fmt.Sprint(t) + "_" + UUID()[:6] + ".log" |
|||
f, err := os.Create(filePath) |
|||
if err != nil { |
|||
return 0, fmt.Errorf("error create debug-file. path: %s, err: %s", filePath, err) |
|||
} |
|||
cmd.Stdout = f |
|||
cmd.Stderr = f |
|||
} |
|||
|
|||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} |
|||
err = cmd.Start() |
|||
if err != nil { |
|||
err = fmt.Errorf("status: %d, config: %s", cmd.ProcessState.ExitCode(), config) |
|||
|
|||
return 0, err |
|||
} |
|||
|
|||
pid = cmd.Process.Pid |
|||
|
|||
// в течение заданного интервала ожидаем завершающий статус запуска
|
|||
// или выходим если -1 (в процессе)
|
|||
for { |
|||
exitCode := cmd.ProcessState.ExitCode() |
|||
timer := time.NewTimer(100 * time.Millisecond) |
|||
// успешный запуск
|
|||
if exitCode == 0 { |
|||
timer.Stop() |
|||
return |
|||
} |
|||
// финальный неуспех
|
|||
if exitCode > 0 { |
|||
cancel() |
|||
} |
|||
|
|||
select { |
|||
case <-timer.C: |
|||
timer.Stop() |
|||
case <-ctx.Done(): // ожидание завершилось, если -1 - то работает
|
|||
timer.Stop() |
|||
return |
|||
} |
|||
} |
|||
|
|||
return |
|||
} |
|||
|
|||
// RootDir returns the directory containing the running executable,
// derived from os.Args[0].
func RootDir() (rootDir string, err error) {
	file, err := filepath.Abs(os.Args[0])
	if err != nil {
		return
	}
	// NOTE: the old second `if err != nil` check after this line was dead
	// code (err was already known to be nil) and has been removed.
	rootDir = path.Dir(file)

	return
}
|||
|
|||
// Hash returns the hex-encoded SHA-1 digest of str.
func Hash(str string) (result string) {
	sum := sha1.Sum([]byte(str))
	return hex.EncodeToString(sum[:])
}
|||
|
|||
// PanicOnErr prints err and panics when it is non-nil; otherwise no-op.
func PanicOnErr(err error) {
	if err == nil {
		return
	}
	fmt.Println("Error: ", err)
	panic(err)
}
|||
|
|||
func UUID() (result string) { |
|||
stUUID := uuid.NewV4() |
|||
return stUUID.String() |
|||
} |
|||
|
|||
// RemoveElementFromData удаляем элемент из слайса
|
|||
func RemoveElementFromData(p *models.ResponseData, i int) bool { |
|||
|
|||
if i < len(p.Data) { |
|||
p.Data = append(p.Data[:i], p.Data[i+1:]...) |
|||
} else { |
|||
//log.Warning("Error! Position invalid (", i, ")")
|
|||
return false |
|||
} |
|||
|
|||
return true |
|||
} |
|||
|
|||
// JsonEscape returns i with JSON string escaping applied.
//
//	JsonEscape(`dog "fish" cat`) -> `dog \"fish\" cat`
func JsonEscape(i string) string {
	b, err := json.Marshal(i)
	if err != nil {
		panic(err)
	}
	// strip the surrounding double quotes that Marshal adds
	quoted := string(b)
	return quoted[1 : len(quoted)-1]
}
|||
|
|||
// SearchConfigDir получаем путь до искомой конфигурации от переданной директории
|
|||
func SearchConfig(projectDir, configuration string) (configPath string, err error) { |
|||
var nextPath string |
|||
|
|||
directory, err := os.Open(projectDir) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
defer directory.Close() |
|||
|
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
// пробегаем текущую папку и считаем совпадание признаков
|
|||
for _, obj := range objects { |
|||
|
|||
nextPath = projectDir + sep + obj.Name() |
|||
if obj.IsDir() { |
|||
dirName := obj.Name() |
|||
|
|||
// не входим в скрытые папки
|
|||
if dirName[:1] != "." { |
|||
configPath, err = SearchConfig(nextPath, configuration) |
|||
if configPath != "" { |
|||
return configPath, err // поднимает результат наверх
|
|||
} |
|||
} |
|||
} else { |
|||
if !strings.Contains(nextPath, "/.") { |
|||
// проверяем только файлы конфигурации (игнорируем .json)
|
|||
if strings.Contains(obj.Name(), configuration+".cfg") { |
|||
return nextPath, err |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
return configPath, err |
|||
} |
@ -0,0 +1,165 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"strings" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/models" |
|||
"github.com/labstack/gommon/color" |
|||
) |
|||
|
|||
// Curl performs an HTTP request and always returns the raw response body
// (as an interface{} holding a string) plus an error — useful for
// external calls whose payload shape is unknown. When response is
// non-nil, the body is additionally unmarshalled into it.
//
// Special pseudo-methods:
//   - "JSONTOGET":  bodyJSON's fields are moved into the URL query string
//     (only when the URL does not already contain "?").
//   - "JSONTOPOST": bodyJSON's fields are sent as a form-encoded POST body.
//
// Any other method is passed through with bodyJSON as the request body.
// An empty method defaults to "POST". A non-200 status yields both the
// body and a non-nil error.
func Curl(method, urlc, bodyJSON string, response interface{}, headers map[string]string, cookies []*http.Cookie) (result interface{}, err error) {
	var mapValues map[string]string
	var req *http.Request
	client := &http.Client{}
	client.Timeout = 3 * time.Second

	if method == "" {
		method = "POST"
	}

	method = strings.Trim(method, " ")
	values := url.Values{}
	actionType := ""

	// if JSON was passed for a GET-style request it is appended to the
	// query string — but only when the URL carries no parameters already
	clearUrl := strings.Contains(urlc, "?")

	// NOTE(review): this strips ALL spaces from the body, which would
	// corrupt JSON string values containing spaces — confirm intended.
	bodyJSON = strings.Replace(bodyJSON, " ", "", -1)
	err = json.Unmarshal([]byte(bodyJSON), &mapValues)

	if method == "JSONTOGET" && bodyJSON != "" && clearUrl {
		actionType = "JSONTOGET"
	}
	if method == "JSONTOPOST" && bodyJSON != "" {
		actionType = "JSONTOPOST"
	}

	switch actionType {
	case "JSONTOGET": // fold the JSON fields into the query string
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			uri, _ := url.Parse(urlc)
			uri.RawQuery = values.Encode()
			urlc = uri.String()
			req, err = http.NewRequest("GET", urlc, strings.NewReader(bodyJSON))
		} else {
			// on a parse failure req stays nil; the err check below
			// returns before req is dereferenced
			fmt.Println("Error! Fail parsed bodyJSON from GET Curl: ", err)
		}
	case "JSONTOPOST": // fold the JSON fields into a form-encoded body
		if err == nil {
			for k, v := range mapValues {
				values.Set(k, v)
			}
			req, err = http.NewRequest("POST", urlc, strings.NewReader(values.Encode()))
			req.PostForm = values
			req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		} else {
			fmt.Println("Error! Fail parsed bodyJSON to POST: ", err)
		}
	default:
		req, err = http.NewRequest(method, urlc, strings.NewReader(bodyJSON))
	}

	if err != nil {
		return "", err
	}

	// add caller-supplied headers
	if len(headers) > 0 {
		for k, v := range headers {
			req.Header.Add(k, v)
		}
	}

	// add cookies assigned to this particular request
	if cookies != nil {
		for _, v := range cookies {
			req.AddCookie(v)
		}
	}

	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error request: method:", method, ", url:", urlc, ", bodyJSON:", bodyJSON, "err:", err)
		return "", err
	} else {
		defer resp.Body.Close()
	}

	responseData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	responseString := string(responseData)

	// if the caller passed a destination object, unmarshal the body into
	// it (unmarshal errors are deliberately ignored here)
	if response != nil {
		json.Unmarshal([]byte(responseString), &response)
	}

	// the raw body is always returned (useful for external requests or
	// client-side deserialization)
	//json.Unmarshal([]byte(responseString), &result)
	if resp.StatusCode != 200 {
		err = fmt.Errorf("request is not success. request:%s, status: %s", urlc, resp.Status)
	}

	return responseString, err
}
|||
|
|||
func AddressProxy(addressProxy, interval string) (port string, err error) { |
|||
fail := color.Red("[Fail]") |
|||
urlProxy := "" |
|||
|
|||
// если автоматическая настройка портов
|
|||
if addressProxy != "" && interval != "" { |
|||
if addressProxy[len(addressProxy)-1:] != "/" { |
|||
addressProxy = addressProxy + "/" |
|||
} |
|||
|
|||
var portDataAPI models.Response |
|||
// запрашиваем порт у указанного прокси-сервера
|
|||
urlProxy = addressProxy + "port?interval=" + interval |
|||
_, err := Curl("GET", urlProxy, "", &portDataAPI, map[string]string{}, nil) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
port = fmt.Sprint(portDataAPI.Data) |
|||
} |
|||
|
|||
if port == "" { |
|||
err = fmt.Errorf("%s", "Port APP-service is null. Servive not running.") |
|||
fmt.Print(fail, " Port APP-service is null. Servive not running.\n") |
|||
} |
|||
|
|||
return port, err |
|||
} |
|||
|
|||
// ClearSlash trims at most one trailing and one leading slash from url.
// The empty string (and the string "/") map to "".
func ClearSlash(url string) (result string) {
	if len(url) == 0 {
		return ""
	}

	// drop a single trailing slash
	if url[len(url)-1] == '/' {
		url = url[:len(url)-1]
	}

	// BUG FIX: the input "/" became "" above, and the old code then
	// indexed url[0:1] — an index-out-of-range panic.
	if len(url) == 0 {
		return ""
	}

	// drop a single leading slash
	if url[0] == '/' {
		url = url[1:]
	}

	return url
}
@ -0,0 +1,195 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"runtime/debug" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
// ConfigFileLogger holds the settings of the file-based logger.
type ConfigFileLogger struct {
	// Dir is the directory where log files are written.
	Dir string
	// IntervalReload is how often the current log file is reopened under a
	// fresh date-stamped name; IntervalClearFiles is how often old files
	// are scanned for deletion.
	IntervalReload, IntervalClearFiles time.Duration
	// PeriodSaveFiles is the retention period in "year-month-day" form
	// (e.g. "0-1-0" keeps files for one month).
	PeriodSaveFiles string
}
|||
|
|||
// вспомогательная фукнция очистки старых файлов для файлового логера
|
|||
func (l *log) fileLoggerClearing(ctx context.Context) { |
|||
|
|||
// попытка очистки старых файлов (каждые пол часа)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalClearFiles) |
|||
defer ticker.Stop() |
|||
|
|||
// получаем период, через который мы будем удалять файлы
|
|||
period := l.PeriodSaveFiles |
|||
if period == "" { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
slPeriod := strings.Split(period, "-") |
|||
if len(slPeriod) < 3 { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
|
|||
// получаем числовые значения года месяца и дня для расчета даты удаления файлов
|
|||
year, err := strconv.Atoi(slPeriod[0]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Year from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
month, err := strconv.Atoi(slPeriod[1]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Month from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
day, err := strconv.Atoi(slPeriod[2]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Day from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
oneMonthAgo := time.Now().AddDate(-year, -month, -day) // minus 1 месяц
|
|||
fileMonthAgoDate := oneMonthAgo.Format("2006.01.02") |
|||
|
|||
// пробегаем директорию и читаем все файлы, если имя меньше текущее время - месяц = удаляем
|
|||
directory, _ := os.Open(l.Dir) |
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
l.Error(err, "Error read directory: ", directory) |
|||
return |
|||
} |
|||
|
|||
for _, obj := range objects { |
|||
filename := obj.Name() |
|||
filenameMonthAgoDate := fileMonthAgoDate + "_" + l.Service |
|||
|
|||
if filenameMonthAgoDate > filename { |
|||
pathFile := l.Dir + sep + filename |
|||
err = os.Remove(pathFile) |
|||
if err != nil { |
|||
l.Error(err, "Error deleted file: ", pathFile) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
ticker = time.NewTicker(l.IntervalClearFiles) |
|||
} |
|||
} |
|||
}() |
|||
} |
|||
|
|||
// NewFileLogger инициируем логер, которых хранит логи в файлах по указанному пути
|
|||
func NewFileLogger(ctx context.Context, cfg ConfigLogger) (Log, error) { |
|||
var output io.Writer |
|||
var file *os.File |
|||
var err error |
|||
var mode os.FileMode |
|||
m := sync.Mutex{} |
|||
|
|||
l := &log{ |
|||
Output: output, |
|||
Levels: cfg.Level, |
|||
UID: cfg.Uid, |
|||
Name: cfg.Name, |
|||
Service: cfg.Srv, |
|||
Config: cfg.Config, |
|||
Dir: cfg.File.Dir, |
|||
IntervalReload: cfg.File.IntervalReload, |
|||
IntervalClearFiles: cfg.File.IntervalClearFiles, |
|||
PeriodSaveFiles: cfg.File.PeriodSaveFiles, |
|||
mux: &m, |
|||
File: file, |
|||
} |
|||
|
|||
datefile := time.Now().Format("2006.01.02") |
|||
logName := datefile + "_" + cfg.Srv + "_" + cfg.Uid + ".log" |
|||
|
|||
fmt.Println(logName) |
|||
|
|||
// создаем/открываем файл логирования и назначаем его логеру
|
|||
mode = 0711 |
|||
err = CreateDir(cfg.File.Dir, mode) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating directory") |
|||
return nil, err |
|||
} |
|||
|
|||
pathFile := cfg.File.Dir + "/" + logName |
|||
if !IsExist(pathFile) { |
|||
err = CreateFile(pathFile) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating file") |
|||
return nil, err |
|||
} |
|||
} |
|||
|
|||
file, err = os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) |
|||
defer file.Close() |
|||
|
|||
l.File = file |
|||
l.Output = file |
|||
if err != nil { |
|||
logrus.Panic(err, "error opening file") |
|||
return nil, err |
|||
} |
|||
|
|||
defer func() { |
|||
rec := recover() |
|||
if rec != nil { |
|||
b := string(debug.Stack()) |
|||
fmt.Printf("panic in loggier (RotateInit). stack: %+v", b) |
|||
//cancel()
|
|||
//os.Exit(1)
|
|||
} |
|||
}() |
|||
|
|||
// попытка обновить файл (раз в 10 минут)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalReload) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
l.File.Close() // закрыл старый файл
|
|||
datefile = time.Now().Format("2006.01.02") |
|||
logName = datefile + "_" + cfg.Srv + "_" + cfg.Uid + ".log" |
|||
pathFile = cfg.File.Dir + "/" + logName |
|||
if !IsExist(pathFile) { |
|||
err := CreateFile(pathFile) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating file") |
|||
return |
|||
} |
|||
} |
|||
|
|||
file, err = os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) |
|||
if err != nil { |
|||
logrus.Panic(err, "error opening file") |
|||
return |
|||
} |
|||
|
|||
output = file |
|||
l.Output = output |
|||
l.File = file |
|||
ticker = time.NewTicker(l.IntervalReload) |
|||
} |
|||
} |
|||
}() |
|||
l.fileLoggerClearing(ctx) |
|||
|
|||
return l, err |
|||
} |
@ -1,4 +1,4 @@ |
|||
package logger |
|||
package lib |
|||
|
|||
import ( |
|||
"bytes" |
@ -0,0 +1,96 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"io" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
// ConfigVfsLogger holds the settings of the VFS-backed logger
// (connection parameters of the storage backend plus rotation settings).
type ConfigVfsLogger struct {
	// Storage backend connection parameters, passed straight to NewVfs.
	Kind, Endpoint, AccessKeyID, SecretKey, Region, Bucket, Comma string
	// Dir is the directory (inside the storage) where log files live.
	Dir string
	// IntervalReload is how often the target log file name is re-stamped
	// with the current date.
	IntervalReload time.Duration
}
|||
|
|||
// NewVfsLogger инициализация отправки логов на сервер сбора
|
|||
// ВНИМАНИЕ! крайне неэффективно
|
|||
// при добавлении лога выкачивется весь файл лога, добавляется строка и перезаписывается
|
|||
func NewVfsLogger(ctx context.Context, cfg ConfigLogger) (logger Log, err error) { |
|||
var output io.Writer |
|||
m := sync.Mutex{} |
|||
|
|||
vfs := NewVfs(cfg.Vfs.Kind, cfg.Vfs.Endpoint, cfg.Vfs.AccessKeyID, cfg.Vfs.SecretKey, cfg.Vfs.Region, cfg.Vfs.Bucket, cfg.Vfs.Comma) |
|||
err = vfs.Connect() |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
sender := newVfsSender(ctx, vfs, cfg.Vfs.Dir, cfg.Srv, cfg.Uid, cfg.Vfs.IntervalReload) |
|||
output = sender |
|||
|
|||
l := &log{ |
|||
Output: output, |
|||
Levels: cfg.Level, |
|||
UID: cfg.Uid, |
|||
Name: cfg.Name, |
|||
Service: cfg.Srv, |
|||
IntervalReload: cfg.Vfs.IntervalReload, |
|||
mux: &m, |
|||
} |
|||
|
|||
return l, nil |
|||
} |
|||
|
|||
// vfsSender is an io.Writer that appends log data to a file held in a
// VFS storage backend (see Write for the read-modify-write cycle).
type vfsSender struct {
	vfsStorage Vfs    // storage backend the log file lives in
	file       string // full path of the current date-stamped log file
}
|||
|
|||
// Write implements io.Writer by appending p to the remote log file: the
// current file contents are downloaded, p is appended, and the combined
// result is written back in full — O(file size) per call (see the
// warning on NewVfsLogger).
func (v *vfsSender) Write(p []byte) (n int, err error) {
	// NOTE(review): the read error is deliberately(?) unchecked — on the
	// very first write the file does not exist yet and dataFile is empty;
	// confirm that other read failures should also fall through to a full
	// overwrite of the remote file.
	dataFile, _, err := v.vfsStorage.Read(v.file)
	concatSlices := [][]byte{
		dataFile,
		p,
	}
	resultSlice := bytes.Join(concatSlices, []byte(""))

	err = v.vfsStorage.Write(v.file, resultSlice)
	if err != nil {
		return 0, err
	}
	return len(p), nil
}
|||
|
|||
func newVfsSender(ctx context.Context, vfsStorage Vfs, dir, srv, uid string, intervalReload time.Duration) io.Writer { |
|||
|
|||
sender := &vfsSender{ |
|||
vfsStorage, |
|||
"", |
|||
} |
|||
|
|||
//datefile := time.Now().Format("2006.01.02")
|
|||
datefile := time.Now().Format("2006.01.02") |
|||
sender.file = "/" + dir + "/" + datefile + "_" + srv + "_" + uid + ".log" |
|||
|
|||
// попытка обновить файл (раз в 10 минут)
|
|||
go func() { |
|||
ticker := time.NewTicker(intervalReload) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
datefile = time.Now().Format("2006.01.02") |
|||
|
|||
sender.file = "/" + dir + "/" + datefile + "_" + srv + "_" + uid + ".log" |
|||
ticker = time.NewTicker(intervalReload) |
|||
} |
|||
} |
|||
}() |
|||
|
|||
return sender |
|||
} |
@ -0,0 +1,343 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"net/http" |
|||
"sort" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
// Metrics is the aggregated per-interval snapshot that gets serialized
// into the log by RunMetricLogger.
//
// "Quantile" fields hold the MEAN of the values lying above the given
// percentile boundary of the sorted sample series (see Generate).
//
// NOTE(review): Generate feeds the TPR_* fields from
// Duration.Microseconds(), so despite the _MS suffix the unit may
// actually be microseconds — confirm.
type Metrics struct {
	StateHost   StateHost
	Connections int     // total connections over the whole accounting period
	Queue_AVG   float32 // average number of requests in the queue
	Queue_QTL_80 float32 // 80% quantile — mean queue size above the 80% boundary of the sorted series
	Queue_QTL_90 float32 // 90% quantile
	Queue_QTL_99 float32 // 99% quantile
	TPR_AVG_MS    float32 // (ms) Time per request — average request processing time
	TPR_QTL_MS_80 float32 // (ms) 80% quantile — mean processing time above the 80% boundary of the sorted series
	TPR_QTL_MS_90 float32 // (ms) 90% quantile
	TPR_QTL_MS_99 float32 // (ms) 99% quantile

	RPS int // Requests per second
}
|||
|
|||
// serviceMetric accumulates raw samples between aggregation ticks and
// the computed Metrics produced from them.
type serviceMetric struct {
	Metrics
	Stash          Metrics         // snapshot of the previously computed metrics (what Get returns)
	connectionOpen int             // currently open connections (+1 on request start, -1 on response)
	queue          []int           // series of open-connection counts sampled on each start/end (see above)
	tpr            []time.Duration // series of request processing durations
	mux            *sync.Mutex
	ctx            context.Context
}
|||
|
|||
// ServiceMetric is the metrics-collector contract used by the HTTP layer
// (Middleware) and the background metric logger (RunMetricLogger).
type ServiceMetric interface {
	SetState()                                // refresh host-load metrics
	SetConnectionIncrement()                  // register a request start
	SetConnectionDecrement()                  // register a request end
	SetTimeRequest(timeRequest time.Duration) // record one request's processing time
	Generate()                                // compute aggregates from the raw samples
	Get() (result Metrics)                    // return the last stashed snapshot
	Clear()                                   // reset raw samples for the next window
	SaveToStash()                             // snapshot the computed metrics
	Middleware(next http.Handler) http.Handler
}
|||
|
|||
func (s *serviceMetric) SetState() { |
|||
//s.mux.Lock()
|
|||
//defer s.mux.Unlock()
|
|||
|
|||
s.StateHost.Tick() |
|||
|
|||
return |
|||
} |
|||
|
|||
// записываем время обработки запроса в массив
|
|||
func (s *serviceMetric) SetTimeRequest(timeRequest time.Duration) { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.tpr = append(s.tpr, timeRequest) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SetConnectionIncrement увеличиваем счетчик и добавляем в массив метрик
|
|||
// формируем временной ряд количества соединений
|
|||
// при начале запроса увеличиваем, при завершении уменьшаем
|
|||
// запускаем в отдельной рутине, потому что ф-ция вызывается из сервиса и не должна быть блокирующей
|
|||
func (s *serviceMetric) SetConnectionIncrement() { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Connections = s.Connections + 1 |
|||
s.connectionOpen = s.connectionOpen + 1 |
|||
s.queue = append(s.queue, s.connectionOpen) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SetConnectionDecrement уменьшаем счетчик и добавляем в массив метрик
|
|||
// запускаем в отдельной рутине, потому что ф-ция вызывается из сервиса и не должна быть блокирующей
|
|||
func (s *serviceMetric) SetConnectionDecrement() { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
if s.connectionOpen != 0 { |
|||
s.connectionOpen = s.connectionOpen - 1 |
|||
} |
|||
s.queue = append(s.queue, s.connectionOpen) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) SetP(value time.Duration) { |
|||
go func() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.tpr = append(s.tpr, value) |
|||
}() |
|||
|
|||
return |
|||
} |
|||
|
|||
// SaveToStash сохраняем текущее значение расчитанных метрик в кармане
|
|||
func (s *serviceMetric) SaveToStash() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Stash.StateHost = s.StateHost |
|||
s.Stash.Connections = s.Connections |
|||
s.Stash.RPS = s.RPS |
|||
|
|||
s.Stash.Queue_AVG = s.Queue_AVG |
|||
s.Stash.Queue_QTL_99 = s.Queue_QTL_99 |
|||
s.Stash.Queue_QTL_90 = s.Queue_QTL_90 |
|||
s.Stash.Queue_QTL_80 = s.Queue_QTL_80 |
|||
|
|||
s.Stash.TPR_AVG_MS = s.TPR_AVG_MS |
|||
s.Stash.TPR_QTL_MS_80 = s.TPR_QTL_MS_80 |
|||
s.Stash.TPR_QTL_MS_90 = s.TPR_QTL_MS_90 |
|||
s.Stash.TPR_QTL_MS_99 = s.TPR_QTL_MS_99 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Clear() { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.Connections = 0 |
|||
s.connectionOpen = 0 |
|||
s.queue = []int{} |
|||
s.tpr = []time.Duration{} |
|||
|
|||
s.RPS = 0 |
|||
s.Queue_AVG = 0.0 |
|||
s.Queue_QTL_80 = 0.0 |
|||
s.Queue_QTL_90 = 0.0 |
|||
s.Queue_QTL_99 = 0.0 |
|||
|
|||
s.TPR_AVG_MS = 0.0 |
|||
s.TPR_QTL_MS_80 = 0.0 |
|||
s.TPR_QTL_MS_90 = 0.0 |
|||
s.TPR_QTL_MS_99 = 0.0 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Get() (result Metrics) { |
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
return s.Stash |
|||
} |
|||
|
|||
func (s *serviceMetric) Generate() { |
|||
var val_Queue_QTL_80, val_Queue_QTL_90, val_Queue_QTL_99, val_Queue float32 |
|||
var Queue_AVG, Queue_QTL_80, Queue_QTL_90, Queue_QTL_99 float32 |
|||
var val_TPR_80, val_TPR_90, val_TPR_99, val_TPR float32 |
|||
var AVG_TPR, QTL_TPR_80, QTL_TPR_90, QTL_TPR_99 float32 |
|||
|
|||
s.mux.Lock() |
|||
defer s.mux.Unlock() |
|||
|
|||
s.SetState() // БЕЗ БЛОКИРОВКИ получаю текущие метрики загрузки хоста
|
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
// расчитываем среднее кол-во запросо и квартили (средние значения после 80-90-99 процентов всех запросов)
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
// сортируем список
|
|||
sort.Ints(s.queue) |
|||
|
|||
lenQueue := len(s.queue) |
|||
|
|||
if lenQueue != 0 { |
|||
len_Queue_QTL_80 := lenQueue * 8 / 10 |
|||
len_Queue_QTL_90 := lenQueue * 9 / 10 |
|||
len_Queue_QTL_99 := lenQueue * 99 / 100 |
|||
|
|||
for i, v := range s.queue { |
|||
vall := float32(v) |
|||
// суммируем значения которые после 80% других
|
|||
if i > len_Queue_QTL_80 { |
|||
val_Queue_QTL_80 = val_Queue_QTL_80 + vall |
|||
} |
|||
// суммируем значения которые после 90% других
|
|||
if i > len_Queue_QTL_90 { |
|||
val_Queue_QTL_90 = val_Queue_QTL_90 + vall |
|||
} |
|||
// суммируем значения которые после 99% других
|
|||
if i > len_Queue_QTL_99 { |
|||
val_Queue_QTL_99 = val_Queue_QTL_99 + vall |
|||
} |
|||
|
|||
val_Queue = val_Queue + vall |
|||
} |
|||
|
|||
lQ := float32(lenQueue) - 1 // проверка на 0
|
|||
if lQ == 0 { |
|||
lQ = 1 |
|||
} |
|||
Queue_AVG = val_Queue / lQ |
|||
Queue_QTL_80 = val_Queue_QTL_80 / float32(lenQueue-len_Queue_QTL_80) |
|||
Queue_QTL_90 = val_Queue_QTL_90 / float32(lenQueue-len_Queue_QTL_90) |
|||
Queue_QTL_99 = val_Queue_QTL_99 / float32(lenQueue-len_Queue_QTL_99) |
|||
} |
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
// расчитываем среднее время запросо и квартили (средние значения после 80-90-99 процентов всех запросов)
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
// сортируем список
|
|||
lenTPR := len(s.tpr) |
|||
if lenTPR != 0 { |
|||
|
|||
timeInt := []float64{} |
|||
for _, v := range s.tpr { |
|||
timeInt = append(timeInt, float64(v.Microseconds())) |
|||
} |
|||
sort.Float64s(timeInt) |
|||
|
|||
len_TPR_80 := lenTPR * 8 / 10 |
|||
len_TPR_90 := lenTPR * 9 / 10 |
|||
len_TPR_99 := lenTPR * 99 / 100 |
|||
|
|||
for i, v := range timeInt { |
|||
vall := float32(v) |
|||
// суммируем значения которые после 80% других
|
|||
if i > len_TPR_80 { |
|||
val_TPR_80 = val_TPR_80 + vall |
|||
} |
|||
// суммируем значения которые после 90% других
|
|||
if i > len_TPR_90 { |
|||
val_TPR_90 = val_TPR_90 + vall |
|||
} |
|||
// суммируем значения которые после 99% других
|
|||
if i > len_TPR_99 { |
|||
val_TPR_99 = val_TPR_99 + vall |
|||
} |
|||
|
|||
val_TPR = val_TPR + vall |
|||
} |
|||
|
|||
lQ := float32(lenQueue) - 1 |
|||
if lQ == 0 { |
|||
lQ = 1 |
|||
} |
|||
AVG_TPR = val_TPR / lQ |
|||
QTL_TPR_80 = val_TPR_80 / float32(lenTPR-len_TPR_80) |
|||
QTL_TPR_90 = val_TPR_90 / float32(lenTPR-len_TPR_90) |
|||
QTL_TPR_99 = val_TPR_99 / float32(lenTPR-len_TPR_99) |
|||
} |
|||
|
|||
//////////////////////////////////////////////////////////
|
|||
//////////////////////////////////////////////////////////
|
|||
|
|||
s.RPS = lenQueue / 10 |
|||
|
|||
s.Queue_AVG = Queue_AVG |
|||
s.Queue_QTL_80 = Queue_QTL_80 |
|||
s.Queue_QTL_90 = Queue_QTL_90 |
|||
s.Queue_QTL_99 = Queue_QTL_99 |
|||
|
|||
s.TPR_AVG_MS = AVG_TPR |
|||
s.TPR_QTL_MS_80 = QTL_TPR_80 |
|||
s.TPR_QTL_MS_90 = QTL_TPR_90 |
|||
s.TPR_QTL_MS_99 = QTL_TPR_99 |
|||
|
|||
return |
|||
} |
|||
|
|||
func (s *serviceMetric) Middleware(next http.Handler) http.Handler { |
|||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
|||
// увеличиваем счетчик активных сессий
|
|||
s.SetConnectionIncrement() |
|||
next.ServeHTTP(w, r) |
|||
|
|||
// уменьшаем счетчик активных сессий
|
|||
s.SetConnectionDecrement() |
|||
}) |
|||
} |
|||
|
|||
// interval - интервалы времени, через которые статистика будет сбрасыватсья в лог
|
|||
func NewMetric(ctx context.Context, logger Log, interval time.Duration) (metrics ServiceMetric) { |
|||
m := sync.Mutex{} |
|||
t := StateHost{} |
|||
s := Metrics{ |
|||
StateHost: t, |
|||
Queue_AVG: 0, |
|||
Queue_QTL_99: 0, |
|||
Queue_QTL_90: 0, |
|||
Queue_QTL_80: 0, |
|||
TPR_AVG_MS: 0, |
|||
TPR_QTL_MS_80: 0, |
|||
TPR_QTL_MS_90: 0, |
|||
TPR_QTL_MS_99: 0, |
|||
RPS: 0, |
|||
} |
|||
metrics = &serviceMetric{ |
|||
Metrics: s, |
|||
Stash: s, |
|||
connectionOpen: 0, |
|||
queue: []int{}, |
|||
mux: &m, |
|||
ctx: ctx, |
|||
} |
|||
|
|||
go RunMetricLogger(ctx, metrics, logger, interval) |
|||
|
|||
return metrics |
|||
} |
|||
|
|||
func RunMetricLogger(ctx context.Context, m ServiceMetric, logger Log, interval time.Duration) { |
|||
ticker := time.NewTicker(interval) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
// сохраняем значение метрик в лог
|
|||
m.Generate() // сгенерировали метрики
|
|||
m.SaveToStash() // сохранили в карман
|
|||
m.Clear() // очистили объект метрик для приема новых данных
|
|||
mes, _ := json.Marshal(m.Get()) |
|||
logger.Trace(string(mes)) // записали в лог из кармана
|
|||
|
|||
ticker = time.NewTicker(interval) |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,114 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"strings" |
|||
|
|||
"github.com/go-kit/kit/metrics" |
|||
kitprometheus "github.com/go-kit/kit/metrics/prometheus" |
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
var ( |
|||
// uid
|
|||
service_uid metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_uid", |
|||
}, []string{"value"}) |
|||
|
|||
service_name metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_name", |
|||
}, []string{"value"}) |
|||
|
|||
service_version metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_version", |
|||
}, []string{"value"}) |
|||
|
|||
service_status metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_status", |
|||
}, []string{"value"}) |
|||
|
|||
service_port_http metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_port_http", |
|||
}, []string{"value"}) |
|||
|
|||
service_pid metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_pid", |
|||
}, []string{"value"}) |
|||
|
|||
service_replicas metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_replicas", |
|||
}, []string{"value"}) |
|||
|
|||
service_port_https metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_port_https", |
|||
}, []string{"value"}) |
|||
|
|||
service_dead_time metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_dead_time", |
|||
}, []string{"value"}) |
|||
|
|||
service_follower metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_follower", |
|||
}, []string{"value"}) |
|||
|
|||
service_port_grpc metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_port_grpc", |
|||
}, []string{"value"}) |
|||
|
|||
service_port_metrics metrics.Gauge = kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ |
|||
Name: "service_port_metrics", |
|||
}, []string{"value"}) |
|||
) |
|||
|
|||
// SendServiceParamsToMetric publishes the service identification parameters
// (uid, name, version, ports, etc.) as Prometheus gauges. The payload travels
// in the "value" label of each gauge; the numeric gauge value itself is
// always 0 and carries no meaning.
func SendServiceParamsToMetric(uid, name, version, status, pid, replicas, portHTTP, portGRPC, portMetrics, portHTTPS, dead_time, follower string) {
	// count is deliberately left at its zero value: only the label matters.
	var count float64
	service_uid.With("value", uid).Set(count)
	service_name.With("value", name).Set(count)
	service_version.With("value", version).Set(count)
	service_status.With("value", status).Set(count)
	service_port_http.With("value", portHTTP).Set(count)
	service_pid.With("value", pid).Set(count)
	service_replicas.With("value", replicas).Set(count)
	service_port_https.With("value", portHTTPS).Set(count)
	service_dead_time.With("value", dead_time).Set(count)
	service_follower.With("value", follower).Set(count)
	service_port_grpc.With("value", portGRPC).Set(count)
	service_port_metrics.With("value", portMetrics).Set(count)
}
|||
|
|||
// ValidateNameVersion derives the proper project name and service version
// from the raw settings values. When project looks like a UID (more than
// three "-"-separated parts) the name is taken from the domain instead; when
// the version is missing it is recovered from the "name/version" form of
// domain (legacy addressing, kept for compatibility).
func ValidateNameVersion(project, version, domain string) (resName, resVersion string) {
	const fallback = "unknown"
	name := fallback

	if project != "" {
		// A project with more than three "-"-separated parts is treated as a
		// UID (kept for backward compatibility); prefer the domain prefix.
		if len(strings.Split(project, "-")) > 3 && domain != "" {
			project = strings.Split(domain, "/")[0]
		}
		name = project // project short name
	}

	if name == fallback && domain != "" {
		name = strings.Split(domain, "/")[0]
	}

	// TODO deprecated — remove once every service addresses by the short
	// project name.
	if version == "" || name == "" {
		switch parts := strings.Split(domain, "/"); len(parts) {
		case 1:
			if parts[0] != "" {
				name = parts[0]
			}
		case 2:
			if parts[0] != "" {
				name = parts[0]
			}
			if parts[1] != "" {
				version = parts[1]
			}
		}
	}

	return name, version
}
@ -0,0 +1,41 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"math" |
|||
"runtime" |
|||
|
|||
"github.com/shirou/gopsutil/mem" |
|||
) |
|||
|
|||
// StateHost is a snapshot of host resource usage (CPU, memory, disk) plus the
// number of running goroutines. See Tick for which fields are actually
// populated — currently only PercentageMemory and Goroutines are filled.
type StateHost struct {
	// Percentage* are usage percentages, Total* are capacities and Used* are
	// absolute usage values.
	PercentageCPU,
	PercentageMemory,
	PercentageDisk,
	TotalCPU,
	TotalMemory,
	TotalDisk,
	UsedCPU,
	UsedMemory,
	UsedDisk float64
	Goroutines int // number of goroutines at snapshot time
}
|||
|
|||
// Tick refreshes the snapshot in place. Currently only the memory usage
// percentage (via gopsutil) and the goroutine count are collected; CPU and
// disk collection is disabled and kept below as commented-out code.
// The mem.VirtualMemory error is ignored: on failure the field simply keeps
// its previous value.
func (c *StateHost) Tick() {
	//var pcpu, i float64

	memoryStat, _ := mem.VirtualMemory()
	//percentage, _ := cpu.Percent(0, true)
	//diskStat, _ := disk.Usage("/")
	//
	//for _, cpupercent := range percentage {
	//	pcpu = (pcpu + cpupercent)
	//	i ++
	//}

	//c.PercentageCPU = math.Round(pcpu / i)
	c.PercentageMemory = math.Round(memoryStat.UsedPercent)
	//c.PercentageDisk = math.Round(diskStat.UsedPercent)
	c.Goroutines = runtime.NumGoroutine()

	return
}
@ -0,0 +1,218 @@ |
|||
// Package lib/vfs позволяет хранить файлы на разных источниках без необходимости учитывать особенности
|
|||
// каждой реализации файлового хранилища
|
|||
// поддерживаются local, s3, azure (остальные активировать по-необходимости)
|
|||
package lib |
|||
|
|||
import ( |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/url" |
|||
"strings" |
|||
|
|||
"github.com/graymeta/stow" |
|||
"github.com/graymeta/stow/azure" |
|||
"github.com/graymeta/stow/local" |
|||
"github.com/graymeta/stow/s3" |
|||
|
|||
// support Azure storage
|
|||
_ "github.com/graymeta/stow/azure" |
|||
// support Google storage
|
|||
_ "github.com/graymeta/stow/google" |
|||
// support local storage
|
|||
_ "github.com/graymeta/stow/local" |
|||
// support swift storage
|
|||
_ "github.com/graymeta/stow/swift" |
|||
// support s3 storage
|
|||
_ "github.com/graymeta/stow/s3" |
|||
// support oracle storage
|
|||
_ "github.com/graymeta/stow/oracle" |
|||
) |
|||
|
|||
// vfs is the stow-backed implementation of the Vfs interface: connection
// settings together with the opened stow location and container.
type vfs struct {
	bucket string // default bucket (container) name
	// connection settings; kind is one of "local", "s3", "azure"
	kind, endpoint, accessKeyID, secretKey, region string
	location stow.Location  // opened storage location (set by Connect)
	container stow.Container // container within the location (set by Connect)
	comma string // optional path-separator replacement for flat storages
}
|||
|
|||
// Vfs abstracts a file storage (local, s3, azure, ...) so callers do not
// depend on a concrete backend. Connect must be called before any other
// method; Close releases the connection.
type Vfs interface {
	List(prefix string, pageSize int) (files []Item, err error)
	Read(file string) (data []byte, mimeType string, err error)
	ReadFromBucket(file, bucket string) (data []byte, mimeType string, err error)
	Write(file string, data []byte) (err error)
	Connect() (err error)
	Close() (err error)
}
|||
|
|||
// Item is a stored object; it re-exports stow.Item so callers of this package
// do not have to import stow directly.
type Item interface {
	stow.Item
}
|||
|
|||
// Connect инициируем подключение к хранилищу, в зависимости от типа соединения
|
|||
func (v *vfs) Connect() (err error) { |
|||
var config = stow.ConfigMap{} |
|||
var flagBucketExist bool |
|||
|
|||
if v.region == "" { |
|||
v.region = "eu-west-1" |
|||
} |
|||
switch v.kind { |
|||
case "s3": |
|||
config = stow.ConfigMap{ |
|||
s3.ConfigEndpoint: v.endpoint, |
|||
s3.ConfigAccessKeyID: v.accessKeyID, |
|||
s3.ConfigSecretKey: v.secretKey, |
|||
s3.ConfigRegion: v.region, |
|||
} |
|||
case "azure": |
|||
config = stow.ConfigMap{ |
|||
azure.ConfigAccount: v.accessKeyID, |
|||
azure.ConfigKey: v.secretKey, |
|||
} |
|||
case "local": |
|||
config = stow.ConfigMap{ |
|||
local.ConfigKeyPath: v.endpoint, |
|||
local.MetadataDir: v.bucket, |
|||
} |
|||
} |
|||
|
|||
// подсключаемся к хранилищу
|
|||
v.location, err = stow.Dial(v.kind, config) |
|||
if err != nil { |
|||
return fmt.Errorf("error create container from config. err: %s", err) |
|||
} |
|||
|
|||
// ищем переданных бакет, если нет, то создаем его
|
|||
err = stow.WalkContainers(v.location, stow.NoPrefix, 10000, func(c stow.Container, err error) error { |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if c.Name() == v.bucket { |
|||
flagBucketExist = true |
|||
return nil |
|||
} |
|||
return nil |
|||
}) |
|||
if err != nil { |
|||
return fmt.Errorf("error list to containers from config. err: %s", err) |
|||
} |
|||
|
|||
// создаем если нет
|
|||
if !flagBucketExist { |
|||
v.container, err = v.location.CreateContainer(v.bucket) |
|||
if err != nil { |
|||
return fmt.Errorf("error create container from config. err: %s", err) |
|||
} |
|||
} |
|||
|
|||
// инициируем переданный контейнер
|
|||
v.container, err = v.location.Container(v.bucket) |
|||
if err != nil { |
|||
return fmt.Errorf("error create container from config. err: %s", err) |
|||
} |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Close закрываем соединение
|
|||
func (v *vfs) Close() (err error) { |
|||
err = v.location.Close() |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Read reads the file at the given path from the project's own bucket;
// shorthand for ReadFromBucket with the configured bucket.
func (v *vfs) Read(file string) (data []byte, mimeType string, err error) {
	return v.ReadFromBucket(file, v.bucket)
}
|||
|
|||
// Read чтение по указанному пути из указанного бакета
|
|||
func (v *vfs) ReadFromBucket(file, bucket string) (data []byte, mimeType string, err error) { |
|||
var urlPath url.URL |
|||
|
|||
// если передан разделитель, то заменяем / на него (возможно понадобится для совместимости плоских хранилищ)
|
|||
if v.comma != "" { |
|||
file = strings.Replace(file, v.comma, sep, -1) |
|||
} |
|||
|
|||
// если локально, то добавляем к endpoint бакет
|
|||
if v.kind == "local" { |
|||
file = v.endpoint + sep + bucket + sep + file |
|||
// подчищаем //
|
|||
file = strings.Replace(file, sep+sep, sep, -1) |
|||
} else { |
|||
// подчищаем от части путей, которая использовалась раньше в локальном хранилище
|
|||
// легаси, удалить когда все сайты переедут на использование только vfs
|
|||
//localPrefix := sep + "upload" + sep + v.bucket
|
|||
localPrefix := "upload" + sep + bucket |
|||
file = strings.Replace(file, localPrefix, "", -1) |
|||
file = strings.Replace(file, sep+sep, sep, -1) |
|||
} |
|||
|
|||
//fmt.Printf("file: %s, bucket: %s, container: %-v\n", file, bucket, v.container)
|
|||
|
|||
urlPath.Host = bucket |
|||
urlPath.Path = file |
|||
|
|||
item, err := v.location.ItemByURL(&urlPath) |
|||
if item != nil { |
|||
r, err := item.Open() |
|||
if err != nil { |
|||
return data, mimeType, err |
|||
} |
|||
data, err = ioutil.ReadAll(r) |
|||
mimeType = detectMIME(data, file) // - определяем MimeType отдаваемого файла
|
|||
} |
|||
|
|||
//fmt.Printf("item: %+v, len data: %-v, mimeType: %s, err: %s", item, len(data), mimeType, err)
|
|||
|
|||
if err != nil { |
|||
err = fmt.Errorf("%s. urlPath: %+v, file: %s, bucket: %s, v.container: %+v\n", err, urlPath, file, bucket, v.container) |
|||
} |
|||
return data, mimeType, err |
|||
} |
|||
|
|||
// Write создаем объект в хранилище
|
|||
func (v *vfs) Write(file string, data []byte) (err error) { |
|||
sdata := string(data) |
|||
r := strings.NewReader(sdata) |
|||
size := int64(len(sdata)) |
|||
|
|||
// если передан разделитель, то заменяем / на него (возможно понадобится для совместимости плоских хранилищ)
|
|||
if v.comma != "" { |
|||
file = strings.Replace(file, sep, v.comma, -1) |
|||
} |
|||
|
|||
_, err = v.container.Put(file, r, size, nil) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// List список файлов выбранного
|
|||
func (v *vfs) List(prefix string, pageSize int) (files []Item, err error) { |
|||
err = stow.Walk(v.container, prefix, pageSize, func(item stow.Item, err error) error { |
|||
if err != nil { |
|||
fmt.Printf("error Walk from list vfs. connect:%+v, prefix: %s, err: %s\n", v, prefix, err) |
|||
return err |
|||
} |
|||
files = append(files, item) |
|||
return nil |
|||
}) |
|||
|
|||
return files, err |
|||
} |
|||
|
|||
func NewVfs(kind, endpoint, accessKeyID, secretKey, region, bucket, comma string) Vfs { |
|||
return &vfs{ |
|||
kind: kind, |
|||
endpoint: endpoint, |
|||
accessKeyID: accessKeyID, |
|||
secretKey: secretKey, |
|||
region: region, |
|||
bucket: bucket, |
|||
comma: comma, |
|||
} |
|||
} |
@ -0,0 +1,205 @@ |
|||
package lib |
|||
|
|||
import ( |
|||
"github.com/gabriel-vasile/mimetype" |
|||
"path/filepath" |
|||
"sync" |
|||
) |
|||
|
|||
var mimeDetector = map[string]string{} |
|||
var mu sync.RWMutex |
|||
|
|||
var ( |
|||
css = addMIME("text/css", ".css") |
|||
xz = addMIME("application/x-xz", ".xz") |
|||
gzip = addMIME("application/gzip", ".gz") |
|||
sevenZ = addMIME("application/x-7z-compressed", ".7z") |
|||
zipZ = addMIME("application/zip", ".zip") |
|||
tar = addMIME("application/x-tar", ".tar") |
|||
xar = addMIME("application/x-xar", ".xar") |
|||
bz2 = addMIME("application/x-bzip2", ".bz2") |
|||
pdf = addMIME("application/pdf", ".pdf") |
|||
fdf = addMIME("application/vnd.fdf", ".fdf") |
|||
xlsx = addMIME("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx") |
|||
docx = addMIME("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx") |
|||
pptx = addMIME("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx") |
|||
epub = addMIME("application/epub+zip", ".epub") |
|||
jar = addMIME("application/jar", ".jar") |
|||
ole = addMIME("application/x-ole-storage", "") |
|||
msi = addMIME("application/x-ms-installer", ".msi") |
|||
aaf = addMIME("application/octet-stream", ".aaf") |
|||
doc = addMIME("application/msword", ".doc") |
|||
ppt = addMIME("application/vnd.ms-powerpoint", ".ppt") |
|||
pub = addMIME("application/vnd.ms-publisher", ".pub") |
|||
xls = addMIME("application/vnd.ms-excel", ".xls") |
|||
msg = addMIME("application/vnd.ms-outlook", ".msg") |
|||
ps = addMIME("application/postscript", ".ps") |
|||
fits = addMIME("application/fits", ".fits") |
|||
ogg = addMIME("application/ogg", ".ogg") |
|||
oggAudio = addMIME("audio/ogg", ".oga") |
|||
oggVideo = addMIME("video/ogg", ".ogv") |
|||
text = addMIME("text/plain", ".txt") |
|||
xml = addMIME("text/xml", ".xml") |
|||
jsonZ = addMIME("application/json", ".json") |
|||
har = addMIME("application/json", ".har") |
|||
csv = addMIME("text/csv", ".csv") |
|||
tsv = addMIME("text/tab-separated-values", ".tsv") |
|||
geoJSON = addMIME("application/geo+json", ".geojson") |
|||
ndJSON = addMIME("application/x-ndjson", ".ndjson") |
|||
html = addMIME("text/html", ".html") |
|||
php = addMIME("text/x-php", ".php") |
|||
rtf = addMIME("text/rtf", ".rtf") |
|||
js = addMIME("application/javascript", ".js") |
|||
srt = addMIME("application/x-subrip", ".srt") |
|||
vtt = addMIME("text/vtt", ".vtt") |
|||
lua = addMIME("text/x-lua", ".lua") |
|||
perl = addMIME("text/x-perl", ".pl") |
|||
python = addMIME("text/x-python", ".py") |
|||
tcl = addMIME("text/x-tcl", ".tcl") |
|||
vCard = addMIME("text/vcard", ".vcf") |
|||
iCalendar = addMIME("text/calendar", ".ics") |
|||
svg = addMIME("image/svg+xml", ".svg") |
|||
rss = addMIME("application/rss+xml", ".rss") |
|||
owl2 = addMIME("application/owl+xml", ".owl") |
|||
atom = addMIME("application/atom+xml", ".atom") |
|||
x3d = addMIME("model/x3d+xml", ".x3d") |
|||
kml = addMIME("application/vnd.google-earth.kml+xml", ".kml") |
|||
xliff = addMIME("application/x-xliff+xml", ".xlf") |
|||
collada = addMIME("model/vnd.collada+xml", ".dae") |
|||
gml = addMIME("application/gml+xml", ".gml") |
|||
gpx = addMIME("application/gpx+xml", ".gpx") |
|||
tcx = addMIME("application/vnd.garmin.tcx+xml", ".tcx") |
|||
amf = addMIME("application/x-amf", ".amf") |
|||
threemf = addMIME("application/vnd.ms-package.3dmanufacturing-3dmodel+xml", ".3mf") |
|||
png = addMIME("image/png", ".png") |
|||
apng = addMIME("image/vnd.mozilla.apng", ".png") |
|||
jpg = addMIME("image/jpeg", ".jpg") |
|||
jxl = addMIME("image/jxl", ".jxl") |
|||
jp2 = addMIME("image/jp2", ".jp2") |
|||
jpx = addMIME("image/jpx", ".jpf") |
|||
jpm = addMIME("image/jpm", ".jpm") |
|||
xpm = addMIME("image/x-xpixmap", ".xpm") |
|||
bpg = addMIME("image/bpg", ".bpg") |
|||
gif = addMIME("image/gif", ".gif") |
|||
webp = addMIME("image/webp", ".webp") |
|||
tiff = addMIME("image/tiff", ".tiff") |
|||
bmp = addMIME("image/bmp", ".bmp") |
|||
ico = addMIME("image/x-icon", ".ico") |
|||
icns = addMIME("image/x-icns", ".icns") |
|||
psd = addMIME("image/vnd.adobe.photoshop", ".psd") |
|||
heic = addMIME("image/heic", ".heic") |
|||
heicSeq = addMIME("image/heic-sequence", ".heic") |
|||
heif = addMIME("image/heif", ".heif") |
|||
heifSeq = addMIME("image/heif-sequence", ".heif") |
|||
hdr = addMIME("image/vnd.radiance", ".hdr") |
|||
avif = addMIME("image/avif", ".avif") |
|||
mp3 = addMIME("audio/mpeg", ".mp3") |
|||
flac = addMIME("audio/flac", ".flac") |
|||
midi = addMIME("audio/midi", ".midi") |
|||
ape = addMIME("audio/ape", ".ape") |
|||
musePack = addMIME("audio/musepack", ".mpc") |
|||
wav = addMIME("audio/wav", ".wav") |
|||
aiff = addMIME("audio/aiff", ".aiff") |
|||
au = addMIME("audio/basic", ".au") |
|||
amr = addMIME("audio/amr", ".amr") |
|||
aac = addMIME("audio/aac", ".aac") |
|||
voc = addMIME("audio/x-unknown", ".voc") |
|||
aMp4 = addMIME("audio/mp4", ".mp4") |
|||
m4a = addMIME("audio/x-m4a", ".m4a") |
|||
m3u = addMIME("application/vnd.apple.mpegurl", ".m3u") |
|||
m4v = addMIME("video/x-m4v", ".m4v") |
|||
mp4 = addMIME("video/mp4", ".mp4") |
|||
webM = addMIME("video/webm", ".webm") |
|||
mpeg = addMIME("video/mpeg", ".mpeg") |
|||
quickTime = addMIME("video/quicktime", ".mov") |
|||
mqv = addMIME("video/quicktime", ".mqv") |
|||
threeGP = addMIME("video/3gpp", ".3gp") |
|||
threeG2 = addMIME("video/3gpp2", ".3g2") |
|||
avi = addMIME("video/x-msvideo", ".avi") |
|||
flv = addMIME("video/x-flv", ".flv") |
|||
mkv = addMIME("video/x-matroska", ".mkv") |
|||
asf = addMIME("video/x-ms-asf", ".asf") |
|||
rmvb = addMIME("application/vnd.rn-realmedia-vbr", ".rmvb") |
|||
class = addMIME("application/x-java-applet", ".class") |
|||
swf = addMIME("application/x-shockwave-flash", ".swf") |
|||
crx = addMIME("application/x-chrome-extension", ".crx") |
|||
ttf = addMIME("font/ttf", ".ttf") |
|||
woff = addMIME("font/woff", ".woff") |
|||
woff2 = addMIME("font/woff2", ".woff2") |
|||
otf = addMIME("font/otf", ".otf") |
|||
ttc = addMIME("font/collection", ".ttc") |
|||
eot = addMIME("application/vnd.ms-fontobject", ".eot") |
|||
wasm = addMIME("application/wasm", ".wasm") |
|||
shp = addMIME("application/vnd.shp", ".shp") |
|||
shx = addMIME("application/vnd.shx", ".shx") |
|||
dbf = addMIME("application/x-dbf", ".dbf") |
|||
exe = addMIME("application/vnd.microsoft.portable-executable", ".exe") |
|||
elf = addMIME("application/x-elf", "") |
|||
elfObj = addMIME("application/x-object", "") |
|||
elfExe = addMIME("application/x-executable", "") |
|||
elfLib = addMIME("application/x-sharedlib", ".so") |
|||
elfDump = addMIME("application/x-coredump", "") |
|||
ar = addMIME("application/x-archive", ".a") |
|||
deb = addMIME("application/vnd.debian.binary-package", ".deb") |
|||
rpm = addMIME("application/x-rpm", ".rpm") |
|||
dcm = addMIME("application/dicom", ".dcm") |
|||
odt = addMIME("application/vnd.oasis.opendocument.text", ".odt") |
|||
ott = addMIME("application/vnd.oasis.opendocument.text-template", ".ott") |
|||
ods = addMIME("application/vnd.oasis.opendocument.spreadsheet", ".ods") |
|||
ots = addMIME("application/vnd.oasis.opendocument.spreadsheet-template", ".ots") |
|||
odp = addMIME("application/vnd.oasis.opendocument.presentation", ".odp") |
|||
otp = addMIME("application/vnd.oasis.opendocument.presentation-template", ".otp") |
|||
odg = addMIME("application/vnd.oasis.opendocument.graphics", ".odg") |
|||
otg = addMIME("application/vnd.oasis.opendocument.graphics-template", ".otg") |
|||
odf = addMIME("application/vnd.oasis.opendocument.formula", ".odf") |
|||
odc = addMIME("application/vnd.oasis.opendocument.chart", ".odc") |
|||
sxc = addMIME("application/vnd.sun.xml.calc", ".sxc") |
|||
rar = addMIME("application/x-rar-compressed", ".rar") |
|||
djvu = addMIME("image/vnd.djvu", ".djvu") |
|||
mobi = addMIME("application/x-mobipocket-ebook", ".mobi") |
|||
lit = addMIME("application/x-ms-reader", ".lit") |
|||
sqlite3 = addMIME("application/vnd.sqlite3", ".sqlite") |
|||
dwg = addMIME("image/vnd.dwg", ".dwg") |
|||
warc = addMIME("application/warc", ".warc") |
|||
nes = addMIME("application/vnd.nintendo.snes.rom", ".nes") |
|||
lnk = addMIME("application/x-ms-shortcut", ".lnk") |
|||
macho = addMIME("application/x-mach-binary", ".macho") |
|||
qcp = addMIME("audio/qcelp", ".qcp") |
|||
mrc = addMIME("application/marc", ".mrc") |
|||
mdb = addMIME("application/x-msaccess", ".mdb") |
|||
accdb = addMIME("application/x-msaccess", ".accdb") |
|||
zstd = addMIME("application/zstd", ".zst") |
|||
cab = addMIME("application/vnd.ms-cab-compressed", ".cab") |
|||
cabIS = addMIME("application/x-installshield", ".cab") |
|||
lzip = addMIME("application/lzip", ".lz") |
|||
torrent = addMIME("application/x-bittorrent", ".torrent") |
|||
cpio = addMIME("application/x-cpio", ".cpio") |
|||
tzif = addMIME("application/tzif", "") |
|||
p7s = addMIME("application/pkcs7-signature", ".p7s") |
|||
xcf = addMIME("image/x-xcf", ".xcf") |
|||
pat = addMIME("image/x-gimp-pat", ".pat") |
|||
gbr = addMIME("image/x-gimp-gbr", ".gbr") |
|||
xfdf = addMIME("application/vnd.adobe.xfdf", ".xfdf") |
|||
glb = addMIME("model/gltf-binary", ".glb") |
|||
) |
|||
|
|||
// addMIME registers a MIME type for the given file extension in the package
// lookup table. It always returns nil: the error result exists only so the
// function can be used in package-level var initializers.
// NOTE(review): the write is not guarded by mu — safe only because all calls
// happen during package init; confirm it is never called afterwards.
func addMIME(mimeType, ext string) error {
	mimeDetector[ext] = mimeType
	return nil
}
|||
|
|||
func detectMIME(data []byte, file string) (mimeType string) { |
|||
mu.RLock() |
|||
defer mu.RUnlock() |
|||
|
|||
// определяем по расширения, если нашли - возвращаем
|
|||
if v, found := mimeDetector[filepath.Ext(file)]; found { |
|||
return v |
|||
} |
|||
|
|||
// если не нашли или расширение пустое - пытаемся определить по содержимому файла (через сторонний пакет)
|
|||
mimeType = mimetype.Detect(data).String() // - работает не быстро (по мануалу), но как доп.определялка можно включить
|
|||
|
|||
return mimeType |
|||
} |
@ -0,0 +1,2 @@ |
|||
[url "ssh://git@git.lowcodeplatform.net/"] |
|||
insteadOf = https://git.lowcodeplatform.net/ |
@ -0,0 +1,9 @@ |
|||
.history |
|||
.idea |
|||
.vscode |
|||
.DS_Store |
|||
*~merged* |
|||
*~merged |
|||
/public |
|||
.env |
|||
local |
@ -0,0 +1,3 @@ |
|||
# models |
|||
|
|||
Модели общих сущностей проекта Lowcodeplatform Fabric |
@ -0,0 +1,98 @@ |
|||
package models |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
) |
|||
|
|||
var StatusCode = RStatus{ |
|||
"OK": {"Запрос выполнен", 200, "", nil}, |
|||
"OKLicenseActivation": {"Лицензия была активирована", 200, "", nil}, |
|||
"Unauthorized": {"Ошибка авторизации", 401, "", nil}, |
|||
"NotCache": {"Доступно только в Турбо-режиме", 200, "", nil}, |
|||
"NotStatus": {"Ответ сервера не содержит статус выполнения запроса", 501, "", nil}, |
|||
"NotExtended": {"На сервере отсутствует расширение, которое желает использовать клиент", 501, "", nil}, |
|||
"ErrorFormatJson": {"Ошибка формата JSON-запроса", 500, "ErrorFormatJson", nil}, |
|||
"ErrorTransactionFalse": {"Ошибка выполнения тразакции SQL", 500, "ErrorTransactionFalse", nil}, |
|||
"ErrorBeginDB": {"Ошибка подключения к БД", 500, "ErrorBeginDB", nil}, |
|||
"ErrorPrepareSQL": {"Ошибка подготовки запроса SQL", 500, "ErrorPrepareSQL", nil}, |
|||
"ErrorNullParameter": {"Ошибка! Не передан параметр", 503, "ErrorNullParameter", nil}, |
|||
"ErrorQuery": {"Ошибка запроса на выборку данных", 500, "ErrorQuery", nil}, |
|||
"ErrorScanRows": {"Ошибка переноса данных из запроса в объект", 500, "ErrorScanRows", nil}, |
|||
"ErrorNullFields": {"Не все поля заполнены", 500, "ErrorScanRows", nil}, |
|||
"ErrorAccessType": {"Ошибка доступа к элементу типа", 500, "ErrorAccessType", nil}, |
|||
"ErrorGetData": {"Ошибка доступа данным объекта", 500, "ErrorGetData", nil}, |
|||
"ErrorRevElement": {"Значение было изменено ранее.", 409, "ErrorRevElement", nil}, |
|||
"ErrorForbiddenElement": {"Значение занято другим пользователем.", 403, "ErrorForbiddenElement", nil}, |
|||
"ErrorUnprocessableEntity": {"Необрабатываемый экземпляр", 422, "ErrorUnprocessableEntity", nil}, |
|||
"ErrorNotFound": {"Значение не найдено", 404, "ErrorNotFound", nil}, |
|||
"ErrorReadDir": {"Ошибка чтения директории", 403, "ErrorReadDir", nil}, |
|||
"ErrorReadConfigDir": {"Ошибка чтения директории конфигураций", 403, "ErrorReadConfigDir", nil}, |
|||
"errorOpenConfigDir": {"Ошибка открытия директории конфигураций", 403, "errorOpenConfigDir", nil}, |
|||
"ErrorReadConfigFile": {"Ошибка чтения файла конфигураций", 403, "ErrorReadConfigFile", nil}, |
|||
"ErrorReadLogFile": {"Ошибка чтения файла логирования", 403, "ErrorReadLogFile", nil}, |
|||
"ErrorScanLogFile": {"Ошибка построчного чтения файла логирования", 403, "ErrorScanLogFile", nil}, |
|||
"ErrorPortBusy": {"Указанный порт занят", 403, "ErrorPortBusy", nil}, |
|||
"ErrorGone": {"Объект был удален ранее", 410, "ErrorGone", nil}, |
|||
"ErrorShema": {"Ошибка формата заданной схемы формирования запроса", 410, "ErrorShema", nil}, |
|||
"ErrorInitBase": {"Ошибка инициализации новой базы данных", 410, "ErrorInitBase", nil}, |
|||
"ErrorCreateCacheRecord": {"Ошибка создания объекта в кеше", 410, "ErrorCreateCacheRecord", nil}, |
|||
"ErrorUpdateParams": {"Не переданы параметры для обновления серверов (сервер источник, сервер получатель)", 410, "ErrorUpdateParams", nil}, |
|||
"ErrorIntervalProxy": {"Ошибка переданного интервала (формат: 1000:2000)", 410, "ErrorIntervalProxy", nil}, |
|||
"ErrorReservPortProxy": {"Ошибка выделения порта proxy-сервером", 410, "ErrorReservPortProxy", nil}, |
|||
} |
|||
|
|||
type RStatus map[string]RestStatus |
|||
type RestStatus struct { |
|||
Description string `json:"description"` |
|||
Status int `json:"status"` |
|||
Code string `json:"code"` |
|||
Error error `json:"error"` |
|||
} |
|||
|
|||
func (r RestStatus) MarshalJSON() ([]byte, error) { |
|||
type RestStatusJson struct { |
|||
Description string `json:"description"` |
|||
Status int `json:"status"` |
|||
Code string `json:"code"` |
|||
Error string `json:"error"` |
|||
} |
|||
|
|||
var n = RestStatusJson{} |
|||
n.Description = r.Description |
|||
n.Status = r.Status |
|||
n.Code = r.Code |
|||
n.Error = fmt.Sprint(r.Error) |
|||
if r.Error == nil { |
|||
n.Error = "" |
|||
} |
|||
|
|||
res, err := json.Marshal(n) |
|||
return res, err |
|||
} |
|||
|
|||
func (r RestStatus) UnmarshalJSON(b []byte) error { |
|||
type RestStatusJson struct { |
|||
Description string `json:"description"` |
|||
Status int `json:"status"` |
|||
Code string `json:"code"` |
|||
Error string `json:"error"` |
|||
} |
|||
t := RestStatusJson{} |
|||
|
|||
err := json.Unmarshal(b, &t) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
r.Description = t.Description |
|||
r.Code = t.Code |
|||
r.Status = t.Status |
|||
if t.Error != "" { |
|||
r.Error = nil |
|||
} else { |
|||
r.Error = fmt.Errorf("%s", t.Error) |
|||
} |
|||
|
|||
return nil |
|||
} |
@ -0,0 +1,176 @@ |
|||
package models |
|||
|
|||
import "strings" |
|||
|
|||
// Data is the common object model of the platform: a typed entity with a set
// of named attributes.
type Data struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): the first letter of Сopies is the CYRILLIC capital Es
	// (U+0421), not the Latin "C". Renaming would break external users of the
	// field, so it is only flagged here; the JSON tag is plain ASCII.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
}
|||
|
|||
// Attribute is a single named property of a Data object. Value and Src carry
// the payload; Tpls, Status, Rev and Editor carry presentation and versioning
// metadata.
type Attribute struct {
	Value  string `json:"value"`
	Src    string `json:"src"`
	Tpls   string `json:"tpls"`
	Status string `json:"status"`
	Rev    string `json:"rev"`
	Editor string `json:"editor"`
}
|||
|
|||
// Response is the generic API envelope: arbitrary payload plus execution
// status and query metrics.
type Response struct {
	Data    interface{} `json:"data"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}

// ResponseData is the API envelope for object lists: Data holds the objects,
// Res an optional extra payload.
type ResponseData struct {
	Data    []Data      `json:"data"`
	Res     interface{} `json:"res"`
	Status  RestStatus  `json:"status"`
	Metrics Metrics     `json:"metrics"`
}
|||
|
|||
// Metrics describes the result set of a query: sizes, timings and pagination
// details for the client.
type Metrics struct {
	ResultSize    int    `json:"result_size"`
	ResultCount   int    `json:"result_count"`
	ResultOffset  int    `json:"result_offset"`
	ResultLimit   int    `json:"result_limit"`
	ResultPage    int    `json:"result_page"`
	TimeExecution string `json:"time_execution"`
	TimeQuery     string `json:"time_query"`

	// pagination block
	PageLast    int   `json:"page_last"`
	PageCurrent int   `json:"page_current"`
	PageList    []int `json:"page_list"`
	PageFrom    int   `json:"page_from"`
	PageTo      int   `json:"page_to"`
}
|||
|
|||
// возвращаем необходимый значение атрибута для объекта если он есть, инае пусто
|
|||
// а также из заголовка объекта
|
|||
func (p *Data) Attr(name, element string) (result string, found bool) { |
|||
|
|||
if _, found := p.Attributes[name]; found { |
|||
|
|||
// фикс для тех объектов, на которых добавлено скрытое поле Uid
|
|||
if name == "uid" { |
|||
return p.Uid, true |
|||
} |
|||
|
|||
switch element { |
|||
case "src": |
|||
return p.Attributes[name].Src, true |
|||
case "value": |
|||
return p.Attributes[name].Value, true |
|||
case "tpls": |
|||
return p.Attributes[name].Tpls, true |
|||
case "rev": |
|||
return p.Attributes[name].Rev, true |
|||
case "status": |
|||
return p.Attributes[name].Status, true |
|||
case "uid": |
|||
return p.Uid, true |
|||
case "source": |
|||
return p.Source, true |
|||
case "id": |
|||
return p.Id, true |
|||
case "title": |
|||
return p.Title, true |
|||
case "type": |
|||
return p.Type, true |
|||
} |
|||
} else { |
|||
switch name { |
|||
case "uid": |
|||
return p.Uid, true |
|||
case "source": |
|||
return p.Source, true |
|||
case "id": |
|||
return p.Id, true |
|||
case "title": |
|||
return p.Title, true |
|||
case "type": |
|||
return p.Type, true |
|||
} |
|||
} |
|||
return "", false |
|||
} |
|||
|
|||
// заменяем значение аттрибутов в объекте профиля
|
|||
func (p *Data) AttrSet(name, element, value string) bool { |
|||
g := Attribute{} |
|||
|
|||
for k, v := range p.Attributes { |
|||
if k == name { |
|||
g = v |
|||
} |
|||
} |
|||
|
|||
switch element { |
|||
case "src": |
|||
g.Src = value |
|||
case "value": |
|||
g.Value = value |
|||
case "tpls": |
|||
g.Tpls = value |
|||
case "rev": |
|||
g.Rev = value |
|||
case "status": |
|||
g.Status = value |
|||
} |
|||
|
|||
f := p.Attributes |
|||
|
|||
for k, _ := range f { |
|||
if k == name { |
|||
f[k] = g |
|||
return true |
|||
} |
|||
} |
|||
|
|||
return false |
|||
} |
|||
|
|||
// удаляем элемент из слайса
|
|||
func (p *ResponseData) RemoveData(i int) bool { |
|||
|
|||
if i < len(p.Data) { |
|||
p.Data = append(p.Data[:i], p.Data[i+1:]...) |
|||
} else { |
|||
//log.Warning("Error! Position invalid (", i, ")")
|
|||
return false |
|||
} |
|||
|
|||
return true |
|||
} |
|||
|
|||
// FilterRole применяем ограничения доступа для объектов типа ResponseData
|
|||
// фильтруем массив данных
|
|||
// если непустое поле access_read, значит назначены права, а следовательно проверяем право просмотра для роли пользователя
|
|||
// также возвращаем
|
|||
func (p *ResponseData) FilterRole(role string) { |
|||
sliceData := p.Data |
|||
|
|||
for i := len(sliceData) - 1; i >= 0; i-- { |
|||
v := sliceData[i] |
|||
attr_read, _ := v.Attr("access_read", "src") |
|||
attr_write, _ := v.Attr("attr_write", "src") |
|||
attr_delete, _ := v.Attr("attr_delete", "src") |
|||
attr_admin, _ := v.Attr("attr_admin", "src") |
|||
|
|||
if (!strings.Contains(attr_read, role) || attr_read == "") && |
|||
(!strings.Contains(attr_write, role) || attr_write == "") && |
|||
(!strings.Contains(attr_delete, role) || attr_delete == "") && |
|||
(!strings.Contains(attr_admin, role) || attr_admin == "") { |
|||
p.RemoveData(i) |
|||
} |
|||
} |
|||
|
|||
return |
|||
} |
@ -0,0 +1,41 @@ |
|||
package models |
|||
|
|||
import "time" |
|||
|
|||
// Pong is the response type the service returns to the proxy during the
// periodic health poll (ping).
//
// NOTE(review): Uid/Https/Grpc break the Go initialism convention
// (UID/HTTPS/GRPC), but renaming exported fields would break callers,
// so the names are kept.
type Pong struct {
	Uid      string         `json:"uid"`
	Config   string         `json:"config"`
	Name     string         `json:"name"`
	Version  string         `json:"version"`
	Path     string         `json:"path"`
	Status   string         `json:"status"`
	Port     int            `json:"port"`
	Pid      string         `json:"pid"` // kept as string — TODO confirm whether a numeric PID is ever expected
	State    string         `json:"state"`
	Replicas int            `json:"replicas"`
	Https    bool           `json:"https"`
	DeadTime int64          `json:"dead_time"`
	Follower string         `json:"follower"`
	Grpc     int            `json:"grpc"`   // presumably the gRPC port — confirm against the proxy
	Metric   int            `json:"metric"` // presumably the metrics port — confirm against the proxy
	Host     string         `json:"host"`
	Metrics  []MetricsField `json:"metrics"`
}
|||
|
|||
// MetricsField describes a single metric entry carried in a Pong response.
type MetricsField struct {
	Help         string        `json:"help"`
	Type         string        `json:"type"`
	Count        float64       `json:"count"`
	Value        string        `json:"value"`
	Viewer       string        `json:"viewer"`
	SaveInterval time.Duration `json:"saveInterval"`
	SavePeriod   time.Duration `json:"savePeriod"`
}
|||
|
|||
// Hosts describes a host together with its port range and protocol.
type Hosts struct {
	Host     string `json:"host"`
	PortFrom int    `json:"portfrom"`
	PortTo   int    `json:"portto"`
	Protocol string `json:"protocol"`
}
@ -0,0 +1,77 @@ |
|||
package models |
|||
|
|||
// ProfileData is the aggregated user-profile state assembled for a session.
type ProfileData struct {
	Revision       string `json:"revision"` // revision of the current session (when the session is updated — the profile changes — the session ID stays, but the revision changes)
	Hash           string `json:"hash"`
	Email          string `json:"email"`
	Uid            string `json:"uid"`
	ObjUid         string `json:"obj_uid"`
	FirstName      string `json:"first_name"`
	LastName       string `json:"last_name"`
	Photo          string `json:"photo"`
	Age            string `json:"age"`
	City           string `json:"city"`
	Country        string `json:"country"`
	Oauth_identity string `json:"oauth_identity"` // NOTE(review): underscore name breaks Go convention; kept for compatibility
	Status         string `json:"status"`         // src of the Status field in the profile (sometimes needed for extra filtering)
	Raw            []Data `json:"raw"`            // the user object (needed when building a project for this user while granting database rights)
	Tables         []Data `json:"tables"`
	Roles          []Data
	Homepage       string `json:"homepage"`
	Maket          string `json:"maket"`
	UpdateFlag     bool   `json:"update_flag"`
	UpdateData     []Data `json:"update_data"`
	CurrentRole    Data   `json:"current_role"`
	Profiles       []Data `json:"profiles"`
	CurrentProfile Data   `json:"current_profile"`
	Navigator      []*Items `json:"navigator"`

	Groups             string
	GroupsValue        string
	GroupsDefaultSrc   string
	GroupsDefaultValue string

	ButtonsNavTop []Data
	CountLicense  int
	BaseMode      map[string]string

	// TODO check where these are used and remove them.
	RolesOld   map[string]string `json:"roles"` // Deprecated.
	First_name string            // Deprecated.
	Last_name  string            // Deprecated.
}
|||
|
|||
// Items is a navigation-tree node. Sub lists the keys of declared
// children; Incl accumulates the resolved child nodes (see ScanSub,
// which fills Incl from a lookup map keyed by those values).
type Items struct {
	Title        string   `json:"title"`
	ExtentedLink string   `json:"extentedLink"` // NOTE(review): "Extented" looks like a typo for "Extended"; name and tag kept for compatibility
	Uid          string   `json:"uid"`
	Source       string   `json:"source"`
	Icon         string   `json:"icon"`
	Leader       string   `json:"leader"`
	Order        string   `json:"order"`
	Type         string   `json:"type"`
	Preview      string   `json:"preview"`
	Url          string   `json:"url"`
	Sub          []string `json:"sub"`  // keys of declared child items
	Incl         []*Items `json:"incl"` // resolved child items, populated by ScanSub
	Class        string   `json:"class"`
	FinderMode   string   `json:"finder_mode"`
}
|||
|
|||
// ScanSub метод типа Items (перемещаем структуры в карте, исходя из заявленной вложенности элементов)
|
|||
// (переделать дубль фукнции)
|
|||
func (p *Items) ScanSub(maps *map[string]*Items) { |
|||
if p.Sub != nil && len(p.Sub) != 0 { |
|||
for _, c := range p.Sub { |
|||
gg := *maps |
|||
fromP := gg[c] |
|||
if fromP != nil { |
|||
copyPolygon := *fromP |
|||
p.Incl = append(p.Incl, ©Polygon) |
|||
delete(*maps, c) |
|||
copyPolygon.ScanSub(maps) |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,20 @@ |
|||
package models |
|||
|
|||
import "github.com/golang-jwt/jwt" |
|||
|
|||
// Token is the JWT claims payload for a user session (embeds the
// standard claims from github.com/golang-jwt/jwt).
type Token struct {
	Uid        string
	Role       string
	Profile    string
	Groups     string
	Local      string
	Type       string
	Session    string
	SessionRev string // revision of the current session (when the session is updated — the profile changes — the session ID stays, but the revision changes)
	jwt.StandardClaims
}
|||
|
|||
// Roles is a minimal role descriptor: a display title and an object uid.
type Roles struct {
	Title string
	Uid   string
}
@ -0,0 +1,50 @@ |
|||
package models |
|||
|
|||
// DataTree is a tree node over profile data; Sub lists the keys of
// declared children and Incl holds the resolved child nodes (filled by
// ScanSub). Incl stores pointers, unlike DataTreeOut which stores values.
type DataTree struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): "Сopies" appears to start with a Cyrillic "С" (U+0421),
	// not a Latin "C" (the JSON tag uses the Latin spelling). Renaming the
	// exported field would break callers, so it is left as is — confirm
	// and fix project-wide if possible.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`  // keys of declared children
	Incl       []*DataTree          `json:"incl"` // resolved children, populated by ScanSub
}
|||
|
|||
// DataTreeOut mirrors DataTree but stores resolved children by value
// (Incl []DataTree instead of []*DataTree) — presumably an output/export
// shape; confirm against its serializer.
type DataTreeOut struct {
	Uid    string `json:"uid"`
	Id     string `json:"id"`
	Source string `json:"source"`
	Parent string `json:"parent"`
	Type   string `json:"type"`
	Title  string `json:"title"`
	Rev    string `json:"rev"`
	// NOTE(review): "Сopies" appears to start with a Cyrillic "С" (U+0421),
	// matching the same field in DataTree; kept for compatibility.
	Сopies     string               `json:"copies"`
	Attributes map[string]Attribute `json:"attributes"`
	Sub        []string             `json:"sub"`
	Incl       []DataTree           `json:"incl"`
}
|||
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
////////////////////////////////////////////////////////////////////////////////////////
|
|||
|
|||
// метод типа Items (перемещаем структуры в карте, исходя из заявленной вложенности элементов)
|
|||
// (переделать дубль фукнции)
|
|||
func (p *DataTree) ScanSub(maps *map[string]*DataTree) { |
|||
if p.Sub != nil && len(p.Sub) != 0 { |
|||
for _, c := range p.Sub { |
|||
gg := *maps |
|||
fromP := gg[c] |
|||
if fromP != nil { |
|||
copyPolygon := *fromP |
|||
p.Incl = append(p.Incl, ©Polygon) |
|||
delete(*maps, c) |
|||
copyPolygon.ScanSub(maps) |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,242 @@ |
|||
package types |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io" |
|||
"strings" |
|||
|
|||
"github.com/pkg/errors" |
|||
"go.uber.org/zap" |
|||
"go.uber.org/zap/zapcore" |
|||
) |
|||
|
|||
// JSON предоставляет тип данных для логирование JSON-строк через zap-логгер с учетом
|
|||
// маскирования полей с приватными данными.
|
|||
func JSON(key string, val string) zap.Field { |
|||
hashedVal, err := MaskSensitiveJSONFields(val, nil, nil, nil) |
|||
if err != nil { |
|||
hashedVal = fmt.Sprintf(`{"error": "%s", "val": "%s"}`, |
|||
strings.ReplaceAll(val, `"`, `\"`), |
|||
strings.ReplaceAll(err.Error(), `"`, `\"`), |
|||
) |
|||
} |
|||
|
|||
return zap.Field{Key: key, Type: zapcore.StringType, String: hashedVal, Integer: 0, Interface: nil} |
|||
} |
|||
|
|||
// Token states for the MaskSensitiveJSONFields state machine. The state
// tracks what the previous token was, so the encoder knows which
// separator (',' or ':') to emit before the next value.
const (
	unknown = iota // initial state, before any token has been seen
	object         // just entered an object ("{")
	objectEnd      // just closed an object ("}")
	fieldName      // the last token was an object field name
	fieldValue     // the last token was an object field value
	array          // just entered an array ("[")
	arrayValue     // the last token was an array element
	arrayEnd       // just closed an array ("]")
)
|||
|
|||
// MaskSensitiveJSONFields masks private data in JSON strings.
//
// It re-encodes jsonString token by token, replacing string values whose
// field name matches hideKeys (fully hidden via Hide) or excludeKeys
// (masked via Mask), or whose value starts with one of excludeValues
// (masked). The caller-supplied key lists are extended with the package
// defaults (defaultExcludeKeys / defaultHideKeys, declared elsewhere in
// the package). On a tokenizer error other than io.EOF the ORIGINAL
// input is returned unchanged with a nil error (best-effort behavior).
func MaskSensitiveJSONFields(jsonString string, excludeKeys, hideKeys, excludeValues []string) (string, error) {
	decoder := json.NewDecoder(strings.NewReader(jsonString))
	// UseNumber keeps numbers as json.Number so re-marshaling does not
	// reformat them through float64.
	decoder.UseNumber()

	strBuilder := new(strings.Builder)

	var (
		token         json.Token
		stateStack    intStack // saved states of enclosing containers
		lastFieldName string   // most recently seen object field name
		err           error
	)

	state := unknown
	// beforeNextValue writes the separator required before the next value
	// (',' between fields/elements, ':' after a field name) and advances
	// the field-name/field-value alternation.
	beforeNextValue := func() error {
		switch state {
		case fieldValue:
			if err := strBuilder.WriteByte(','); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			state = fieldName
		case fieldName:
			if err := strBuilder.WriteByte(':'); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			state = fieldValue
		case arrayValue:
			if err := strBuilder.WriteByte(','); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}
		case array:
			state = arrayValue
		case object:
			state = fieldName
		}

		return nil
	}

	// pushState saves the state to restore when the container that is
	// about to open gets closed again.
	pushState := func() error {
		switch state {
		case object, fieldValue:
			stateStack = stateStack.push(fieldValue)
		case array, arrayValue:
			stateStack = stateStack.push(arrayValue)
		case unknown:
			stateStack = stateStack.push(unknown)
		default:
			return fmt.Errorf("logger: json mask encoder: invalid state (json is invalid?): %d", state)
		}

		return nil
	}

	// setState emits the delimiter for a container transition and updates
	// state, pushing on open ('{'/'[') and popping on close ('}'/']').
	setState := func(newState int) error {
		switch newState {
		case object:
			if err := beforeNextValue(); err != nil {
				return err
			}

			if err := strBuilder.WriteByte('{'); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			if err := pushState(); err != nil {
				return err
			}

			state = newState
		case objectEnd:
			if err := strBuilder.WriteByte('}'); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			stateStack, state = stateStack.pop()
		case array:
			if err := beforeNextValue(); err != nil {
				return err
			}

			if err := strBuilder.WriteByte('['); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			if err := pushState(); err != nil {
				return err
			}

			state = newState
		case arrayEnd:
			if err := strBuilder.WriteByte(']'); err != nil {
				return errors.Wrap(err, "logger: json mask encoder")
			}

			stateStack, state = stateStack.pop()
		}

		return nil
	}

	// NOTE(review): appending to the caller's slices may mutate shared
	// backing arrays if the caller passed slices with spare capacity.
	excludeKeys = append(excludeKeys, defaultExcludeKeys...)
	hideKeys = append(hideKeys, defaultHideKeys...)

	for {
		token, err = decoder.Token()
		if err != nil {
			// io.EOF ends the loop normally; any other error is
			// inspected after the loop.
			break
		}

		if v, ok := token.(json.Delim); ok {
			switch v.String() {
			case "{":
				if err := setState(object); err != nil {
					return "", err
				}
			case "}":
				if err := setState(objectEnd); err != nil {
					return "", err
				}
			case "[":
				if err := setState(array); err != nil {
					return "", err
				}
			case "]":
				if err := setState(arrayEnd); err != nil {
					return "", err
				}
			}
		} else {
			if err := beforeNextValue(); err != nil {
				return "", err
			}
			var jsonBytes []byte
			switch dataType := token.(type) {
			case string:
				// Only string VALUES are masked; a string in the
				// fieldName state just records the field name for the
				// value that follows.
				currentFieldName := ""
				if state == fieldName {
					lastFieldName = dataType
				} else if state == fieldValue {
					currentFieldName = lastFieldName
				}
				jsonBytes, err = json.Marshal(hashSensitiveValue(currentFieldName, dataType, excludeKeys, hideKeys, excludeValues))
			default:
				// Numbers, bools and nulls pass through unmodified.
				jsonBytes, err = json.Marshal(dataType)
			}
			if err != nil {
				return "", errors.Wrap(err, "logger: json mask encoder")
			}
			if _, err = strBuilder.Write(jsonBytes); err != nil {
				return "", errors.Wrap(err, "logger: json mask encoder")
			}
		}
	}

	// Tokenizer failed mid-stream (input is not valid JSON): fall back
	// to returning the original string untouched, deliberately without
	// an error.
	if !errors.Is(err, io.EOF) {
		return jsonString, nil
	}

	return strBuilder.String(), nil
}
|||
|
|||
func hashSensitiveValue(fieldName, src string, excludeKeys, hideKeys, excludeValues []string) string { |
|||
fieldName = strings.ToLower(fieldName) |
|||
|
|||
for _, hideKey := range hideKeys { |
|||
if strings.Contains(fieldName, hideKey) { |
|||
return Hide(src) |
|||
} |
|||
} |
|||
|
|||
for _, excludeKey := range excludeKeys { |
|||
if strings.Contains(fieldName, excludeKey) { |
|||
return Mask(src) |
|||
} |
|||
} |
|||
|
|||
for _, excludeValue := range excludeValues { |
|||
if strings.HasPrefix(src, excludeValue) { |
|||
return Mask(src) |
|||
} |
|||
} |
|||
|
|||
return src |
|||
} |
|||
|
|||
// intStack is a minimal LIFO stack of ints used by the JSON mask encoder
// to remember the state of enclosing containers.
type intStack []int

// push returns the stack with v placed on top.
func (s intStack) push(v int) intStack {
	s = append(s, v)
	return s
}

// pop returns the stack without its top element together with that
// element; popping an empty stack yields the stack unchanged and 0.
func (s intStack) pop() (intStack, int) {
	n := len(s)
	if n == 0 {
		return s, 0
	}

	top := s[n-1]
	return s[:n-1], top
}
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue