loveckiy.ivan
11 months ago
758 changed files with 384639 additions and 0 deletions
@ -0,0 +1,8 @@ |
|||
# Default ignored files |
|||
/shelf/ |
|||
/workspace.xml |
|||
# Editor-based HTTP Client requests |
|||
/httpRequests/ |
|||
# Datasource local storage ignored files |
|||
/dataSources/ |
|||
/dataSources.local.xml |
@ -0,0 +1,9 @@ |
|||
<?xml version="1.0" encoding="UTF-8"?> |
|||
<module type="WEB_MODULE" version="4"> |
|||
<component name="Go" enabled="true" /> |
|||
<component name="NewModuleRootManager"> |
|||
<content url="file://$MODULE_DIR$" /> |
|||
<orderEntry type="inheritedJdk" /> |
|||
<orderEntry type="sourceFolder" forTests="false" /> |
|||
</component> |
|||
</module> |
@ -0,0 +1,8 @@ |
|||
<?xml version="1.0" encoding="UTF-8"?> |
|||
<project version="4"> |
|||
<component name="ProjectModuleManager"> |
|||
<modules> |
|||
<module fileurl="file://$PROJECT_DIR$/.idea/logger.iml" filepath="$PROJECT_DIR$/.idea/logger.iml" /> |
|||
</modules> |
|||
</component> |
|||
</project> |
@ -0,0 +1,6 @@ |
|||
<?xml version="1.0" encoding="UTF-8"?> |
|||
<project version="4"> |
|||
<component name="VcsDirectoryMappings"> |
|||
<mapping directory="$PROJECT_DIR$" vcs="Git" /> |
|||
</component> |
|||
</project> |
@ -0,0 +1,21 @@ |
|||
module git.lowcodeplatform.net/packages/logger |
|||
|
|||
go 1.19 |
|||
|
|||
require ( |
|||
git.lowcodeplatform.net/fabric/logbox-client v0.1.3 |
|||
github.com/sirupsen/logrus v1.9.3 |
|||
) |
|||
|
|||
require ( |
|||
git.lowcodeplatform.net/fabric/logbox v0.1.1 // indirect |
|||
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856 // indirect |
|||
github.com/Jille/grpc-multi-resolver v1.1.0 // indirect |
|||
github.com/golang/protobuf v1.5.2 // indirect |
|||
golang.org/x/net v0.4.0 // indirect |
|||
golang.org/x/sys v0.3.0 // indirect |
|||
golang.org/x/text v0.5.0 // indirect |
|||
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect |
|||
google.golang.org/grpc v1.52.3 // indirect |
|||
google.golang.org/protobuf v1.28.1 // indirect |
|||
) |
@ -0,0 +1,43 @@ |
|||
git.lowcodeplatform.net/fabric/logbox v0.1.1 h1:pmzzXM2lXQFugBa6mliGErB7qPYAJ9fAj+E37o/A0xY= |
|||
git.lowcodeplatform.net/fabric/logbox v0.1.1/go.mod h1:nrzOFnMhVIBrtokITlnEPYjLbDfyu7oDkkJAFA/K4S8= |
|||
git.lowcodeplatform.net/fabric/logbox-client v0.1.3 h1:6hp3kf6qnZaexilEBh+k+xgTNGdiCKwkj7bpZ2dfR2Y= |
|||
git.lowcodeplatform.net/fabric/logbox-client v0.1.3/go.mod h1:uucit5AcbZ2MjK7sGsnvFC5InwBGiaaC5sFgEcFymiI= |
|||
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856 h1:jZP6kGB6bKcXUEtW/FAtPVTPWPdBvPwYvbo0u25wWVU= |
|||
git.lowcodeplatform.net/fabric/packages v0.0.0-20230129123752-a3dc6393a856/go.mod h1:fBsBvUTmm+1rM5Es6RbCQeEE/QFDIPb1iy6/dmUgch8= |
|||
github.com/Jille/grpc-multi-resolver v1.1.0 h1:+SfnPGnoMjJVkURNfzpR8D+0Oy4w7IGAZ1azX1AQScY= |
|||
github.com/Jille/grpc-multi-resolver v1.1.0/go.mod h1:mvSEHOdOT1ju0ySQXhWMNCI4QsAGhuUqy6+wHGVJS+8= |
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= |
|||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= |
|||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= |
|||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= |
|||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= |
|||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= |
|||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
|||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= |
|||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= |
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
|||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= |
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= |
|||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= |
|||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= |
|||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= |
|||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= |
|||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= |
|||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
|||
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= |
|||
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= |
|||
google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= |
|||
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= |
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= |
|||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= |
|||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= |
|||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= |
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= |
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
@ -0,0 +1,333 @@ |
|||
// обертка для логирования, которая дополняем аттрибутами логируемого процесса logrus
|
|||
// дополняем значениями, идентифицирующими запущенный сервис UID,Name,Service
|
|||
|
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/lib" |
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
var logrusB = logrus.New() |
|||
|
|||
const sep = string(os.PathSeparator) |
|||
|
|||
// LogLine is the structure of one log-file line; used for unmarshaling
// records produced by the logrus JSON formatter.
type LogLine struct {
	Config string      `json:"config"` // uid of the config the process ran with
	Level  string      `json:"level"`
	Msg    interface{} `json:"msg"` // message payload; usually a string
	Name   string      `json:"name"`
	Srv    string      `json:"srv"`
	Time   string      `json:"time"`
	Uid    string      `json:"uid"`
}
|||
|
|||
// log is the single logger implementation shared by all backends (file, vfs,
// logbox); the Output writer decides where records actually go.
type log struct {
	// Output is the destination of records: stdout, an *os.File, or a
	// backend-specific io.Writer (vfs/logbox sender).
	Output io.Writer `json:"output"`

	// Levels selects active levels via substring match, e.g. "Error|Warning";
	// "All" enables everything and "Stdout" additionally mirrors records to
	// standard output.
	//
	// Debug:   debugging/profiling details; typically enabled at first launch
	//          or while hunting bottlenecks.
	// Info:    ordinary messages about what the system is doing.
	// Warning: something odd happened; triage and reclassify as info or error.
	// Error:   a failure requiring intervention (a user's bad input is NOT a
	//          system error).
	// Panic:   critical failures (bad configuration, hardware) that break the
	//          system or a subsystem; require immediate reaction.
	// Trace:   request-processing traces.
	Levels string `json:"levels"`
	// UID is the random identifier of the logged process (service instance).
	UID string `json:"uid"`
	// Name is the name of the logged process.
	Name string `json:"name"`
	// Service is the service kind (app/gui/...).
	Service string `json:"service"`
	// Dir is the directory where log files are stored.
	Dir string `json:"dir"`
	// Config is the uid of the configuration the process was started with.
	Config string `json:"config"`
	// IntervalReload is the period between checks of the current day's
	// active log file.
	IntervalReload time.Duration `json:"delay_reload"`
	// IntervalClearFiles is the period between checks for files to delete.
	IntervalClearFiles time.Duration `json:"interval_clear_files"`
	// PeriodSaveFiles is the retention period "years-months-days"
	// (e.g. "0-1-0" keeps one month).
	PeriodSaveFiles string `json:"period_save_files"`

	// LogboxURL is the address of the Logbox log-shipping service.
	LogboxURL string
	// LogboxSendInterval is the send period (records buffered in between).
	LogboxSendInterval time.Duration

	// File is the currently open log file (file backend only; nil otherwise).
	File *os.File

	// mux serializes access to the shared logrus instance.
	mux *sync.Mutex
}
|||
|
|||
// ConfigLogger общий конфигуратор логирования
|
|||
type ConfigLogger struct { |
|||
Level, Uid, Name, Srv, Config string |
|||
|
|||
File ConfigFileLogger |
|||
Vfs ConfigVfsLogger |
|||
Logbox ConfigLogboxLogger |
|||
Priority []string |
|||
} |
|||
|
|||
// Log is the logging contract exposed to consumers: one method per level,
// error-first variants for Error/Panic/Exit, and Close to release resources.
type Log interface {
	Trace(args ...interface{})
	Debug(args ...interface{})
	Info(args ...interface{})
	Warning(args ...interface{})
	Error(err error, args ...interface{})
	Panic(err error, args ...interface{})
	// Exit logs at fatal level and terminates the program.
	Exit(err error, args ...interface{})

	Close()
}
|||
|
|||
func (l *log) Trace(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Trace") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.TraceLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Trace(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Trace: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Debug(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Debug") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
// Only log the warning severity or above.
|
|||
logrusB.SetLevel(logrus.DebugLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Debug(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Debug: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Info(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Info") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
|
|||
logrusB.SetLevel(logrus.InfoLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Info(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Info: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Warning(args ...interface{}) { |
|||
if strings.Contains(l.Levels, "Warning") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.WarnLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Warn(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Warn: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Error(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Error") { |
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.ErrorLevel) |
|||
|
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Error(args...) |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Error: %+v\n", args) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (l *log) Panic(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Panic") { |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Panic: %+v\n", args) |
|||
} |
|||
|
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.PanicLevel) |
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Panic(args...) |
|||
} |
|||
} |
|||
|
|||
// Exit внутренняя ф-ция логирования и прекращения работы программы
|
|||
func (l *log) Exit(err error, args ...interface{}) { |
|||
if err != nil { |
|||
if args != nil { |
|||
args = append(args, "; error:", err) |
|||
} else { |
|||
args = append(args, "error:", err) |
|||
} |
|||
} |
|||
if strings.Contains(l.Levels, "Fatal") { |
|||
if strings.Contains(l.Levels, "Stdout") { |
|||
fmt.Printf("Exit: %+v\n", args) |
|||
} |
|||
|
|||
logrusB.SetOutput(l.Output) |
|||
logrusB.SetFormatter(&logrus.JSONFormatter{}) |
|||
logrusB.SetLevel(logrus.FatalLevel) |
|||
logrusB.WithFields(logrus.Fields{ |
|||
"name": l.Name, |
|||
"uid": l.UID, |
|||
"srv": l.Service, |
|||
"config": l.Config, |
|||
}).Fatal(args...) |
|||
} |
|||
} |
|||
|
|||
func (l *log) Close() { |
|||
l.File.Close() |
|||
} |
|||
|
|||
func NewLogger(ctx context.Context, cfg ConfigLogger) (logger Log, initType string, err error) { |
|||
var errI error |
|||
err = fmt.Errorf("logger init") |
|||
|
|||
for _, v := range cfg.Priority { |
|||
|
|||
if v == "file" && err != nil { |
|||
// если путь указан относительно / значит задан абсолютный путь, иначе в директории
|
|||
if cfg.File.Dir[:1] != sep { |
|||
rootDir, _ := lib.RootDir() |
|||
cfg.File.Dir = rootDir + sep + "logs" + sep + cfg.File.Dir |
|||
} |
|||
|
|||
// инициализировать лог и его ротацию
|
|||
logger, errI = NewFileLogger(ctx, cfg) |
|||
if errI != nil { |
|||
err = fmt.Errorf("%s %s failed init files-logger, (err: %s)", err, "→", errI) |
|||
fmt.Println(err, cfg) |
|||
} else { |
|||
initType = v |
|||
err = nil |
|||
} |
|||
} |
|||
|
|||
if v == "vfs" && err != nil { |
|||
// инициализировать лог и его ротацию
|
|||
vs := strings.Split(cfg.Vfs.Dir, sep) // берем только последнее значение в пути для vfs-логера
|
|||
vs = vs[len(vs)-1:] |
|||
if len(vs) != 0 { |
|||
cfg.Vfs.Dir = "logs" |
|||
} |
|||
|
|||
// инициализировать лог и его ротацию
|
|||
logger, errI = NewVfsLogger(ctx, cfg) |
|||
fmt.Println(logger, errI) |
|||
if errI != nil { |
|||
err = fmt.Errorf("%s %s failed init files-vfs, (err: %s)", err, "→", errI) |
|||
fmt.Println(err, cfg) |
|||
} else { |
|||
initType = v |
|||
err = nil |
|||
} |
|||
} |
|||
|
|||
if v == "logbox" && err != nil { |
|||
// инициализировать лог и его ротацию
|
|||
logger, errI = NewLogboxLogger(ctx, cfg) |
|||
if errI != nil { |
|||
err = fmt.Errorf("%s %s failed init files-logbox, (err: %s)", err, "→", errI) |
|||
} else { |
|||
initType = v |
|||
err = nil |
|||
} |
|||
} |
|||
|
|||
} |
|||
|
|||
return logger, initType, err |
|||
} |
@ -0,0 +1,196 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"runtime/debug" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/lib" |
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
// ConfigFileLogger configures the file logging backend.
type ConfigFileLogger struct {
	// Dir is the log directory (absolute when it starts with the path
	// separator, otherwise placed under <root>/logs — see NewLogger).
	Dir string
	// IntervalReload is the rotation check period; IntervalClearFiles is the
	// retention-cleanup check period.
	IntervalReload, IntervalClearFiles time.Duration
	// PeriodSaveFiles is the retention period "years-months-days"
	// (e.g. "0-1-0" keeps one month).
	PeriodSaveFiles string
}
|||
|
|||
// вспомогательная фукнция очистки старых файлов для файлового логера
|
|||
func (l *log) fileLoggerClearing(ctx context.Context) { |
|||
|
|||
// попытка очистки старых файлов (каждые пол часа)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalClearFiles) |
|||
defer ticker.Stop() |
|||
|
|||
// получаем период, через который мы будем удалять файлы
|
|||
period := l.PeriodSaveFiles |
|||
if period == "" { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
slPeriod := strings.Split(period, "-") |
|||
if len(slPeriod) < 3 { |
|||
l.Error(fmt.Errorf("%s", "Fail perion save log files. (expected format: year-month-day; eg: 0-1-0)")) |
|||
return |
|||
} |
|||
|
|||
// получаем числовые значения года месяца и дня для расчета даты удаления файлов
|
|||
year, err := strconv.Atoi(slPeriod[0]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Year from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
month, err := strconv.Atoi(slPeriod[1]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Month from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
day, err := strconv.Atoi(slPeriod[2]) |
|||
if err != nil { |
|||
l.Error(err, "Fail converted Day from period saved log files. (expected format: year-month-day; eg: 0-1-0)") |
|||
} |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
oneMonthAgo := time.Now().AddDate(-year, -month, -day) // minus 1 месяц
|
|||
fileMonthAgoDate := oneMonthAgo.Format("2006.01.02") |
|||
|
|||
// пробегаем директорию и читаем все файлы, если имя меньше текущее время - месяц = удаляем
|
|||
directory, _ := os.Open(l.Dir) |
|||
objects, err := directory.Readdir(-1) |
|||
if err != nil { |
|||
l.Error(err, "Error read directory: ", directory) |
|||
return |
|||
} |
|||
|
|||
for _, obj := range objects { |
|||
filename := obj.Name() |
|||
filenameMonthAgoDate := fileMonthAgoDate + "_" + l.Service |
|||
|
|||
if filenameMonthAgoDate > filename { |
|||
pathFile := l.Dir + sep + filename |
|||
err = os.Remove(pathFile) |
|||
if err != nil { |
|||
l.Error(err, "Error deleted file: ", pathFile) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
ticker = time.NewTicker(l.IntervalClearFiles) |
|||
} |
|||
} |
|||
}() |
|||
} |
|||
|
|||
// NewFileLogger инициируем логер, которых хранит логи в файлах по указанному пути
|
|||
func NewFileLogger(ctx context.Context, cfg ConfigLogger) (Log, error) { |
|||
var output io.Writer |
|||
var file *os.File |
|||
var err error |
|||
var mode os.FileMode |
|||
m := sync.Mutex{} |
|||
|
|||
l := &log{ |
|||
Output: output, |
|||
Levels: cfg.Level, |
|||
UID: cfg.Uid, |
|||
Name: cfg.Name, |
|||
Service: cfg.Srv, |
|||
Config: cfg.Config, |
|||
Dir: cfg.File.Dir, |
|||
IntervalReload: cfg.File.IntervalReload, |
|||
IntervalClearFiles: cfg.File.IntervalClearFiles, |
|||
PeriodSaveFiles: cfg.File.PeriodSaveFiles, |
|||
mux: &m, |
|||
File: file, |
|||
} |
|||
|
|||
datefile := time.Now().Format("2006.01.02") |
|||
logName := datefile + "_" + cfg.Srv + "_" + cfg.Uid + ".log" |
|||
|
|||
fmt.Println(logName) |
|||
|
|||
// создаем/открываем файл логирования и назначаем его логеру
|
|||
mode = 0711 |
|||
err = lib.CreateDir(cfg.File.Dir, mode) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating directory") |
|||
return nil, err |
|||
} |
|||
|
|||
pathFile := cfg.File.Dir + "/" + logName |
|||
if !lib.IsExist(pathFile) { |
|||
err = lib.CreateFile(pathFile) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating file") |
|||
return nil, err |
|||
} |
|||
} |
|||
|
|||
file, err = os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) |
|||
defer file.Close() |
|||
|
|||
l.File = file |
|||
l.Output = file |
|||
if err != nil { |
|||
logrus.Panic(err, "error opening file") |
|||
return nil, err |
|||
} |
|||
|
|||
defer func() { |
|||
rec := recover() |
|||
if rec != nil { |
|||
b := string(debug.Stack()) |
|||
fmt.Printf("panic in loggier (RotateInit). stack: %+v", b) |
|||
//cancel()
|
|||
//os.Exit(1)
|
|||
} |
|||
}() |
|||
|
|||
// попытка обновить файл (раз в 10 минут)
|
|||
go func() { |
|||
ticker := time.NewTicker(l.IntervalReload) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
l.File.Close() // закрыл старый файл
|
|||
datefile = time.Now().Format("2006.01.02") |
|||
logName = datefile + "_" + cfg.Srv + "_" + cfg.Uid + ".log" |
|||
pathFile = cfg.File.Dir + "/" + logName |
|||
if !lib.IsExist(pathFile) { |
|||
err := lib.CreateFile(pathFile) |
|||
if err != nil { |
|||
logrus.Error(err, "Error creating file") |
|||
return |
|||
} |
|||
} |
|||
|
|||
file, err = os.OpenFile(pathFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) |
|||
if err != nil { |
|||
logrus.Panic(err, "error opening file") |
|||
return |
|||
} |
|||
|
|||
output = file |
|||
l.Output = output |
|||
l.File = file |
|||
ticker = time.NewTicker(l.IntervalReload) |
|||
} |
|||
} |
|||
}() |
|||
l.fileLoggerClearing(ctx) |
|||
|
|||
return l, err |
|||
} |
@ -0,0 +1,76 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io" |
|||
"sync" |
|||
"time" |
|||
|
|||
logboxclient "git.lowcodeplatform.net/fabric/logbox-client" |
|||
) |
|||
|
|||
// ConfigLogboxLogger configures the logbox backend connection.
type ConfigLogboxLogger struct {
	Endpoint, AccessKeyID, SecretKey string
	// RequestTimeout bounds each request to the logbox server.
	RequestTimeout time.Duration
}
|||
|
|||
// NewLogboxLogger инициализация логер, которых отправляет логи на сервер сбора (logbox)
|
|||
func NewLogboxLogger(ctx context.Context, cfg ConfigLogger) (logger Log, err error) { |
|||
var output io.Writer |
|||
m := sync.Mutex{} |
|||
|
|||
client, err := logboxclient.New(ctx, cfg.Logbox.Endpoint, cfg.Logbox.RequestTimeout) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("error create connection whith logbox-server. err: %s", err) |
|||
} |
|||
|
|||
sender := newLogboxSender(client, cfg.Logbox.RequestTimeout) |
|||
output = sender |
|||
|
|||
l := &log{ |
|||
Output: output, |
|||
Config: cfg.Config, |
|||
Levels: cfg.Level, |
|||
UID: cfg.Uid, |
|||
Name: cfg.Name, |
|||
Service: cfg.Srv, |
|||
mux: &m, |
|||
} |
|||
|
|||
return l, err |
|||
} |
|||
|
|||
type logboxSender struct { |
|||
requestTimeout time.Duration |
|||
logboxClient logboxclient.Client |
|||
} |
|||
|
|||
func (v *logboxSender) Write(p []byte) (n int, err error) { |
|||
reqTimeout, cancel := context.WithTimeout(context.Background(), v.requestTimeout) |
|||
defer cancel() |
|||
|
|||
newReq := v.logboxClient.NewUpsertReq() |
|||
recordsBytes := bytes.Split(p, []byte("\n\n")) |
|||
for _, value := range recordsBytes { |
|||
l := LogLine{} |
|||
err = json.Unmarshal(value, &l) |
|||
if err != nil { |
|||
return 0, fmt.Errorf("error unmarshal to logline. err: %s, value: %s", err, string(value)) |
|||
} |
|||
newReq.AddEvent(*v.logboxClient.NewEvent(l.Config, l.Level, l.Msg.(string), l.Name, l.Srv, l.Time, l.Uid)) |
|||
} |
|||
|
|||
_, err = v.logboxClient.Upsert(reqTimeout, *newReq) |
|||
|
|||
return len(p), err |
|||
} |
|||
|
|||
func newLogboxSender(logboxClient logboxclient.Client, requestTimeout time.Duration) io.Writer { |
|||
return &logboxSender{ |
|||
requestTimeout, |
|||
logboxClient, |
|||
} |
|||
} |
@ -0,0 +1,98 @@ |
|||
package logger |
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"io" |
|||
"sync" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/lib" |
|||
) |
|||
|
|||
// ConfigVfsLogger configures the vfs logging backend.
type ConfigVfsLogger struct {
	// Connection settings passed straight to lib.NewVfs.
	Kind, Endpoint, AccessKeyID, SecretKey, Region, Bucket, Comma string
	// Dir is the storage directory for log files.
	Dir string
	// IntervalReload is the log-file rotation period.
	IntervalReload time.Duration
}
|||
|
|||
// NewVfsLogger инициализация отправки логов на сервер сбора
|
|||
// ВНИМАНИЕ! крайне неэффективно
|
|||
// при добавлении лога выкачивется весь файл лога, добавляется строка и перезаписывается
|
|||
func NewVfsLogger(ctx context.Context, cfg ConfigLogger) (logger Log, err error) { |
|||
var output io.Writer |
|||
m := sync.Mutex{} |
|||
|
|||
vfs := lib.NewVfs(cfg.Vfs.Kind, cfg.Vfs.Endpoint, cfg.Vfs.AccessKeyID, cfg.Vfs.SecretKey, cfg.Vfs.Region, cfg.Vfs.Bucket, cfg.Vfs.Comma) |
|||
err = vfs.Connect() |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
sender := newVfsSender(ctx, vfs, cfg.Vfs.Dir, cfg.Srv, cfg.Uid, cfg.Vfs.IntervalReload) |
|||
output = sender |
|||
|
|||
l := &log{ |
|||
Output: output, |
|||
Levels: cfg.Level, |
|||
UID: cfg.Uid, |
|||
Name: cfg.Name, |
|||
Service: cfg.Srv, |
|||
IntervalReload: cfg.Vfs.IntervalReload, |
|||
mux: &m, |
|||
} |
|||
|
|||
return l, nil |
|||
} |
|||
|
|||
type vfsSender struct { |
|||
vfsStorage lib.Vfs |
|||
file string |
|||
} |
|||
|
|||
func (v *vfsSender) Write(p []byte) (n int, err error) { |
|||
dataFile, _, err := v.vfsStorage.Read(v.file) |
|||
concatSlices := [][]byte{ |
|||
dataFile, |
|||
p, |
|||
} |
|||
resultSlice := bytes.Join(concatSlices, []byte("")) |
|||
|
|||
err = v.vfsStorage.Write(v.file, resultSlice) |
|||
if err != nil { |
|||
return 0, err |
|||
} |
|||
return len(p), nil |
|||
} |
|||
|
|||
func newVfsSender(ctx context.Context, vfsStorage lib.Vfs, dir, srv, uid string, intervalReload time.Duration) io.Writer { |
|||
|
|||
sender := &vfsSender{ |
|||
vfsStorage, |
|||
"", |
|||
} |
|||
|
|||
//datefile := time.Now().Format("2006.01.02")
|
|||
datefile := time.Now().Format("2006.01.02") |
|||
sender.file = "/" + dir + "/" + datefile + "_" + srv + "_" + uid + ".log" |
|||
|
|||
// попытка обновить файл (раз в 10 минут)
|
|||
go func() { |
|||
ticker := time.NewTicker(intervalReload) |
|||
defer ticker.Stop() |
|||
|
|||
for { |
|||
select { |
|||
case <-ctx.Done(): |
|||
return |
|||
case <-ticker.C: |
|||
datefile = time.Now().Format("2006.01.02") |
|||
|
|||
sender.file = "/" + dir + "/" + datefile + "_" + srv + "_" + uid + ".log" |
|||
ticker = time.NewTicker(intervalReload) |
|||
} |
|||
} |
|||
}() |
|||
|
|||
return sender |
|||
} |
@ -0,0 +1,2 @@ |
|||
[url "ssh://git.lowcodeplatform.net/"] |
|||
insteadOf = https://git.lowcodeplatform.net/ |
@ -0,0 +1,8 @@ |
|||
.history |
|||
.idea |
|||
.vscode |
|||
.DS_Store |
|||
*~merged* |
|||
*~merged |
|||
/public |
|||
.env |
@ -0,0 +1,11 @@ |
|||
# LogBox Client |
|||
|
|||
## Описание |
|||
Предназначен для работы с сервисом logbox |
|||
|
|||
## Генерация |
|||
|
|||
- переходим в директорию pkg/model |
|||
- выполните команду (для SDK Go) |
|||
|
|||
`protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative ./model.proto` |
@ -0,0 +1,54 @@ |
|||
package logbox_client |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
"git.lowcodeplatform.net/fabric/packages/grpcbalancer" |
|||
) |
|||
|
|||
var timeoutDefault = 1 * time.Second |
|||
|
|||
type client struct { |
|||
client *grpcbalancer.Client |
|||
} |
|||
|
|||
type Client interface { |
|||
Upsert(ctx context.Context, in upsertReq) (out upsertRes, err error) |
|||
Search(ctx context.Context, in searchRes) (out searchReq, err error) |
|||
|
|||
NewUpsertReq() *upsertReq |
|||
NewEvent(config, level, msg, name, srv, time, uid string) *event |
|||
} |
|||
|
|||
func (c *client) NewUpsertReq() *upsertReq { |
|||
return &upsertReq{} |
|||
} |
|||
|
|||
func New(ctx context.Context, url string, reqTimeout time.Duration) (Client, error) { |
|||
if reqTimeout == 0 { |
|||
reqTimeout = timeoutDefault |
|||
} |
|||
b, err := grpcbalancer.New( |
|||
grpcbalancer.WithUrls(url), |
|||
grpcbalancer.WithInsecure(), |
|||
grpcbalancer.WithTimeout(reqTimeout), |
|||
) |
|||
if err != nil { |
|||
fmt.Printf("failed init grpcbalancer, err: %s", err) |
|||
|
|||
return nil, err |
|||
} |
|||
if b == nil { |
|||
return nil, fmt.Errorf("error init connect (grpcbalancer) client") |
|||
} |
|||
// фикс, если нет соединения, то возвращается {}
|
|||
if fmt.Sprint(b) == "{}" { |
|||
return nil, fmt.Errorf("error init connect (grpcbalancer) client") |
|||
} |
|||
|
|||
return &client{ |
|||
client: b, |
|||
}, err |
|||
} |
@ -0,0 +1,67 @@ |
|||
package logbox_client |
|||
|
|||
type event struct { |
|||
config string `json:"config"` |
|||
level string `json:"level"` |
|||
msg string `json:"msg"` |
|||
name string `json:"name"` |
|||
srv string `json:"srv"` |
|||
time string `json:"time"` |
|||
uid string `json:"uid"` |
|||
} |
|||
|
|||
type Column struct { |
|||
Name string |
|||
SearchValue string |
|||
} |
|||
|
|||
type upsertReq struct { |
|||
Events []event |
|||
} |
|||
|
|||
type upsertRes struct { |
|||
Status bool |
|||
Error string |
|||
} |
|||
|
|||
type searchReq struct { |
|||
Draw int `json:"draw"` |
|||
RecordsTotal int `json:"recordsTotal"` |
|||
RecordsFiltered int `json:"recordsFiltered"` |
|||
Data []event `json:"data"` |
|||
} |
|||
|
|||
type searchRes struct { |
|||
Columns []Column |
|||
Search string `json:"search"` |
|||
Draw int `json:"draw"` |
|||
Limit int `json:"limit"` |
|||
Skip int `json:"skip"` //nolint
|
|||
OrderBy int `json:"order_by"` |
|||
Dir string `json:"dir"` |
|||
} |
|||
|
|||
func (u *upsertReq) AddEvent(event event) { |
|||
u.Events = append(u.Events, event) |
|||
return |
|||
} |
|||
|
|||
func (c *client) NewEvent( |
|||
Config string, |
|||
Level string, |
|||
Msg string, |
|||
Name string, |
|||
Srv string, |
|||
Time string, |
|||
Uid string, |
|||
) *event { |
|||
return &event{ |
|||
config: Config, |
|||
level: Level, |
|||
msg: Msg, |
|||
name: Name, |
|||
srv: Srv, |
|||
time: Time, |
|||
uid: Uid, |
|||
} |
|||
} |
@ -0,0 +1,64 @@ |
|||
package logbox_client |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
pb "git.lowcodeplatform.net/fabric/logbox/pkg/model/sdk" |
|||
) |
|||
|
|||
func (c *client) Search(ctx context.Context, in searchRes) (out searchReq, err error) { |
|||
conn, err := c.client.Conn(ctx) |
|||
if err != nil || conn == nil { |
|||
err = fmt.Errorf("[client] [logbox] cannot get grpc connection") |
|||
return out, err |
|||
} |
|||
|
|||
// добавили выход по контексту, для случаев, если соединение таймаутит
|
|||
ctxWithDeadline, cancel := context.WithTimeout(ctx, 1*time.Second) |
|||
defer cancel() |
|||
|
|||
columns := []*pb.Column{} |
|||
|
|||
for _, v := range in.Columns { |
|||
column := pb.Column{ |
|||
Name: v.Name, |
|||
SearchValue: v.SearchValue, |
|||
} |
|||
columns = append(columns, &column) |
|||
} |
|||
|
|||
inClient := &pb.SearchRequest{ |
|||
Columns: columns, |
|||
Search: in.Search, |
|||
Draw: uint64(in.Draw), |
|||
Limit: uint64(in.Limit), |
|||
Skip: uint64(in.Skip), |
|||
OrderBy: uint64(in.OrderBy), |
|||
Dir: in.Dir, |
|||
} |
|||
|
|||
client := pb.NewLogboxClient(conn) |
|||
|
|||
result, err := client.Search(ctxWithDeadline, inClient) |
|||
if err != nil { |
|||
err = fmt.Errorf("[client] [logbox] error request Search. err: %s", err) |
|||
return out, err |
|||
} |
|||
|
|||
if result == nil { |
|||
return out, fmt.Errorf("[client] [logbox] error request Search. result is empty") |
|||
} |
|||
|
|||
out.Draw = int(result.Draw) |
|||
out.RecordsTotal = int(result.RecordsTotal) |
|||
out.RecordsFiltered = int(result.RecordsFiltered) |
|||
out.Data = []event{} |
|||
for _, m := range result.Data { |
|||
ev := c.NewEvent(m.Config, m.Level, m.Msg, m.Name, m.Srv, m.Time, m.Uid) |
|||
out.Data = append(out.Data, *ev) |
|||
} |
|||
|
|||
return out, err |
|||
} |
@ -0,0 +1,59 @@ |
|||
package logbox_client |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"time" |
|||
|
|||
pb "git.lowcodeplatform.net/fabric/logbox/pkg/model/sdk" |
|||
) |
|||
|
|||
func (c *client) Upsert(ctx context.Context, in upsertReq) (out upsertRes, err error) { |
|||
conn, err := c.client.Conn(ctx) |
|||
if err != nil { |
|||
err = fmt.Errorf("cannot get grpc connection. err: %s, client: %+v", err, c.client) |
|||
return out, err |
|||
} |
|||
if conn == nil { |
|||
err = fmt.Errorf("cannot get grpc connection (connection is null)") |
|||
return out, err |
|||
} |
|||
|
|||
// добавил выход по контексту, для случаев, если соединение таймаутит
|
|||
ctxWithDeadline, cancel := context.WithTimeout(ctx, 1*time.Second) |
|||
defer cancel() |
|||
|
|||
events := &pb.UpsertRequest{} |
|||
for _, v := range in.Events { |
|||
events.Events = append(events.Events, &pb.Event{ |
|||
Config: v.config, |
|||
Level: v.level, |
|||
Uid: v.uid, |
|||
Time: v.time, |
|||
Srv: v.srv, |
|||
Msg: v.msg, |
|||
Name: v.name, |
|||
}) |
|||
} |
|||
|
|||
client := pb.NewLogboxClient(conn) |
|||
res, err := client.Upsert(ctxWithDeadline, events) |
|||
if err != nil { |
|||
return out, err |
|||
} |
|||
if res == nil { |
|||
return out, fmt.Errorf("error upsert message to logbox. result from upsert is empty") |
|||
} |
|||
|
|||
out.Status = res.Status |
|||
out.Error = res.Error |
|||
|
|||
if res.Error != "" { |
|||
err = fmt.Errorf("error send message. err: %s", res.Error) |
|||
} |
|||
if !res.Status { |
|||
err = fmt.Errorf("fail status. err: %s", err) |
|||
} |
|||
|
|||
return out, err |
|||
} |
@ -0,0 +1,655 @@ |
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// versions:
|
|||
// protoc-gen-go v1.28.1
|
|||
// protoc v3.21.4
|
|||
// source: model.proto
|
|||
|
|||
package __ |
|||
|
|||
import ( |
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
reflect "reflect" |
|||
sync "sync" |
|||
) |
|||
|
|||
const ( |
|||
// Verify that this generated code is sufficiently up-to-date.
|
|||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) |
|||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) |
|||
) |
|||
|
|||
type Event struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Config string `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` |
|||
Level string `protobuf:"bytes,2,opt,name=level,proto3" json:"level,omitempty"` |
|||
Msg string `protobuf:"bytes,3,opt,name=msg,proto3" json:"msg,omitempty"` |
|||
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` |
|||
Srv string `protobuf:"bytes,5,opt,name=srv,proto3" json:"srv,omitempty"` |
|||
Time string `protobuf:"bytes,6,opt,name=time,proto3" json:"time,omitempty"` |
|||
Uid string `protobuf:"bytes,7,opt,name=uid,proto3" json:"uid,omitempty"` |
|||
} |
|||
|
|||
func (x *Event) Reset() { |
|||
*x = Event{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[0] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *Event) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*Event) ProtoMessage() {} |
|||
|
|||
func (x *Event) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[0] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use Event.ProtoReflect.Descriptor instead.
|
|||
func (*Event) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{0} |
|||
} |
|||
|
|||
func (x *Event) GetConfig() string { |
|||
if x != nil { |
|||
return x.Config |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetLevel() string { |
|||
if x != nil { |
|||
return x.Level |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetMsg() string { |
|||
if x != nil { |
|||
return x.Msg |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetName() string { |
|||
if x != nil { |
|||
return x.Name |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetSrv() string { |
|||
if x != nil { |
|||
return x.Srv |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetTime() string { |
|||
if x != nil { |
|||
return x.Time |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Event) GetUid() string { |
|||
if x != nil { |
|||
return x.Uid |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
//TODO: Если делать поиск с возможностью учитывать регист создать структуру Search (используется в корне и колонках)
|
|||
type Column struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` |
|||
SearchValue string `protobuf:"bytes,2,opt,name=SearchValue,proto3" json:"SearchValue,omitempty"` |
|||
} |
|||
|
|||
func (x *Column) Reset() { |
|||
*x = Column{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[1] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *Column) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*Column) ProtoMessage() {} |
|||
|
|||
func (x *Column) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[1] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use Column.ProtoReflect.Descriptor instead.
|
|||
func (*Column) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{1} |
|||
} |
|||
|
|||
func (x *Column) GetName() string { |
|||
if x != nil { |
|||
return x.Name |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *Column) GetSearchValue() string { |
|||
if x != nil { |
|||
return x.SearchValue |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
type UpsertRequest struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Events []*Event `protobuf:"bytes,1,rep,name=Events,proto3" json:"Events,omitempty"` |
|||
} |
|||
|
|||
func (x *UpsertRequest) Reset() { |
|||
*x = UpsertRequest{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[2] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *UpsertRequest) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*UpsertRequest) ProtoMessage() {} |
|||
|
|||
func (x *UpsertRequest) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[2] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use UpsertRequest.ProtoReflect.Descriptor instead.
|
|||
func (*UpsertRequest) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{2} |
|||
} |
|||
|
|||
func (x *UpsertRequest) GetEvents() []*Event { |
|||
if x != nil { |
|||
return x.Events |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
type UpsertResponse struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Status bool `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` |
|||
Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` |
|||
} |
|||
|
|||
func (x *UpsertResponse) Reset() { |
|||
*x = UpsertResponse{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[3] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *UpsertResponse) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*UpsertResponse) ProtoMessage() {} |
|||
|
|||
func (x *UpsertResponse) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[3] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use UpsertResponse.ProtoReflect.Descriptor instead.
|
|||
func (*UpsertResponse) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{3} |
|||
} |
|||
|
|||
func (x *UpsertResponse) GetStatus() bool { |
|||
if x != nil { |
|||
return x.Status |
|||
} |
|||
return false |
|||
} |
|||
|
|||
func (x *UpsertResponse) GetError() string { |
|||
if x != nil { |
|||
return x.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
type SearchRequest struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Columns []*Column `protobuf:"bytes,1,rep,name=Columns,proto3" json:"Columns,omitempty"` |
|||
Search string `protobuf:"bytes,2,opt,name=Search,proto3" json:"Search,omitempty"` |
|||
Draw uint64 `protobuf:"varint,3,opt,name=Draw,proto3" json:"Draw,omitempty"` |
|||
Limit uint64 `protobuf:"varint,4,opt,name=Limit,proto3" json:"Limit,omitempty"` |
|||
Skip uint64 `protobuf:"varint,5,opt,name=Skip,proto3" json:"Skip,omitempty"` |
|||
OrderBy uint64 `protobuf:"varint,6,opt,name=OrderBy,proto3" json:"OrderBy,omitempty"` |
|||
Dir string `protobuf:"bytes,7,opt,name=Dir,proto3" json:"Dir,omitempty"` |
|||
} |
|||
|
|||
func (x *SearchRequest) Reset() { |
|||
*x = SearchRequest{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[4] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *SearchRequest) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*SearchRequest) ProtoMessage() {} |
|||
|
|||
func (x *SearchRequest) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[4] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead.
|
|||
func (*SearchRequest) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{4} |
|||
} |
|||
|
|||
func (x *SearchRequest) GetColumns() []*Column { |
|||
if x != nil { |
|||
return x.Columns |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (x *SearchRequest) GetSearch() string { |
|||
if x != nil { |
|||
return x.Search |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *SearchRequest) GetDraw() uint64 { |
|||
if x != nil { |
|||
return x.Draw |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchRequest) GetLimit() uint64 { |
|||
if x != nil { |
|||
return x.Limit |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchRequest) GetSkip() uint64 { |
|||
if x != nil { |
|||
return x.Skip |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchRequest) GetOrderBy() uint64 { |
|||
if x != nil { |
|||
return x.OrderBy |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchRequest) GetDir() string { |
|||
if x != nil { |
|||
return x.Dir |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
type SearchResponse struct { |
|||
state protoimpl.MessageState |
|||
sizeCache protoimpl.SizeCache |
|||
unknownFields protoimpl.UnknownFields |
|||
|
|||
Draw uint64 `protobuf:"varint,1,opt,name=Draw,proto3" json:"Draw,omitempty"` |
|||
RecordsTotal uint64 `protobuf:"varint,2,opt,name=RecordsTotal,proto3" json:"RecordsTotal,omitempty"` |
|||
RecordsFiltered uint64 `protobuf:"varint,3,opt,name=RecordsFiltered,proto3" json:"RecordsFiltered,omitempty"` |
|||
Data []*Event `protobuf:"bytes,4,rep,name=Data,proto3" json:"Data,omitempty"` |
|||
Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` |
|||
Status bool `protobuf:"varint,6,opt,name=status,proto3" json:"status,omitempty"` |
|||
} |
|||
|
|||
func (x *SearchResponse) Reset() { |
|||
*x = SearchResponse{} |
|||
if protoimpl.UnsafeEnabled { |
|||
mi := &file_model_proto_msgTypes[5] |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
} |
|||
|
|||
func (x *SearchResponse) String() string { |
|||
return protoimpl.X.MessageStringOf(x) |
|||
} |
|||
|
|||
func (*SearchResponse) ProtoMessage() {} |
|||
|
|||
func (x *SearchResponse) ProtoReflect() protoreflect.Message { |
|||
mi := &file_model_proto_msgTypes[5] |
|||
if protoimpl.UnsafeEnabled && x != nil { |
|||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) |
|||
if ms.LoadMessageInfo() == nil { |
|||
ms.StoreMessageInfo(mi) |
|||
} |
|||
return ms |
|||
} |
|||
return mi.MessageOf(x) |
|||
} |
|||
|
|||
// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead.
|
|||
func (*SearchResponse) Descriptor() ([]byte, []int) { |
|||
return file_model_proto_rawDescGZIP(), []int{5} |
|||
} |
|||
|
|||
func (x *SearchResponse) GetDraw() uint64 { |
|||
if x != nil { |
|||
return x.Draw |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchResponse) GetRecordsTotal() uint64 { |
|||
if x != nil { |
|||
return x.RecordsTotal |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchResponse) GetRecordsFiltered() uint64 { |
|||
if x != nil { |
|||
return x.RecordsFiltered |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (x *SearchResponse) GetData() []*Event { |
|||
if x != nil { |
|||
return x.Data |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (x *SearchResponse) GetError() string { |
|||
if x != nil { |
|||
return x.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (x *SearchResponse) GetStatus() bool { |
|||
if x != nil { |
|||
return x.Status |
|||
} |
|||
return false |
|||
} |
|||
|
|||
var File_model_proto protoreflect.FileDescriptor |
|||
|
|||
var file_model_proto_rawDesc = []byte{ |
|||
0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6c, |
|||
0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x93, 0x01, 0x0a, |
|||
0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, |
|||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, |
|||
0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, |
|||
0x65, 0x76, 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, |
|||
0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, |
|||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x72, |
|||
0x76, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x72, 0x76, 0x12, 0x12, 0x0a, 0x04, |
|||
0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, |
|||
0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, |
|||
0x69, 0x64, 0x22, 0x3e, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, |
|||
0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, |
|||
0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, |
|||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x56, 0x61, 0x6c, |
|||
0x75, 0x65, 0x22, 0x3d, 0x0a, 0x0d, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, |
|||
0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, |
|||
0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, 0x63, 0x6c, 0x69, |
|||
0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, |
|||
0x73, 0x22, 0x3e, 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, |
|||
0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, |
|||
0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, |
|||
0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, |
|||
0x72, 0x22, 0xc2, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, |
|||
0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x01, |
|||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, 0x63, 0x6c, |
|||
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x43, 0x6f, 0x6c, |
|||
0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x18, 0x02, |
|||
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, |
|||
0x44, 0x72, 0x61, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x44, 0x72, 0x61, 0x77, |
|||
0x12, 0x14, 0x0a, 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, |
|||
0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x6b, 0x69, 0x70, 0x18, 0x05, |
|||
0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x53, 0x6b, 0x69, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x4f, 0x72, |
|||
0x64, 0x65, 0x72, 0x42, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x4f, 0x72, 0x64, |
|||
0x65, 0x72, 0x42, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x69, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, |
|||
0x09, 0x52, 0x03, 0x44, 0x69, 0x72, 0x22, 0xca, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x72, 0x63, |
|||
0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x72, 0x61, |
|||
0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x44, 0x72, 0x61, 0x77, 0x12, 0x22, 0x0a, |
|||
0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, |
|||
0x01, 0x28, 0x04, 0x52, 0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x74, 0x61, |
|||
0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, |
|||
0x65, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x52, 0x65, 0x63, 0x6f, |
|||
0x72, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x44, |
|||
0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, 0x62, |
|||
0x6f, 0x78, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, |
|||
0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, |
|||
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, |
|||
0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, |
|||
0x74, 0x75, 0x73, 0x32, 0x96, 0x01, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x12, 0x45, |
|||
0x0a, 0x06, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x12, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x62, 0x6f, |
|||
0x78, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, |
|||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, |
|||
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x73, |
|||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, |
|||
0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, |
|||
0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, |
|||
0x6c, 0x6f, 0x67, 0x62, 0x6f, 0x78, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, |
|||
0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x03, 0x5a, 0x01, |
|||
0x2e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, |
|||
} |
|||
|
|||
var ( |
|||
file_model_proto_rawDescOnce sync.Once |
|||
file_model_proto_rawDescData = file_model_proto_rawDesc |
|||
) |
|||
|
|||
func file_model_proto_rawDescGZIP() []byte { |
|||
file_model_proto_rawDescOnce.Do(func() { |
|||
file_model_proto_rawDescData = protoimpl.X.CompressGZIP(file_model_proto_rawDescData) |
|||
}) |
|||
return file_model_proto_rawDescData |
|||
} |
|||
|
|||
var file_model_proto_msgTypes = make([]protoimpl.MessageInfo, 6) |
|||
var file_model_proto_goTypes = []interface{}{ |
|||
(*Event)(nil), // 0: logbox_client.Event
|
|||
(*Column)(nil), // 1: logbox_client.Column
|
|||
(*UpsertRequest)(nil), // 2: logbox_client.UpsertRequest
|
|||
(*UpsertResponse)(nil), // 3: logbox_client.UpsertResponse
|
|||
(*SearchRequest)(nil), // 4: logbox_client.SearchRequest
|
|||
(*SearchResponse)(nil), // 5: logbox_client.SearchResponse
|
|||
} |
|||
var file_model_proto_depIdxs = []int32{ |
|||
0, // 0: logbox_client.UpsertRequest.Events:type_name -> logbox_client.Event
|
|||
1, // 1: logbox_client.SearchRequest.Columns:type_name -> logbox_client.Column
|
|||
0, // 2: logbox_client.SearchResponse.Data:type_name -> logbox_client.Event
|
|||
2, // 3: logbox_client.Logbox.Upsert:input_type -> logbox_client.UpsertRequest
|
|||
4, // 4: logbox_client.Logbox.Search:input_type -> logbox_client.SearchRequest
|
|||
3, // 5: logbox_client.Logbox.Upsert:output_type -> logbox_client.UpsertResponse
|
|||
5, // 6: logbox_client.Logbox.Search:output_type -> logbox_client.SearchResponse
|
|||
5, // [5:7] is the sub-list for method output_type
|
|||
3, // [3:5] is the sub-list for method input_type
|
|||
3, // [3:3] is the sub-list for extension type_name
|
|||
3, // [3:3] is the sub-list for extension extendee
|
|||
0, // [0:3] is the sub-list for field type_name
|
|||
} |
|||
|
|||
func init() { file_model_proto_init() } |
|||
func file_model_proto_init() { |
|||
if File_model_proto != nil { |
|||
return |
|||
} |
|||
if !protoimpl.UnsafeEnabled { |
|||
file_model_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*Event); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
file_model_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*Column); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
file_model_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*UpsertRequest); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
file_model_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*UpsertResponse); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
file_model_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*SearchRequest); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
file_model_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { |
|||
switch v := v.(*SearchResponse); i { |
|||
case 0: |
|||
return &v.state |
|||
case 1: |
|||
return &v.sizeCache |
|||
case 2: |
|||
return &v.unknownFields |
|||
default: |
|||
return nil |
|||
} |
|||
} |
|||
} |
|||
type x struct{} |
|||
out := protoimpl.TypeBuilder{ |
|||
File: protoimpl.DescBuilder{ |
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
|||
RawDescriptor: file_model_proto_rawDesc, |
|||
NumEnums: 0, |
|||
NumMessages: 6, |
|||
NumExtensions: 0, |
|||
NumServices: 1, |
|||
}, |
|||
GoTypes: file_model_proto_goTypes, |
|||
DependencyIndexes: file_model_proto_depIdxs, |
|||
MessageInfos: file_model_proto_msgTypes, |
|||
}.Build() |
|||
File_model_proto = out.File |
|||
file_model_proto_rawDesc = nil |
|||
file_model_proto_goTypes = nil |
|||
file_model_proto_depIdxs = nil |
|||
} |
@ -0,0 +1,54 @@ |
|||
syntax = "proto3";

package logbox_client;

option go_package = ".";

// Event is a single log record stored in logbox.
message Event {
  string config = 1;
  string level = 2;
  string msg = 3;
  string name = 4;
  string srv = 5;
  string time = 6;
  string uid = 7;
}

// TODO: to support case-sensitivity in search, introduce a dedicated Search
// structure (used both at the root and per column).
message Column {
  string Name = 1;
  string SearchValue = 2;
}

message UpsertRequest {
  repeated Event Events = 1;
}

message UpsertResponse {
  bool status = 1;
  string error = 2;
}

// SearchRequest carries datatables-style paging/ordering parameters plus
// per-column filters.
message SearchRequest {
  repeated Column Columns = 1;
  string Search = 2;
  uint64 Draw = 3;
  uint64 Limit = 4;
  uint64 Skip = 5;
  uint64 OrderBy = 6;
  string Dir = 7;
}

message SearchResponse {
  uint64 Draw = 1;
  uint64 RecordsTotal = 2;
  uint64 RecordsFiltered = 3;
  repeated Event Data = 4;
  string error = 5;
  bool status = 6;
}

service Logbox {
  rpc Upsert(UpsertRequest) returns (UpsertResponse);
  rpc Search(SearchRequest) returns (SearchResponse);
}
@ -0,0 +1,141 @@ |
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
|||
// versions:
|
|||
// - protoc-gen-go-grpc v1.2.0
|
|||
// - protoc v3.21.4
|
|||
// source: model.proto
|
|||
|
|||
package __ |
|||
|
|||
import ( |
|||
context "context" |
|||
grpc "google.golang.org/grpc" |
|||
codes "google.golang.org/grpc/codes" |
|||
status "google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// This is a compile-time assertion to ensure that this generated file
|
|||
// is compatible with the grpc package it is being compiled against.
|
|||
// Requires gRPC-Go v1.32.0 or later.
|
|||
const _ = grpc.SupportPackageIsVersion7 |
|||
|
|||
// LogboxClient is the client API for Logbox service.
|
|||
//
|
|||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
|||
type LogboxClient interface { |
|||
Upsert(ctx context.Context, in *UpsertRequest, opts ...grpc.CallOption) (*UpsertResponse, error) |
|||
Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) |
|||
} |
|||
|
|||
type logboxClient struct { |
|||
cc grpc.ClientConnInterface |
|||
} |
|||
|
|||
func NewLogboxClient(cc grpc.ClientConnInterface) LogboxClient { |
|||
return &logboxClient{cc} |
|||
} |
|||
|
|||
func (c *logboxClient) Upsert(ctx context.Context, in *UpsertRequest, opts ...grpc.CallOption) (*UpsertResponse, error) { |
|||
out := new(UpsertResponse) |
|||
err := c.cc.Invoke(ctx, "/logbox_client.Logbox/Upsert", in, out, opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return out, nil |
|||
} |
|||
|
|||
func (c *logboxClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { |
|||
out := new(SearchResponse) |
|||
err := c.cc.Invoke(ctx, "/logbox_client.Logbox/Search", in, out, opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return out, nil |
|||
} |
|||
|
|||
// LogboxServer is the server API for Logbox service.
|
|||
// All implementations must embed UnimplementedLogboxServer
|
|||
// for forward compatibility
|
|||
type LogboxServer interface { |
|||
Upsert(context.Context, *UpsertRequest) (*UpsertResponse, error) |
|||
Search(context.Context, *SearchRequest) (*SearchResponse, error) |
|||
mustEmbedUnimplementedLogboxServer() |
|||
} |
|||
|
|||
// UnimplementedLogboxServer must be embedded to have forward compatible implementations.
|
|||
type UnimplementedLogboxServer struct { |
|||
} |
|||
|
|||
func (UnimplementedLogboxServer) Upsert(context.Context, *UpsertRequest) (*UpsertResponse, error) { |
|||
return nil, status.Errorf(codes.Unimplemented, "method Upsert not implemented") |
|||
} |
|||
func (UnimplementedLogboxServer) Search(context.Context, *SearchRequest) (*SearchResponse, error) { |
|||
return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") |
|||
} |
|||
func (UnimplementedLogboxServer) mustEmbedUnimplementedLogboxServer() {} |
|||
|
|||
// UnsafeLogboxServer may be embedded to opt out of forward compatibility for this service.
|
|||
// Use of this interface is not recommended, as added methods to LogboxServer will
|
|||
// result in compilation errors.
|
|||
type UnsafeLogboxServer interface { |
|||
mustEmbedUnimplementedLogboxServer() |
|||
} |
|||
|
|||
func RegisterLogboxServer(s grpc.ServiceRegistrar, srv LogboxServer) { |
|||
s.RegisterService(&Logbox_ServiceDesc, srv) |
|||
} |
|||
|
|||
func _Logbox_Upsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
|||
in := new(UpsertRequest) |
|||
if err := dec(in); err != nil { |
|||
return nil, err |
|||
} |
|||
if interceptor == nil { |
|||
return srv.(LogboxServer).Upsert(ctx, in) |
|||
} |
|||
info := &grpc.UnaryServerInfo{ |
|||
Server: srv, |
|||
FullMethod: "/logbox_client.Logbox/Upsert", |
|||
} |
|||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
|||
return srv.(LogboxServer).Upsert(ctx, req.(*UpsertRequest)) |
|||
} |
|||
return interceptor(ctx, in, info, handler) |
|||
} |
|||
|
|||
// _Logbox_Search_Handler decodes a SearchRequest and dispatches it to the
// registered LogboxServer, routing through the unary interceptor when one
// is configured.
func _Logbox_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SearchRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(LogboxServer).Search(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/logbox_client.Logbox/Search",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(LogboxServer).Search(ctx, req.(*SearchRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|||
|
|||
// Logbox_ServiceDesc is the grpc.ServiceDesc for Logbox service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Logbox_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "logbox_client.Logbox",
	HandlerType: (*LogboxServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Upsert",
			Handler:    _Logbox_Upsert_Handler,
		},
		{
			MethodName: "Search",
			Handler:    _Logbox_Search_Handler,
		},
	},
	// Logbox declares no streaming methods.
	Streams:  []grpc.StreamDesc{},
	Metadata: "model.proto",
}
@ -0,0 +1,3 @@ |
|||
# grpcbalancer |
|||
|
|||
Пакет для переиспользования gRPC-соединений.
@ -0,0 +1,117 @@ |
|||
package grpcbalancer |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"errors" |
|||
"strings" |
|||
"time" |
|||
|
|||
_ "github.com/Jille/grpc-multi-resolver" |
|||
"google.golang.org/grpc" |
|||
"google.golang.org/grpc/connectivity" |
|||
"google.golang.org/grpc/resolver" |
|||
) |
|||
|
|||
const (
	// resolverDefaultScheme is the name resolution scheme used for a
	// single-endpoint target.
	resolverDefaultScheme = "dns"
	// resolverSchemePrefixMulti routes comma-separated endpoint lists
	// through the multi-resolver (see github.com/Jille/grpc-multi-resolver).
	resolverSchemePrefixMulti = "multi:///"
)
|||
|
|||
var (
	// ErrNoTarget is returned by SetConn when the Client has no target.
	ErrNoTarget = errors.New("no target to dial")
	// ErrSerializing is returned when the service config cannot be encoded.
	ErrSerializing = errors.New("error serializing grpc config")
	// ErrEmptyTarget is returned by New when no URLs were supplied.
	ErrEmptyTarget = errors.New("empty target")
	// timeoutDefault is the dial timeout applied when WithTimeout is not used.
	timeoutDefault = 1 * time.Second
)
|||
|
|||
// Client caches a single gRPC client connection to one or more target
// endpoints and redials it on demand.
//
// NOTE(review): fields are mutated without synchronization; looks like a
// Client is expected to be used from one goroutine at a time — confirm.
type Client struct {
	conn      *grpc.ClientConn   // last established connection, if any
	target    string             // comma-separated endpoint list
	balancing BalancingPolicy    // load-balancing policy for the service config
	insecure  bool               // dial without transport security
	timeout   time.Duration      // dial timeout used by SetConn
	cancel    context.CancelFunc // cancels the context of the last dial
}
|||
|
|||
// Conn returns a ready gRPC connection, dialing a fresh one via SetConn
// when no usable cached connection exists.
func (c *Client) Conn(ctx context.Context) (*grpc.ClientConn, error) {
	if existing := c.GetConn(); existing != nil {
		return existing, nil
	}
	return c.SetConn(ctx)
}
|||
|
|||
func (c *Client) GetConn() *grpc.ClientConn { |
|||
if c.conn != nil { |
|||
if c.conn.GetState() == connectivity.Ready { |
|||
return c.conn |
|||
} |
|||
_ = c.conn.Close() |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// SetConn dials the configured target (blocking, bounded by c.timeout),
// caches the resulting connection on the Client and returns it.
//
// A comma-separated target is routed through the multi-resolver so the
// endpoints act as a single load-balanced ClientConn.
func (c *Client) SetConn(ctx context.Context) (*grpc.ClientConn, error) {
	if c.target == "" {
		return nil, ErrNoTarget
	}

	resolver.SetDefaultScheme(resolverDefaultScheme)
	schema := ""
	if strings.Contains(c.target, ",") {
		// Several endpoints: use the "multi" scheme so all of them are
		// resolved into one ClientConn.
		schema = resolverSchemePrefixMulti
	}

	opts := []grpc.DialOption{
		grpc.WithBlock(),
	}
	if c.insecure {
		opts = append(opts, grpc.WithInsecure())
	}
	conf := grpcConfig{
		Balancing: c.balancing,
	}
	confSerialized, err := json.Marshal(conf)
	if err != nil {
		return nil, ErrSerializing
	}
	opts = append(opts, grpc.WithDefaultServiceConfig(string(confSerialized)))

	// Release the context of any previous dial before starting a new one,
	// otherwise its CancelFunc is silently leaked.
	if c.cancel != nil {
		c.cancel()
	}
	ctxWithTimeout, cancel := context.WithTimeout(ctx, c.timeout)
	conn, err := grpc.DialContext(ctxWithTimeout, schema+c.target, opts...)
	if err != nil {
		// Do not leak the timeout context on a failed dial.
		cancel()
		c.cancel = nil
		return nil, err
	}

	c.cancel = cancel
	c.conn = conn
	return c.conn, nil
}
|||
|
|||
func (c *Client) Close() error { |
|||
if c.conn == nil { |
|||
return nil |
|||
} |
|||
c.cancel() |
|||
return c.conn.Close() |
|||
} |
|||
|
|||
func New(opts ...BalancerOption) (*Client, error) { |
|||
c := &Client{} |
|||
|
|||
for _, opt := range opts { |
|||
opt(c) |
|||
} |
|||
|
|||
if c.timeout == 0 { |
|||
c.timeout = timeoutDefault |
|||
} |
|||
if c.target == "" { |
|||
return nil, ErrEmptyTarget |
|||
} |
|||
|
|||
return c, nil |
|||
} |
@ -0,0 +1,33 @@ |
|||
package grpcbalancer |
|||
|
|||
import ( |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
// BalancerOption mutates a Client during construction by New.
type BalancerOption func(*Client)
|||
|
|||
// WithUrls sets the dial target as a comma-separated list of endpoints.
func WithUrls(target ...string) BalancerOption {
	joined := strings.Join(target, ",")
	return func(c *Client) {
		c.target = joined
	}
}
|||
|
|||
func WithInsecure() BalancerOption { |
|||
return func(c *Client) { |
|||
c.insecure = true |
|||
} |
|||
} |
|||
|
|||
// WithTimeout sets the dial timeout used by SetConn.
// The parameter is named timeout (not time) so it does not shadow the
// imported time package.
func WithTimeout(timeout time.Duration) BalancerOption {
	return func(c *Client) {
		c.timeout = timeout
	}
}
|||
|
|||
// Default mode — round robin
|
|||
func WithBalancingMode(bp BalancingPolicy) BalancerOption { |
|||
return func(c *Client) { |
|||
c.balancing = bp |
|||
} |
|||
} |
@ -0,0 +1,34 @@ |
|||
package grpcbalancer |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
) |
|||
|
|||
const ( |
|||
RoundRobin BalancingPolicy = iota |
|||
) |
|||
|
|||
var ErrEmptyVal = errors.New("empty value") |
|||
|
|||
type BalancingPolicy int8 |
|||
|
|||
type grpcConfig struct { |
|||
Balancing BalancingPolicy `json:"loadBalancingPolicy"` |
|||
} |
|||
|
|||
func (bp BalancingPolicy) String() string { |
|||
switch bp { |
|||
case RoundRobin: |
|||
return "round_robin" |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (bp BalancingPolicy) MarshalJSON() ([]byte, error) { |
|||
val := bp.String() |
|||
if val == "" { |
|||
return nil, ErrEmptyVal |
|||
} |
|||
return []byte(fmt.Sprintf(`"%s"`, val)), nil |
|||
} |
@ -0,0 +1,202 @@ |
|||
|
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,19 @@ |
|||
# grpc-multi-resolver |
|||
|
|||
Package multiresolver allows you to Dial to multiple hosts/IPs as a single ClientConn. |
|||
|
|||
Make sure to import this package: |
|||
|
|||
```go |
|||
import _ "github.com/Jille/grpc-multi-resolver" |
|||
``` |
|||
|
|||
and then you can use it with grpc.Dial(): |
|||
|
|||
```go |
|||
grpc.Dial("multi:///127.0.0.1:1234,dns://example.org:1234") |
|||
``` |
|||
|
|||
Note the triple slash at the beginning. |
|||
|
|||
Note: The ServiceConfig and Attributes from the first target are used, the rest are ignored. |
@ -0,0 +1,154 @@ |
|||
/* |
|||
* Package multiresolver allows you to Dial to multiple hosts/IPs as a single ClientConn. |
|||
* |
|||
* Usage: multi:///127.0.0.1:1234,dns://example.org:1234
|
|||
* Note the triple slash at the beginning. |
|||
* |
|||
* Make sure to import this package: |
|||
* ``` |
|||
* import _ "github.com/Jille/grpc-multi-resolver" |
|||
* ``` |
|||
*/ |
|||
package multiresolver |
|||
|
|||
import ( |
|||
"fmt" |
|||
"strings" |
|||
"sync" |
|||
|
|||
"google.golang.org/grpc/resolver" |
|||
"google.golang.org/grpc/serviceconfig" |
|||
) |
|||
|
|||
// init registers the "multi" scheme (see builder.Scheme) with the global
// gRPC resolver registry, so grpc.Dial("multi:///...") works once this
// package is imported for side effects.
func init() {
	resolver.Register(builder{})
}
|||
|
|||
// builder implements resolver.Builder for the "multi" scheme.
type builder struct {
}

// Scheme returns the URI scheme handled by this builder.
func (builder) Scheme() string {
	return "multi"
}
|||
|
|||
// Build creates one child resolver per comma-separated sub-target and
// wires them all into cc through a partialClientConnGroup, so their
// resolved addresses are merged into a single state.
func (builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	pccg := &partialClientConnGroup{
		cc: cc,
	}
	var mr multiResolver
	for _, t := range strings.Split(target.Endpoint, ",") {
		parsedTarget := ParseTarget(t)
		resolverBuilder := resolver.Get(parsedTarget.Scheme)
		if resolverBuilder == nil {
			// Unknown scheme: retry with the default scheme and the raw
			// sub-target as the endpoint.
			parsedTarget = resolver.Target{
				Scheme:   resolver.GetDefaultScheme(),
				Endpoint: t,
			}
			resolverBuilder = resolver.Get(parsedTarget.Scheme)
			if resolverBuilder == nil {
				return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
			}
		}
		pcc := &partialClientConn{parent: pccg}
		pccg.parts = append(pccg.parts, pcc)
		// Named child (not "resolver") so the imported resolver package is
		// not shadowed inside the loop.
		child, err := resolverBuilder.Build(parsedTarget, pcc, opts)
		if err != nil {
			// Tear down the children that were already built.
			mr.Close()
			return nil, err
		}
		mr.children = append(mr.children, child)
	}
	return mr, nil
}
|||
|
|||
// partialClientConnGroup fans several child resolvers into one parent
// resolver.ClientConn, merging their partial states.
type partialClientConnGroup struct {
	cc    resolver.ClientConn  // the real parent connection
	parts []*partialClientConn // one entry per child resolver
}
|||
|
|||
func (pccg *partialClientConnGroup) updateState() { |
|||
s := resolver.State{} |
|||
pccg.parts[0].mtx.Lock() |
|||
s.ServiceConfig = pccg.parts[0].state.ServiceConfig |
|||
s.Attributes = pccg.parts[0].state.Attributes |
|||
pccg.parts[0].mtx.Unlock() |
|||
for _, p := range pccg.parts { |
|||
p.mtx.Lock() |
|||
s.Addresses = append(s.Addresses, p.state.Addresses...) |
|||
p.mtx.Unlock() |
|||
} |
|||
pccg.cc.UpdateState(s) |
|||
} |
|||
|
|||
// partialClientConn is the resolver.ClientConn handed to a single child
// resolver; it records only that child's latest state.
type partialClientConn struct {
	parent *partialClientConnGroup

	mtx   sync.Mutex     // guards state
	state resolver.State // last state reported by the child resolver
}
|||
|
|||
// UpdateState updates the state of the ClientConn appropriately.
// The mutex is released before calling updateState, which re-locks every
// part's mtx (including this one); holding it here would deadlock.
func (cc *partialClientConn) UpdateState(s resolver.State) error {
	cc.mtx.Lock()
	cc.state = s
	cc.mtx.Unlock()
	cc.parent.updateState()
	return nil
}
|||
|
|||
// ReportError notifies the ClientConn that the Resolver encountered an
// error. The ClientConn will notify the load balancer and begin calling
// ResolveNow on the Resolver with exponential backoff.
// Errors are forwarded straight to the parent connection, unmerged.
func (cc *partialClientConn) ReportError(err error) {
	cc.parent.cc.ReportError(err)
}
|||
|
|||
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
func (cc *partialClientConn) NewAddress(addresses []resolver.Address) {
	cc.mtx.Lock()
	cc.state.Addresses = addresses
	cc.mtx.Unlock()
	// Re-merge after releasing the lock; updateState re-locks this part.
	cc.parent.updateState()
}
|||
|
|||
// NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
func (cc *partialClientConn) NewServiceConfig(serviceConfig string) {
	cc.mtx.Lock()
	cc.state.ServiceConfig = cc.ParseServiceConfig(serviceConfig)
	cc.mtx.Unlock()
	// Re-merge after releasing the lock; updateState re-locks this part.
	cc.parent.updateState()
}
|||
|
|||
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
// Parsing is delegated to the parent ClientConn.
func (cc *partialClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
	return cc.parent.cc.ParseServiceConfig(serviceConfigJSON)
}
|||
|
|||
// multiResolver bundles the child resolvers so they can be driven
// (ResolveNow) and shut down (Close) as a single resolver.Resolver.
type multiResolver struct {
	children []resolver.Resolver
}
|||
|
|||
// ResolveNow will be called by gRPC to try to resolve the target name
|
|||
// again. It's just a hint, resolver can ignore this if it's not necessary.
|
|||
//
|
|||
// It could be called multiple times concurrently.
|
|||
func (m multiResolver) ResolveNow(opts resolver.ResolveNowOptions) { |
|||
for _, r := range m.children { |
|||
r.ResolveNow(opts) |
|||
} |
|||
} |
|||
|
|||
// Close closes the resolver.
|
|||
func (m multiResolver) Close() { |
|||
for _, r := range m.children { |
|||
r.Close() |
|||
} |
|||
} |
@ -0,0 +1,54 @@ |
|||
// This file was blatantly stolen from https://github.com/grpc/grpc-go/blob/e38032e927812bb354297adcab933bedeff6c177/internal/grpcutil/target.go
|
|||
/* |
|||
* |
|||
* Copyright 2020 gRPC authors. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
* |
|||
*/ |
|||
|
|||
package multiresolver |
|||
|
|||
import ( |
|||
"strings" |
|||
|
|||
"google.golang.org/grpc/resolver" |
|||
) |
|||
|
|||
// split2 splits s on the first occurrence of sep, returning the pieces
// before and after it. If sep is not found, it returns ("", "", false)
// (note: unlike strings.Cut, the first return is empty on a miss).
func split2(s, sep string) (string, string, bool) {
	before, after, found := strings.Cut(s, sep)
	if !found {
		return "", "", false
	}
	return before, after, true
}
|||
|
|||
// ParseTarget splits target into a resolver.Target struct containing scheme,
|
|||
// authority and endpoint.
|
|||
//
|
|||
// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
|
|||
// target}.
|
|||
func ParseTarget(target string) (ret resolver.Target) { |
|||
var ok bool |
|||
ret.Scheme, ret.Endpoint, ok = split2(target, "://") |
|||
if !ok { |
|||
return resolver.Target{Endpoint: target} |
|||
} |
|||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") |
|||
if !ok { |
|||
return resolver.Target{Endpoint: target} |
|||
} |
|||
return ret |
|||
} |
@ -0,0 +1,3 @@ |
|||
# This source code refers to The Go Authors for copyright purposes. |
|||
# The master list of authors is in the main Go distribution, |
|||
# visible at http://tip.golang.org/AUTHORS. |
@ -0,0 +1,3 @@ |
|||
# This source code was written by the Go contributors. |
|||
# The master list of contributors is in the main Go distribution, |
|||
# visible at http://tip.golang.org/CONTRIBUTORS. |
@ -0,0 +1,28 @@ |
|||
Copyright 2010 The Go Authors. All rights reserved. |
|||
|
|||
Redistribution and use in source and binary forms, with or without |
|||
modification, are permitted provided that the following conditions are |
|||
met: |
|||
|
|||
* Redistributions of source code must retain the above copyright |
|||
notice, this list of conditions and the following disclaimer. |
|||
* Redistributions in binary form must reproduce the above |
|||
copyright notice, this list of conditions and the following disclaimer |
|||
in the documentation and/or other materials provided with the |
|||
distribution. |
|||
* Neither the name of Google Inc. nor the names of its |
|||
contributors may be used to endorse or promote products derived from |
|||
this software without specific prior written permission. |
|||
|
|||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
|||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
|||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
|||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
|||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
|||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|||
|
@ -0,0 +1,524 @@ |
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package jsonpb |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"math" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
"google.golang.org/protobuf/encoding/protojson" |
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
) |
|||
|
|||
// wrapJSONUnmarshalV2 selects between delegating to the protojson (v2)
// unmarshaler and the legacy unmarshalMessage path; kept false so the old
// jsonpb merge semantics are preserved (see UnmarshalNext).
const wrapJSONUnmarshalV2 = false
|||
|
|||
// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
	u := new(Unmarshaler)
	return u.UnmarshalNext(d, m)
}
|||
|
|||
// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
	u := new(Unmarshaler)
	return u.Unmarshal(r, m)
}
|||
|
|||
// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
	r := strings.NewReader(s)
	return new(Unmarshaler).Unmarshal(r, m)
}
|||
|
|||
// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
// The zero value is usable and rejects unknown fields (see Unmarshal's
// use of new(Unmarshaler)).
type Unmarshaler struct {
	// AllowUnknownFields specifies whether to allow messages to contain
	// unknown JSON fields, as opposed to failing to unmarshal.
	AllowUnknownFields bool

	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
	// If unset, the global registry is used by default.
	AnyResolver AnyResolver
}
|||
|
|||
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// Implementations receive the raw JSON bytes of the object being decoded.
//
// The JSON unmarshaling must follow the JSON to proto specification:
//
//	https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
	UnmarshalJSONPB(*Unmarshaler, []byte) error
}
|||
|
|||
// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
	dec := json.NewDecoder(r)
	return u.UnmarshalNext(dec, m)
}
|||
|
|||
// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
	if m == nil {
		return errors.New("invalid nil message")
	}

	// Parse the next JSON object from the stream.
	raw := json.RawMessage{}
	if err := d.Decode(&raw); err != nil {
		return err
	}

	// Check for custom unmarshalers first since they may not properly
	// implement protobuf reflection that the logic below relies on.
	if jsu, ok := m.(JSONPBUnmarshaler); ok {
		return jsu.UnmarshalJSONPB(u, raw)
	}

	// Obtain the v2 reflective view of the message for the logic below.
	mr := proto.MessageReflect(m)

	// NOTE: For historical reasons, a top-level null is treated as a noop.
	// This is incorrect, but kept for compatibility.
	if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
		return nil
	}

	if wrapJSONUnmarshalV2 {
		// NOTE: If input message is non-empty, we need to preserve merge semantics
		// of the old jsonpb implementation. These semantics are not supported by
		// the protobuf JSON specification.
		isEmpty := true
		mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
			isEmpty = false // at least one iteration implies non-empty
			return false
		})
		if !isEmpty {
			// Perform unmarshaling into a newly allocated, empty message.
			mr = mr.New()

			// Use a defer to copy all unmarshaled fields into the original message.
			dst := proto.MessageReflect(m)
			defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
				dst.Set(fd, v)
				return true
			})
		}

		// Unmarshal using the v2 JSON unmarshaler.
		opts := protojson.UnmarshalOptions{
			DiscardUnknown: u.AllowUnknownFields,
		}
		if u.AnyResolver != nil {
			opts.Resolver = anyResolver{u.AnyResolver}
		}
		return opts.Unmarshal(raw, mr.Interface())
	} else {
		// Legacy path: hand-rolled unmarshal followed by a check that all
		// required fields were set.
		if err := u.unmarshalMessage(mr, raw); err != nil {
			return err
		}
		return protoV2.CheckInitialized(mr.Interface())
	}
}
|||
|
|||
// unmarshalMessage parses the JSON in "in" into the message m. Well-known
// types (Any, the wrapper types, Duration, Timestamp, Struct/Value/ListValue)
// are handled specially per the proto3 JSON mapping before falling back to
// generic field-by-field decoding.
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
	md := m.Descriptor()
	fds := md.Fields()

	// Messages may fully override their JSON representation.
	if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
		return jsu.UnmarshalJSONPB(u, in)
	}

	// A JSON null is a no-op, except for google.protobuf.Value where null
	// is itself a meaningful value (NullValue).
	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
		return nil
	}

	switch wellKnownType(md.FullName()) {
	case "Any":
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return err
		}

		rawTypeURL, ok := jsonObject["@type"]
		if !ok {
			return errors.New("Any JSON doesn't have '@type'")
		}
		typeURL, err := unquoteString(string(rawTypeURL))
		if err != nil {
			return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
		}
		m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))

		// Resolve the payload type, preferring the user-supplied resolver.
		var m2 protoreflect.Message
		if u.AnyResolver != nil {
			mi, err := u.AnyResolver.Resolve(typeURL)
			if err != nil {
				return err
			}
			m2 = proto.MessageReflect(mi)
		} else {
			mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
			if err != nil {
				if err == protoregistry.NotFound {
					return fmt.Errorf("could not resolve Any message type: %v", typeURL)
				}
				return err
			}
			m2 = mt.New()
		}

		if wellKnownType(m2.Descriptor().FullName()) != "" {
			// Well-known payloads use the {"@type": ..., "value": ...} form.
			rawValue, ok := jsonObject["value"]
			if !ok {
				return errors.New("Any JSON doesn't have 'value'")
			}
			if err := u.unmarshalMessage(m2, rawValue); err != nil {
				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
			}
		} else {
			// Other payloads are flattened into the Any's own JSON object.
			delete(jsonObject, "@type")
			rawJSON, err := json.Marshal(jsonObject)
			if err != nil {
				return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
			}
			if err = u.unmarshalMessage(m2, rawJSON); err != nil {
				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
			}
		}

		// Re-encode the payload into Any.value (field 2) as wire bytes.
		rawWire, err := protoV2.Marshal(m2.Interface())
		if err != nil {
			return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
		}
		m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
		return nil
	case "BoolValue", "BytesValue", "StringValue",
		"Int32Value", "UInt32Value", "FloatValue",
		"Int64Value", "UInt64Value", "DoubleValue":
		// Wrapper types unmarshal the scalar directly into field 1.
		fd := fds.ByNumber(1)
		v, err := u.unmarshalValue(m.NewField(fd), in, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
		return nil
	case "Duration":
		v, err := unquoteString(string(in))
		if err != nil {
			return err
		}
		d, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("bad Duration: %v", err)
		}

		// Split into seconds (field 1) and nanoseconds (field 2).
		sec := d.Nanoseconds() / 1e9
		nsec := d.Nanoseconds() % 1e9
		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
		return nil
	case "Timestamp":
		v, err := unquoteString(string(in))
		if err != nil {
			return err
		}
		t, err := time.Parse(time.RFC3339Nano, v)
		if err != nil {
			return fmt.Errorf("bad Timestamp: %v", err)
		}

		sec := t.Unix()
		nsec := t.Nanosecond()
		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
		return nil
	case "Value":
		// google.protobuf.Value is a oneof over null/number/string/bool/
		// struct/list; dispatch on the JSON token shape.
		switch {
		case string(in) == "null":
			m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
		case string(in) == "true":
			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
		case string(in) == "false":
			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
		case hasPrefixAndSuffix('"', in, '"'):
			s, err := unquoteString(string(in))
			if err != nil {
				return fmt.Errorf("unrecognized type for Value %q", in)
			}
			m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
		case hasPrefixAndSuffix('[', in, ']'):
			v := m.Mutable(fds.ByNumber(6))
			return u.unmarshalMessage(v.Message(), in)
		case hasPrefixAndSuffix('{', in, '}'):
			v := m.Mutable(fds.ByNumber(5))
			return u.unmarshalMessage(v.Message(), in)
		default:
			f, err := strconv.ParseFloat(string(in), 0)
			if err != nil {
				return fmt.Errorf("unrecognized type for Value %q", in)
			}
			m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
		}
		return nil
	case "ListValue":
		var jsonArray []json.RawMessage
		if err := json.Unmarshal(in, &jsonArray); err != nil {
			return fmt.Errorf("bad ListValue: %v", err)
		}

		lv := m.Mutable(fds.ByNumber(1)).List()
		for _, raw := range jsonArray {
			ve := lv.NewElement()
			if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
				return err
			}
			lv.Append(ve)
		}
		return nil
	case "Struct":
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return fmt.Errorf("bad StructValue: %v", err)
		}

		mv := m.Mutable(fds.ByNumber(1)).Map()
		for key, raw := range jsonObject {
			kv := protoreflect.ValueOf(key).MapKey()
			vv := mv.NewValue()
			if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
				return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
			}
			mv.Set(kv, vv)
		}
		return nil
	}

	// Generic message: decode the top-level JSON object once, then consume
	// entries as fields are matched.
	var jsonObject map[string]json.RawMessage
	if err := json.Unmarshal(in, &jsonObject); err != nil {
		return err
	}

	// Handle known fields.
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if fd.IsWeak() && fd.Message().IsPlaceholder() {
			continue // weak reference is not linked in
		}

		// Search for any raw JSON value associated with this field.
		// Both the original proto name and the JSON (camelCase) name are
		// accepted; the JSON name wins if both are present.
		var raw json.RawMessage
		name := string(fd.Name())
		if fd.Kind() == protoreflect.GroupKind {
			name = string(fd.Message().Name())
		}
		if v, ok := jsonObject[name]; ok {
			delete(jsonObject, name)
			raw = v
		}
		name = string(fd.JSONName())
		if v, ok := jsonObject[name]; ok {
			delete(jsonObject, name)
			raw = v
		}

		field := m.NewField(fd)
		// Unmarshal the field value. A null is skipped unless the field's
		// type assigns meaning to null (Value, or a custom unmarshaler).
		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
			continue
		}
		v, err := u.unmarshalValue(field, raw, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
	}

	// Handle extension fields, which are keyed as "[full.name]".
	for name, raw := range jsonObject {
		if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
			continue
		}

		// Resolve the extension field by name.
		xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
		xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
		if xt == nil && isMessageSet(md) {
			xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
		}
		if xt == nil {
			continue
		}
		delete(jsonObject, name)
		fd := xt.TypeDescriptor()
		if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
			return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
		}

		field := m.NewField(fd)
		// Unmarshal the field value (same null handling as above).
		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
			continue
		}
		v, err := u.unmarshalValue(field, raw, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
	}

	// Any leftover keys are unknown fields; report an arbitrary one unless
	// the caller opted into ignoring them.
	if !u.AllowUnknownFields && len(jsonObject) > 0 {
		for name := range jsonObject {
			return fmt.Errorf("unknown field %q in %v", name, md.FullName())
		}
	}
	return nil
}
|||
|
|||
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { |
|||
if md := fd.Message(); md != nil { |
|||
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated |
|||
} |
|||
return false |
|||
} |
|||
|
|||
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { |
|||
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { |
|||
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) |
|||
return ok |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// unmarshalValue decodes the JSON in "in" into a value for field fd,
// dispatching on whether the field is a list, a map, or singular.
// The container value v is mutated in place and returned.
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	switch {
	case fd.IsList():
		var jsonArray []json.RawMessage
		if err := json.Unmarshal(in, &jsonArray); err != nil {
			return v, err
		}
		lv := v.List()
		for _, raw := range jsonArray {
			ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
			if err != nil {
				return v, err
			}
			lv.Append(ve)
		}
		return v, nil
	case fd.IsMap():
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return v, err
		}
		kfd := fd.MapKey()
		vfd := fd.MapValue()
		mv := v.Map()
		for key, raw := range jsonObject {
			var kv protoreflect.MapKey
			if kfd.Kind() == protoreflect.StringKind {
				kv = protoreflect.ValueOf(key).MapKey()
			} else {
				// Non-string map keys arrive as JSON object keys (strings);
				// decode them with the scalar unmarshaler. Note: this v
				// deliberately shadows the outer v within this branch.
				v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
				if err != nil {
					return v, err
				}
				kv = v.MapKey()
			}

			vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
			if err != nil {
				return v, err
			}
			mv.Set(kv, vv)
		}
		return v, nil
	default:
		return u.unmarshalSingularValue(v, in, fd)
	}
}
|||
|
|||
// nonFinite maps the quoted JSON string encodings of non-finite
// floating-point values (per the proto3 JSON mapping) to their Go
// float64 equivalents.
var nonFinite = map[string]float64{
	`"NaN"`:       math.NaN(),
	`"Infinity"`:  math.Inf(+1),
	`"-Infinity"`: math.Inf(-1),
}
|||
|
|||
// unmarshalSingularValue decodes a single JSON value into a protoreflect
// value of the kind described by fd. Numeric kinds tolerate quoted numbers
// via trimQuote (legacy jsonpb behavior), floats accept the "NaN"/
// "Infinity"/"-Infinity" string encodings, and enums accept either a
// quoted name or a bare number.
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	switch fd.Kind() {
	case protoreflect.BoolKind:
		return unmarshalValue(in, new(bool))
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		return unmarshalValue(trimQuote(in), new(int32))
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		return unmarshalValue(trimQuote(in), new(int64))
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		return unmarshalValue(trimQuote(in), new(uint32))
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		return unmarshalValue(trimQuote(in), new(uint64))
	case protoreflect.FloatKind:
		if f, ok := nonFinite[string(in)]; ok {
			return protoreflect.ValueOfFloat32(float32(f)), nil
		}
		return unmarshalValue(trimQuote(in), new(float32))
	case protoreflect.DoubleKind:
		if f, ok := nonFinite[string(in)]; ok {
			return protoreflect.ValueOfFloat64(float64(f)), nil
		}
		return unmarshalValue(trimQuote(in), new(float64))
	case protoreflect.StringKind:
		return unmarshalValue(in, new(string))
	case protoreflect.BytesKind:
		return unmarshalValue(in, new([]byte))
	case protoreflect.EnumKind:
		if hasPrefixAndSuffix('"', in, '"') {
			// Enum supplied by name.
			vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
			if vd == nil {
				return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
			}
			return protoreflect.ValueOfEnum(vd.Number()), nil
		}
		// Enum supplied by number.
		return unmarshalValue(in, new(protoreflect.EnumNumber))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		err := u.unmarshalMessage(v.Message(), in)
		return v, err
	default:
		// All descriptor kinds are covered above; anything else is a
		// programmer error.
		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
	}
}
|||
|
|||
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { |
|||
err := json.Unmarshal(in, v) |
|||
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err |
|||
} |
|||
|
|||
// unquoteString decodes a JSON string literal (surrounding quotes plus any
// escape sequences) into its Go string value.
func unquoteString(in string) (string, error) {
	var s string
	if err := json.Unmarshal([]byte(in), &s); err != nil {
		return "", err
	}
	return s, nil
}
|||
|
|||
// hasPrefixAndSuffix reports whether in is at least two bytes long and is
// bracketed by the given prefix and suffix bytes.
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
	return len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix
}
|||
|
|||
// trimQuote is like unquoteString but simply strips surrounding quotes
// without processing escapes. This is incorrect, but is behavior done by
// the legacy implementation. Input without both quotes is returned as-is.
func trimQuote(in []byte) []byte {
	if n := len(in); n >= 2 && in[0] == '"' && in[n-1] == '"' {
		return in[1 : n-1]
	}
	return in
}
@ -0,0 +1,559 @@ |
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package jsonpb |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"math" |
|||
"reflect" |
|||
"sort" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
"google.golang.org/protobuf/encoding/protojson" |
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
) |
|||
|
|||
// wrapJSONMarshalV2 selects between delegating marshaling to the v2
// protojson package (true) and the legacy hand-written implementation in
// this file (false); see (*Marshaler).marshal.
const wrapJSONMarshalV2 = false
|||
|
|||
// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation. The zero value produces compact
// JSON with default naming and no defaults emitted.
type Marshaler struct {
	// OrigName specifies whether to use the original protobuf name for fields
	// instead of the lowerCamelCase JSON name.
	OrigName bool

	// EnumsAsInts specifies whether to render enum values as integers,
	// as opposed to string values.
	EnumsAsInts bool

	// EmitDefaults specifies whether to render fields with zero values.
	EmitDefaults bool

	// Indent controls whether the output is compact or not.
	// If empty, the output is compact JSON. Otherwise, every JSON object
	// entry and JSON array value will be on its own line.
	// Each line will be preceded by repeated copies of Indent, where the
	// number of copies is the current indentation depth.
	Indent string

	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
	// If unset, the global registry is used by default.
	AnyResolver AnyResolver
}
|||
|
|||
// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
// Implementations receive the active Marshaler so they can honor its
// configured options.
//
// The JSON marshaling must follow the proto to JSON specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
	MarshalJSONPB(*Marshaler) ([]byte, error)
}
|||
|
|||
// Marshal serializes a protobuf message as JSON into w.
|
|||
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { |
|||
b, err := jm.marshal(m) |
|||
if len(b) > 0 { |
|||
if _, err := w.Write(b); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// MarshalToString serializes a protobuf message as JSON in string form.
|
|||
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { |
|||
b, err := jm.marshal(m) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
return string(b), nil |
|||
} |
|||
|
|||
// marshal produces the JSON encoding of m as a byte slice, routing through
// either the v2 protojson marshaler or the legacy implementation depending
// on wrapJSONMarshalV2. A nil message (including a typed nil pointer)
// is rejected.
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
	v := reflect.ValueOf(m)
	if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil, errors.New("Marshal called with nil")
	}

	// Check for custom marshalers first since they may not properly
	// implement protobuf reflection that the logic below relies on.
	if jsm, ok := m.(JSONPBMarshaler); ok {
		return jsm.MarshalJSONPB(jm)
	}

	if wrapJSONMarshalV2 {
		// Map the legacy options onto their protojson equivalents.
		opts := protojson.MarshalOptions{
			UseProtoNames:   jm.OrigName,
			UseEnumNumbers:  jm.EnumsAsInts,
			EmitUnpopulated: jm.EmitDefaults,
			Indent:          jm.Indent,
		}
		if jm.AnyResolver != nil {
			opts.Resolver = anyResolver{jm.AnyResolver}
		}
		return opts.Marshal(proto.MessageReflect(m).Interface())
	} else {
		// Check for unpopulated required fields first.
		m2 := proto.MessageReflect(m)
		if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
			return nil, err
		}

		w := jsonWriter{Marshaler: jm}
		err := w.marshalMessage(m2, "", "")
		return w.buf, err
	}
}
|||
|
|||
// jsonWriter accumulates the JSON output produced by the legacy marshaler,
// carrying the configuring Marshaler alongside the output buffer.
type jsonWriter struct {
	*Marshaler
	buf []byte // accumulated JSON output
}

// write appends s to the output buffer.
func (w *jsonWriter) write(s string) {
	w.buf = append(w.buf, s...)
}
|||
|
|||
// marshalMessage writes the JSON encoding of m. If typeURL is non-empty,
// the message is being encoded as the payload of a google.protobuf.Any and
// an "@type" member is emitted alongside the regular fields.
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
	// Custom marshalers take precedence over reflection-based encoding.
	if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
		b, err := jsm.MarshalJSONPB(w.Marshaler)
		if err != nil {
			return err
		}
		if typeURL != "" {
			// we are marshaling this object to an Any type; splice the
			// "@type" member into the custom marshaler's output.
			var js map[string]*json.RawMessage
			if err = json.Unmarshal(b, &js); err != nil {
				return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
			}
			turl, err := json.Marshal(typeURL)
			if err != nil {
				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
			}
			js["@type"] = (*json.RawMessage)(&turl)
			if b, err = json.Marshal(js); err != nil {
				return err
			}
		}
		w.write(string(b))
		return nil
	}

	md := m.Descriptor()
	fds := md.Fields()

	// Handle well-known types.
	const secondInNanos = int64(time.Second / time.Nanosecond)
	switch wellKnownType(md.FullName()) {
	case "Any":
		return w.marshalAny(m, indent)
	case "BoolValue", "BytesValue", "StringValue",
		"Int32Value", "UInt32Value", "FloatValue",
		"Int64Value", "UInt64Value", "DoubleValue":
		// Wrapper types serialize as their bare inner value (field 1).
		fd := fds.ByNumber(1)
		return w.marshalValue(fd, m.Get(fd), indent)
	case "Duration":
		const maxSecondsInDuration = 315576000000
		// "Generated output always contains 0, 3, 6, or 9 fractional digits,
		// depending on required precision."
		s := m.Get(fds.ByNumber(1)).Int()
		ns := m.Get(fds.ByNumber(2)).Int()
		if s < -maxSecondsInDuration || s > maxSecondsInDuration {
			return fmt.Errorf("seconds out of range %v", s)
		}
		if ns <= -secondInNanos || ns >= secondInNanos {
			return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
		}
		if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
			return errors.New("signs of seconds and nanos do not match")
		}
		var sign string
		if s < 0 || ns < 0 {
			sign, s, ns = "-", -1*s, -1*ns
		}
		x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
		// Trim trailing zero groups down to 6, 3, or 0 fractional digits.
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, ".000")
		w.write(fmt.Sprintf(`"%vs"`, x))
		return nil
	case "Timestamp":
		// "RFC 3339, where generated output will always be Z-normalized
		// and uses 0, 3, 6 or 9 fractional digits."
		s := m.Get(fds.ByNumber(1)).Int()
		ns := m.Get(fds.ByNumber(2)).Int()
		if ns < 0 || ns >= secondInNanos {
			return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
		}
		t := time.Unix(s, ns).UTC()
		// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
		x := t.Format("2006-01-02T15:04:05.000000000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, ".000")
		w.write(fmt.Sprintf(`"%vZ"`, x))
		return nil
	case "Value":
		// JSON value; which is a null, number, string, bool, object, or array.
		od := md.Oneofs().Get(0)
		fd := m.WhichOneof(od)
		if fd == nil {
			return errors.New("nil Value")
		}
		return w.marshalValue(fd, m.Get(fd), indent)
	case "Struct", "ListValue":
		// JSON object or array.
		fd := fds.ByNumber(1)
		return w.marshalValue(fd, m.Get(fd), indent)
	}

	w.write("{")
	if w.Indent != "" {
		w.write("\n")
	}

	firstField := true
	if typeURL != "" {
		if err := w.marshalTypeURL(indent, typeURL); err != nil {
			return err
		}
		firstField = false
	}

	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			// Only the set member of a oneof is emitted; skip past the
			// whole oneof group in the index.
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
			if fd == nil {
				continue
			}
		} else {
			i++
		}

		v := m.Get(fd)

		if !m.Has(fd) {
			if !w.EmitDefaults || fd.ContainingOneof() != nil {
				continue
			}
			if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
				v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
			}
		}

		if !firstField {
			w.writeComma()
		}
		if err := w.marshalField(fd, v, indent); err != nil {
			return err
		}
		firstField = false
	}

	// Handle proto2 extensions.
	if md.ExtensionRanges().Len() > 0 {
		// Collect a sorted list of all extension descriptor and values.
		type ext struct {
			desc protoreflect.FieldDescriptor
			val  protoreflect.Value
		}
		var exts []ext
		m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
			if fd.IsExtension() {
				exts = append(exts, ext{fd, v})
			}
			return true
		})
		// Sort by field number for deterministic output.
		sort.Slice(exts, func(i, j int) bool {
			return exts[i].desc.Number() < exts[j].desc.Number()
		})

		for _, ext := range exts {
			if !firstField {
				w.writeComma()
			}
			if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
				return err
			}
			firstField = false
		}
	}

	if w.Indent != "" {
		w.write("\n")
		w.write(indent)
	}
	w.write("}")
	return nil
}
|||
|
|||
func (w *jsonWriter) writeComma() { |
|||
if w.Indent != "" { |
|||
w.write(",\n") |
|||
} else { |
|||
w.write(",") |
|||
} |
|||
} |
|||
|
|||
// marshalAny encodes a google.protobuf.Any message.
// "If the Any contains a value that has a special JSON mapping,
// it will be converted as follows: {"@type": xxx, "value": yyy}.
// Otherwise, the value will be converted into a JSON object,
// and the "@type" field will be inserted to indicate the actual data type."
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
	md := m.Descriptor()
	typeURL := m.Get(md.Fields().ByNumber(1)).String() // Any.type_url
	rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()   // Any.value (wire bytes)

	// Resolve the payload type, preferring the user-supplied resolver.
	var m2 protoreflect.Message
	if w.AnyResolver != nil {
		mi, err := w.AnyResolver.Resolve(typeURL)
		if err != nil {
			return err
		}
		m2 = proto.MessageReflect(mi)
	} else {
		mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
		if err != nil {
			return err
		}
		m2 = mt.New()
	}

	// Decode the embedded wire bytes into the resolved payload message.
	if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
		return err
	}

	if wellKnownType(m2.Descriptor().FullName()) == "" {
		// Ordinary payload: flatten it and inject "@type".
		return w.marshalMessage(m2, indent, typeURL)
	}

	// Well-known payload: use the {"@type": ..., "value": ...} wrapper form.
	w.write("{")
	if w.Indent != "" {
		w.write("\n")
	}
	if err := w.marshalTypeURL(indent, typeURL); err != nil {
		return err
	}
	w.writeComma()
	if w.Indent != "" {
		w.write(indent)
		w.write(w.Indent)
		w.write(`"value": `)
	} else {
		w.write(`"value":`)
	}
	if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
		return err
	}
	if w.Indent != "" {
		w.write("\n")
		w.write(indent)
	}
	w.write("}")
	return nil
}
|||
|
|||
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { |
|||
if w.Indent != "" { |
|||
w.write(indent) |
|||
w.write(w.Indent) |
|||
} |
|||
w.write(`"@type":`) |
|||
if w.Indent != "" { |
|||
w.write(" ") |
|||
} |
|||
b, err := json.Marshal(typeURL) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
w.write(string(b)) |
|||
return nil |
|||
} |
|||
|
|||
// marshalField writes field description and value to the Writer: the quoted
// member name (extension, original proto name, or JSON name depending on
// configuration) followed by the encoded value.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	if w.Indent != "" {
		w.write(indent)
		w.write(w.Indent)
	}
	w.write(`"`)
	switch {
	case fd.IsExtension():
		// For message set, use the fname of the message as the extension name.
		name := string(fd.FullName())
		if isMessageSet(fd.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		w.write("[" + name + "]")
	case w.OrigName:
		name := string(fd.Name())
		if fd.Kind() == protoreflect.GroupKind {
			// Groups are keyed by their message type name.
			name = string(fd.Message().Name())
		}
		w.write(name)
	default:
		w.write(string(fd.JSONName()))
	}
	w.write(`":`)
	if w.Indent != "" {
		w.write(" ")
	}
	return w.marshalValue(fd, v, indent)
}
|||
|
|||
// marshalValue writes the JSON encoding of a field value, dispatching on
// whether the field is a list, a map, or singular. Map entries are emitted
// in sorted key order for deterministic output.
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	switch {
	case fd.IsList():
		w.write("[")
		comma := ""
		lv := v.List()
		for i := 0; i < lv.Len(); i++ {
			w.write(comma)
			if w.Indent != "" {
				w.write("\n")
				w.write(indent)
				w.write(w.Indent)
				w.write(w.Indent)
			}
			if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
				return err
			}
			comma = ","
		}
		if w.Indent != "" {
			w.write("\n")
			w.write(indent)
			w.write(w.Indent)
		}
		w.write("]")
		return nil
	case fd.IsMap():
		kfd := fd.MapKey()
		vfd := fd.MapValue()
		mv := v.Map()

		// Collect a sorted list of all map keys and values.
		type entry struct{ key, val protoreflect.Value }
		var entries []entry
		mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
			entries = append(entries, entry{k.Value(), v})
			return true
		})
		sort.Slice(entries, func(i, j int) bool {
			switch kfd.Kind() {
			case protoreflect.BoolKind:
				return !entries[i].key.Bool() && entries[j].key.Bool()
			case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
				return entries[i].key.Int() < entries[j].key.Int()
			case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
				return entries[i].key.Uint() < entries[j].key.Uint()
			case protoreflect.StringKind:
				return entries[i].key.String() < entries[j].key.String()
			default:
				// Map keys are restricted to the kinds above by protobuf.
				panic("invalid kind")
			}
		})

		w.write(`{`)
		comma := ""
		for _, entry := range entries {
			w.write(comma)
			if w.Indent != "" {
				w.write("\n")
				w.write(indent)
				w.write(w.Indent)
				w.write(w.Indent)
			}

			// Map keys are always rendered as JSON strings, regardless of
			// their protobuf kind.
			s := fmt.Sprint(entry.key.Interface())
			b, err := json.Marshal(s)
			if err != nil {
				return err
			}
			w.write(string(b))

			w.write(`:`)
			if w.Indent != "" {
				w.write(` `)
			}

			if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
				return err
			}
			comma = ","
		}
		if w.Indent != "" {
			w.write("\n")
			w.write(indent)
			w.write(w.Indent)
		}
		w.write(`}`)
		return nil
	default:
		return w.marshalSingularValue(fd, v, indent)
	}
}
|||
|
|||
// marshalSingularValue writes one scalar, enum, or message value.
// An invalid (unset) value renders as JSON null, NullValue enums render as
// null, non-finite floats use their quoted string encodings, and 64-bit
// integers are emitted as quoted strings per the proto3 JSON mapping.
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	switch {
	case !v.IsValid():
		w.write("null")
		return nil
	case fd.Message() != nil:
		return w.marshalMessage(v.Message(), indent+w.Indent, "")
	case fd.Enum() != nil:
		if fd.Enum().FullName() == "google.protobuf.NullValue" {
			w.write("null")
			return nil
		}

		vd := fd.Enum().Values().ByNumber(v.Enum())
		// Unknown enum numbers fall back to numeric form.
		if vd == nil || w.EnumsAsInts {
			w.write(strconv.Itoa(int(v.Enum())))
		} else {
			w.write(`"` + string(vd.Name()) + `"`)
		}
		return nil
	default:
		switch v.Interface().(type) {
		case float32, float64:
			// Non-finite floats have special string encodings.
			switch {
			case math.IsInf(v.Float(), +1):
				w.write(`"Infinity"`)
				return nil
			case math.IsInf(v.Float(), -1):
				w.write(`"-Infinity"`)
				return nil
			case math.IsNaN(v.Float()):
				w.write(`"NaN"`)
				return nil
			}
		case int64, uint64:
			// 64-bit integers are encoded as JSON strings.
			w.write(fmt.Sprintf(`"%d"`, v.Interface()))
			return nil
		}

		b, err := json.Marshal(v.Interface())
		if err != nil {
			return err
		}
		w.write(string(b))
		return nil
	}
}
@ -0,0 +1,69 @@ |
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Package jsonpb provides functionality to marshal and unmarshal between a
|
|||
// protocol buffer message and JSON. It follows the specification at
|
|||
// https://developers.google.com/protocol-buffers/docs/proto3#json.
|
|||
//
|
|||
// Do not rely on the default behavior of the standard encoding/json package
|
|||
// when called on generated message types as it does not operate correctly.
|
|||
//
|
|||
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
|
|||
// package instead.
|
|||
package jsonpb |
|||
|
|||
import ( |
|||
"github.com/golang/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
	Resolve(typeURL string) (proto.Message, error)
}

// anyResolver adapts a legacy AnyResolver to the v2 resolver interface
// used by the protojson options elsewhere in this package, providing both
// message and extension lookup.
type anyResolver struct{ AnyResolver }

// FindMessageByName resolves a message by its full name, treating the name
// itself as a type URL.
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	return r.FindMessageByURL(string(message))
}

// FindMessageByURL resolves a message through the wrapped AnyResolver and
// wraps the result as a v2 MessageType.
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	m, err := r.Resolve(url)
	if err != nil {
		return nil, err
	}
	return protoimpl.X.MessageTypeOf(m), nil
}

// FindExtensionByName delegates to the global registry; the legacy
// AnyResolver interface has no notion of extensions.
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	return protoregistry.GlobalTypes.FindExtensionByName(field)
}

// FindExtensionByNumber delegates to the global registry (see above).
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
|||
|
|||
func wellKnownType(s protoreflect.FullName) string { |
|||
if s.Parent() == "google.protobuf" { |
|||
switch s.Name() { |
|||
case "Empty", "Any", |
|||
"BoolValue", "BytesValue", "StringValue", |
|||
"Int32Value", "UInt32Value", "FloatValue", |
|||
"Int64Value", "UInt64Value", "DoubleValue", |
|||
"Duration", "Timestamp", |
|||
"NullValue", "Struct", "Value", "ListValue": |
|||
return string(s.Name()) |
|||
} |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
// isMessageSet reports whether md describes a proto1 MessageSet.
// The check is done through an optional interface assertion because the
// IsMessageSet accessor is not part of the public MessageDescriptor API.
func isMessageSet(md protoreflect.MessageDescriptor) bool {
	ms, ok := md.(interface{ IsMessageSet() bool })
	return ok && ms.IsMessageSet()
}
@ -0,0 +1,324 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
|
|||
"google.golang.org/protobuf/encoding/prototext" |
|||
"google.golang.org/protobuf/encoding/protowire" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// Wire type constants for the protobuf binary encoding.
// The values match the wire-type numbers defined by the protobuf
// encoding specification.
const (
	WireVarint     = 0 // varint-encoded integers
	WireFixed32    = 5 // fixed-width 32-bit values
	WireFixed64    = 1 // fixed-width 64-bit values
	WireBytes      = 2 // length-prefixed byte sequences
	WireStartGroup = 3 // start of a group (deprecated encoding)
	WireEndGroup   = 4 // end of a group (deprecated encoding)
)
|||
|
|||
// EncodeVarint returns the varint encoded bytes of v.
// It allocates a fresh slice on every call; use Buffer.EncodeVarint to
// append into an existing buffer instead.
func EncodeVarint(v uint64) []byte {
	return protowire.AppendVarint(nil, v)
}
|||
|
|||
// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)) but does not allocate.
func SizeVarint(v uint64) int {
	return protowire.SizeVarint(v)
}
|||
|
|||
// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
	v, n := protowire.ConsumeVarint(b)
	// protowire reports errors as a negative length; this legacy API
	// collapses all errors to (0, 0).
	if n < 0 {
		return 0, 0
	}
	return v, n
}
|||
|
|||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
	buf           []byte // accumulated wire-format data
	idx           int    // read offset into buf; buf[idx:] is the unread portion
	deterministic bool   // see SetDeterministic
}
|||
|
|||
// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
	return &Buffer{buf: buf}
}
|||
|
|||
// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
//   - Repeated serialization of a message will return the same bytes.
//   - Different processes of the same binary (which may be executing on
//     different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographic order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
	b.deterministic = deterministic
}
|||
|
|||
// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
// Any previously buffered data is discarded.
func (b *Buffer) SetBuf(buf []byte) {
	b.buf = buf
	b.idx = 0
}
|||
|
|||
// Reset clears the internal buffer of all written and unread data.
// The backing array is retained so the Buffer can be reused without
// reallocating.
func (b *Buffer) Reset() {
	b.buf = b.buf[:0]
	b.idx = 0
}
|||
|
|||
// Bytes returns the internal buffer.
// The returned slice aliases the Buffer's storage; it is invalidated by
// subsequent writes.
func (b *Buffer) Bytes() []byte {
	return b.buf
}
|||
|
|||
// Unread returns the unread portion of the buffer.
// The returned slice aliases the Buffer's storage.
func (b *Buffer) Unread() []byte {
	return b.buf[b.idx:]
}
|||
|
|||
// Marshal appends the wire-format encoding of m to the buffer.
// The deterministic flag set via SetDeterministic is honored.
func (b *Buffer) Marshal(m Message) error {
	var err error
	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
	return err
}
|||
|
|||
// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
	err := UnmarshalMerge(b.Unread(), m)
	// The entire unread portion is consumed even on error.
	b.idx = len(b.buf)
	return err
}
|||
|
|||
// unknownFields is a minimal Message implementation used by DebugPrint to
// hold raw wire data in its unknown-field set. Only the reflective view is
// ever used; the legacy Message methods are intentionally unreachable.
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }

func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset()         { panic("not implemented") }
func (m *unknownFields) ProtoMessage()  {}
|||
|
|||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
	// Stash the raw bytes as unknown fields of a throwaway message, then
	// render them with the prototext debug formatter.
	m := MessageReflect(new(unknownFields))
	m.SetUnknown(b)
	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
|||
|
|||
// EncodeVarint appends an unsigned varint encoding to the buffer.
// The error result is always nil; it exists for historical API
// compatibility.
func (b *Buffer) EncodeVarint(v uint64) error {
	b.buf = protowire.AppendVarint(b.buf, v)
	return nil
}
|||
|
|||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
// Zig-zag maps signed values to unsigned ones so small magnitudes (positive
// or negative) encode as short varints: 0→0, -1→1, 1→2, -2→3, ...
func (b *Buffer) EncodeZigzag32(v uint64) error {
	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}
|||
|
|||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
|||
func (b *Buffer) EncodeZigzag64(v uint64) error { |
|||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) |
|||
} |
|||
|
|||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
// Only the low 32 bits of v are written.
func (b *Buffer) EncodeFixed32(v uint64) error {
	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
	return nil
}
|||
|
|||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
// The error result is always nil; it exists for historical API
// compatibility.
func (b *Buffer) EncodeFixed64(v uint64) error {
	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
	return nil
}
|||
|
|||
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
// The error result is always nil; it exists for historical API
// compatibility.
func (b *Buffer) EncodeRawBytes(v []byte) error {
	b.buf = protowire.AppendBytes(b.buf, v)
	return nil
}
|||
|
|||
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
	b.buf = protowire.AppendString(b.buf, v)
	return nil
}
|||
|
|||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
// Note that the message is sized twice: once for the length prefix and
// once during the actual marshal.
func (b *Buffer) EncodeMessage(m Message) error {
	var err error
	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
	return err
}
|||
|
|||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
// On a parse error the read offset is left unchanged.
func (b *Buffer) DecodeVarint() (uint64, error) {
	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}
|||
|
|||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
// The inverse of EncodeZigzag32: the low bit selects the sign and the
// remaining bits hold the magnitude.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
|||
|
|||
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
// The inverse of EncodeZigzag64: the low bit selects the sign and the
// remaining bits hold the magnitude.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
|||
|
|||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
// On a parse error the read offset is left unchanged.
func (b *Buffer) DecodeFixed32() (uint64, error) {
	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}
|||
|
|||
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
// On a parse error the read offset is left unchanged.
func (b *Buffer) DecodeFixed64() (uint64, error) {
	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}
|||
|
|||
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy of the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	b.idx += n
	if alloc {
		// Detach from the Buffer's backing array so the caller's slice
		// survives later writes to the Buffer.
		v = append([]byte(nil), v...)
	}
	return v, nil
}
|||
|
|||
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
	v, n := protowire.ConsumeString(b.buf[b.idx:])
	if n < 0 {
		return "", protowire.ParseError(n)
	}
	b.idx += n
	return v, nil
}
|||
|
|||
// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
	v, err := b.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	return UnmarshalMerge(v, m)
}
|||
|
|||
// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including) the end group marker.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
	v, n, err := consumeGroup(b.buf[b.idx:])
	if err != nil {
		return err
	}
	b.idx += n
	return UnmarshalMerge(v, m)
}
|||
|
|||
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and
// the total length of the message (including the end group marker).
//
// Groups may nest, so a depth counter tracks how many end markers remain
// to be matched.
func consumeGroup(b []byte) ([]byte, int, error) {
	b0 := b
	depth := 1 // assume this follows a start group marker

	for {
		_, wtyp, tagLen := protowire.ConsumeTag(b)
		if tagLen < 0 {
			return nil, 0, protowire.ParseError(tagLen)
		}
		b = b[tagLen:]

		var valLen int
		switch wtyp {
		case protowire.VarintType:
			_, valLen = protowire.ConsumeVarint(b)
		case protowire.Fixed32Type:
			_, valLen = protowire.ConsumeFixed32(b)
		case protowire.Fixed64Type:
			_, valLen = protowire.ConsumeFixed64(b)
		case protowire.BytesType:
			_, valLen = protowire.ConsumeBytes(b)
		case protowire.StartGroupType:
			depth++
		case protowire.EndGroupType:
			depth--
		default:
			return nil, 0, errors.New("proto: cannot parse reserved wire type")
		}
		// Group markers carry no payload, so valLen stays 0 for them.
		if valLen < 0 {
			return nil, 0, protowire.ParseError(valLen)
		}
		b = b[valLen:]

		if depth == 0 {
			// Exclude the final end-group tag from the returned bytes,
			// but include it in the reported total length.
			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
		}
	}
}
@ -0,0 +1,63 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
) |
|||
|
|||
// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
	if m != nil {
		setDefaults(MessageReflect(m))
	}
}
|||
|
|||
// setDefaults is the reflective worker for SetDefaults: it populates
// defaults on m's unset scalar fields, then recurses into every populated
// message-valued field (singular, repeated, and map values).
func setDefaults(m protoreflect.Message) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if !m.Has(fd) {
			if fd.HasDefault() && fd.ContainingOneof() == nil {
				v := fd.Default()
				if fd.Kind() == protoreflect.BytesKind {
					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
				}
				m.Set(fd, v)
			}
			continue
		}
	}

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				setDefaults(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					setDefaults(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					setDefaults(v.Message())
					return true
				})
			}
		}
		return true
	})
}
@ -0,0 +1,113 @@ |
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"strconv" |
|||
|
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
) |
|||
|
|||
// Legacy sentinel errors kept for API compatibility only; the current
// implementation never returns them.
var (
	// Deprecated: No longer returned.
	ErrNil = errors.New("proto: Marshal called with nil")

	// Deprecated: No longer returned.
	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")

	// Deprecated: No longer returned.
	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
|||
|
|||
// Stats is a legacy allocation/encode statistics record. It is never
// populated anymore.
//
// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|||
|
|||
// GetStats always returns a zero Stats value.
//
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
|||
|
|||
// MarshalMessageSet is a stub that always fails; MessageSet support was
// removed from this package.
//
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}
|||
|
|||
// UnmarshalMessageSet is a stub that always fails; MessageSet support was
// removed from this package.
//
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}
|||
|
|||
// MarshalMessageSetJSON is a stub that always fails; MessageSet support
// was removed from this package.
//
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}
|||
|
|||
// UnmarshalMessageSetJSON is a stub that always fails; MessageSet support
// was removed from this package.
//
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}
|||
|
|||
// RegisterMessageSetType is a no-op kept for API compatibility.
//
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
|||
|
|||
// EnumName returns the name registered in m for the enum value v,
// falling back to the decimal representation of v when the value is
// not present in the map.
//
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
	if name, ok := m[v]; ok {
		return name
	}
	return strconv.Itoa(int(v))
}
|||
|
|||
// UnmarshalJSONEnum decodes a JSON-encoded enum into its int32 value
// using the name-to-number mapping in m. It accepts either a quoted
// enum name (new style) or a bare integer (old style).
//
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if data[0] != '"' {
		// Old style: enums are ints.
		var num int32
		if err := json.Unmarshal(data, &num); err != nil {
			return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
		}
		return num, nil
	}
	// New style: enums are strings.
	var name string
	if err := json.Unmarshal(data, &name); err != nil {
		return -1, err
	}
	num, ok := m[name]
	if !ok {
		return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, name)
	}
	return num, nil
}
|||
|
|||
// InternalMessageInfo is an empty placeholder for a type that previously
// cached per-message marshaling state.
//
// Deprecated: Do not use; this type existed for internal-use only.
type InternalMessageInfo struct{}
|||
|
|||
// DiscardUnknown forwards to the package-level DiscardUnknown.
//
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
	DiscardUnknown(m)
}
|||
|
|||
// Marshal appends the wire encoding of m to b via the v2 runtime.
//
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}
|||
|
|||
// Merge merges src into dst via the v2 runtime.
//
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
	protoV2.Merge(MessageV2(dst), MessageV2(src))
}
|||
|
|||
// Size returns the encoded size of m via the v2 runtime.
//
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Size(m Message) int {
	return protoV2.Size(MessageV2(m))
}
|||
|
|||
// Unmarshal merge-parses b into m via the v2 runtime (m is not reset).
//
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
@ -0,0 +1,58 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
) |
|||
|
|||
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
func DiscardUnknown(m Message) {
	if m != nil {
		discardUnknown(MessageReflect(m))
	}
}
|||
|
|||
// discardUnknown is the reflective worker for DiscardUnknown: it first
// recurses into every populated message-valued field (singular, repeated,
// and map values), then clears m's own unknown-field set.
func discardUnknown(m protoreflect.Message) {
	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				discardUnknown(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					discardUnknown(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					discardUnknown(v.Message())
					return true
				})
			}
		}
		return true
	})

	// Discard unknown fields.
	if len(m.GetUnknown()) > 0 {
		m.SetUnknown(nil)
	}
}
@ -0,0 +1,356 @@ |
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"reflect" |
|||
|
|||
"google.golang.org/protobuf/encoding/protowire" |
|||
"google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
"google.golang.org/protobuf/runtime/protoiface" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// Type aliases that map the legacy proto v1 extension API onto the
// corresponding v2 runtime types.
type (
	// ExtensionDesc represents an extension descriptor and
	// is used to interact with an extension field in a message.
	//
	// Variables of this type are generated in code by protoc-gen-go.
	ExtensionDesc = protoimpl.ExtensionInfo

	// ExtensionRange represents a range of message extensions.
	// Used in code generated by protoc-gen-go.
	ExtensionRange = protoiface.ExtensionRangeV1

	// Deprecated: Do not use; this is an internal type.
	Extension = protoimpl.ExtensionFieldV1

	// Deprecated: Do not use; this is an internal type.
	XXX_InternalExtensions = protoimpl.ExtensionFields
)
|||
|
|||
// ErrMissingExtension reports whether the extension was not present.
var ErrMissingExtension = errors.New("proto: missing extension")

// errNotExtendable is returned when the target message declares no
// extension ranges.
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|||
|
|||
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return false
	}

	// Check whether any populated known field matches the field number.
	xtd := xt.TypeDescriptor()
	if isValidExtension(mr.Descriptor(), xtd) {
		has = mr.Has(xtd)
	} else {
		// Descriptor mismatch: fall back to scanning populated fields
		// for a matching field number.
		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
			has = int32(fd.Number()) == xt.Field
			return !has
		})
	}

	// Check whether any unknown field matches the field number.
	for b := mr.GetUnknown(); !has && len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		has = int32(num) == xt.Field
		b = b[n:]
	}
	return has
}
|||
|
|||
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	xtd := xt.TypeDescriptor()
	if isValidExtension(mr.Descriptor(), xtd) {
		mr.Clear(xtd)
	} else {
		// Descriptor mismatch: clear whichever populated field carries
		// the same field number.
		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
			if int32(fd.Number()) == xt.Field {
				mr.Clear(fd)
				return false
			}
			return true
		})
	}
	// Also drop any unknown-field entries with this field number.
	clearUnknown(mr, fieldNum(xt.Field))
}
|||
|
|||
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
		if fd.IsExtension() {
			mr.Clear(fd)
		}
		return true
	})
	// Unknown fields whose numbers fall inside the extension ranges are
	// treated as extensions too.
	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
|||
|
|||
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Retrieve the unknown fields for this extension field.
	var bo protoreflect.RawFields
	for bi := mr.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if int32(num) == xt.Field {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}

	// For type incomplete descriptors, only retrieve the unknown fields.
	if xt.ExtensionType == nil {
		return []byte(bo), nil
	}

	// If the extension field only exists as unknown fields, unmarshal it.
	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	if !mr.Has(xtd) && len(bo) > 0 {
		m2 := mr.New()
		if err := (proto.UnmarshalOptions{
			Resolver: extensionResolver{xt},
		}.Unmarshal(bo, m2.Interface())); err != nil {
			return nil, err
		}
		if m2.Has(xtd) {
			// Promote the parsed value into m and drop the now-redundant
			// unknown-field bytes.
			mr.Set(xtd, m2.Get(xtd))
			clearUnknown(mr, fieldNum(xt.Field))
		}
	}

	// Check whether the message has the extension field set or a default.
	var pv protoreflect.Value
	switch {
	case mr.Has(xtd):
		pv = mr.Get(xtd)
	case xtd.HasDefault():
		pv = xtd.Default()
	default:
		return nil, ErrMissingExtension
	}

	v := xt.InterfaceOf(pv)
	rv := reflect.ValueOf(v)
	if isScalarKind(rv.Kind()) {
		// The v1 API represents scalar extensions as *T; box the value.
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		v = rv2.Interface()
	}
	return v, nil
}
|||
|
|||
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }

// FindExtensionByName returns the stored extension when the full name
// matches, otherwise it consults the global registry.
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByName(field)
}

// FindExtensionByNumber returns the stored extension when both the
// containing message and field number match, otherwise it consults the
// global registry.
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
|||
|
|||
// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return nil, errNotExtendable
	}

	vs := make([]interface{}, len(xts))
	for i, xt := range xts {
		v, err := GetExtension(m, xt)
		if err != nil {
			// A missing extension leaves a nil slot; any other error
			// aborts and returns the partially filled slice.
			if err == ErrMissingExtension {
				continue
			}
			return vs, err
		}
		vs[i] = v
	}
	return vs, nil
}
|||
|
|||
// SetExtension sets an extension field in m to the provided value.
// The value's type must exactly match xt.ExtensionType; scalar values may
// be passed as non-nil pointers (the v1 representation) and are unboxed.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return errNotExtendable
	}

	rv := reflect.ValueOf(v)
	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
	}
	if rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
		}
		if isScalarKind(rv.Elem().Kind()) {
			// Unbox *T into T for the v2 runtime.
			v = rv.Elem().Interface()
		}
	}

	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	mr.Set(xtd, xt.ValueOf(v))
	// Drop any stale unknown-field bytes for this field number.
	clearUnknown(mr, fieldNum(xt.Field))
	return nil
}
|||
|
|||
// SetRawExtension inserts b into the unknown fields of m.
// It panics if any field in b does not carry the field number fnum.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	// Verify that the raw field is valid.
	for b0 := b; len(b0) > 0; {
		num, _, n := protowire.ConsumeField(b0)
		if int32(num) != fnum {
			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
		}
		b0 = b0[n:]
	}

	// Remove any existing value for the field before appending the raw bytes.
	ClearExtension(m, &ExtensionDesc{Field: fnum})
	mr.SetUnknown(append(mr.GetUnknown(), b...))
}
|||
|
|||
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Collect a set of known extension descriptors.
	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			xt := fd.(protoreflect.ExtensionTypeDescriptor)
			if xd, ok := xt.Type().(*ExtensionDesc); ok {
				extDescs[fd.Number()] = xd
			}
		}
		return true
	})

	// Collect a set of unknown extension descriptors.
	// A nil map value marks a field number known only from unknown fields.
	extRanges := mr.Descriptor().ExtensionRanges()
	for b := mr.GetUnknown(); len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		if extRanges.Has(num) && extDescs[num] == nil {
			extDescs[num] = nil
		}
		b = b[n:]
	}

	// Transpose the set of descriptors into a list.
	var xts []*ExtensionDesc
	for num, xt := range extDescs {
		if xt == nil {
			xt = &ExtensionDesc{Field: int32(num)}
		}
		xts = append(xts, xt)
	}
	return xts, nil
}
|||
|
|||
// isValidExtension reports whether xtd is a valid extension descriptor for md:
// it must extend md itself and its number must fall in md's extension ranges.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
|||
|
|||
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
|
|||
// This function exists for historical reasons since the representation of
|
|||
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
|
|||
func isScalarKind(k reflect.Kind) bool { |
|||
switch k { |
|||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: |
|||
return true |
|||
default: |
|||
return false |
|||
} |
|||
} |
|||
|
|||
// clearUnknown removes unknown fields from m where remover.Has reports true.
// The surviving fields are rebuilt into a fresh RawFields slice; SetUnknown
// is only called when something was actually removed.
func clearUnknown(m protoreflect.Message, remover interface {
	Has(protoreflect.FieldNumber) bool
}) {
	var bo protoreflect.RawFields
	for bi := m.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if !remover.Has(num) {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}
	if bi := m.GetUnknown(); len(bi) != len(bo) {
		m.SetUnknown(bo)
	}
}
|||
|
|||
// fieldNum adapts a single field number to the remover interface accepted
// by clearUnknown (matching exactly one field number).
type fieldNum protoreflect.FieldNumber

// Has reports whether n2 equals the wrapped field number.
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
	return protoreflect.FieldNumber(n1) == n2
}
@ -0,0 +1,306 @@ |
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"fmt" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
|
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// It also satisfies sort.Interface via the Len, Less, and Swap methods
// defined below, which are deliberate no-ops.
//
// Deprecated: Do not use.
type StructProperties struct {
	// Prop are the properties for each field.
	//
	// Fields belonging to a oneof are stored in OneofTypes instead, with a
	// single Properties representing the parent oneof held here.
	//
	// The order of Prop matches the order of fields in the Go struct.
	// Struct fields that are not related to protobufs have a "XXX_" prefix
	// in the Properties.Name and must be ignored by the user.
	Prop []*Properties

	// OneofTypes contains information about the oneof fields in this message.
	// It is keyed by the protobuf field name.
	OneofTypes map[string]*OneofProperties
}
|||
|
|||
// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
	// Name is a placeholder name with little meaningful semantic value.
	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
	Name string
	// OrigName is the protobuf field name or oneof name.
	OrigName string
	// JSONName is the JSON name for the protobuf field.
	JSONName string
	// Enum is a placeholder name for enums.
	// For historical reasons, this is neither the Go name for the enum,
	// nor the protobuf name for the enum.
	Enum string // Deprecated: Do not use.
	// Weak contains the full name of the weakly referenced message.
	Weak string
	// Wire is a string representation of the wire type.
	Wire string
	// WireType is the protobuf wire type for the field.
	WireType int
	// Tag is the protobuf field number.
	Tag int
	// Required reports whether this is a required field.
	Required bool
	// Optional reports whether this is an optional field.
	Optional bool
	// Repeated reports whether this is a repeated field.
	Repeated bool
	// Packed reports whether this is a packed repeated field of scalars.
	Packed bool
	// Proto3 reports whether this field operates under the proto3 syntax.
	Proto3 bool
	// Oneof reports whether this field belongs within a oneof.
	Oneof bool

	// Default is the default value in string form.
	Default string
	// HasDefault reports whether the field has a default value.
	HasDefault bool

	// MapKeyProp is the properties for the key field for a map field.
	MapKeyProp *Properties
	// MapValProp is the properties for the value field for a map field.
	MapValProp *Properties
}
|||
|
|||
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
	// Type is a pointer to the generated wrapper type for the field value.
	// This is nil for messages that are not in the open-struct API.
	Type reflect.Type
	// Field is the index into StructProperties.Prop for the containing oneof.
	Field int
	// Prop is the properties for the field.
	Prop *Properties
}
|||
|
|||
// String formats the properties in the protobuf struct field tag style.
|
|||
func (p *Properties) String() string { |
|||
s := p.Wire |
|||
s += "," + strconv.Itoa(p.Tag) |
|||
if p.Required { |
|||
s += ",req" |
|||
} |
|||
if p.Optional { |
|||
s += ",opt" |
|||
} |
|||
if p.Repeated { |
|||
s += ",rep" |
|||
} |
|||
if p.Packed { |
|||
s += ",packed" |
|||
} |
|||
s += ",name=" + p.OrigName |
|||
if p.JSONName != "" { |
|||
s += ",json=" + p.JSONName |
|||
} |
|||
if len(p.Enum) > 0 { |
|||
s += ",enum=" + p.Enum |
|||
} |
|||
if len(p.Weak) > 0 { |
|||
s += ",weak=" + p.Weak |
|||
} |
|||
if p.Proto3 { |
|||
s += ",proto3" |
|||
} |
|||
if p.Oneof { |
|||
s += ",oneof" |
|||
} |
|||
if p.HasDefault { |
|||
s += ",def=" + p.Default |
|||
} |
|||
return s |
|||
} |
|||
|
|||
// Parse populates p by parsing a string in the protobuf struct field tag
// style: a comma-separated list where the wire type and field number come
// first, followed by modifiers. Unrecognized entries are silently ignored.
func (p *Properties) Parse(tag string) {
	// For example: "bytes,49,opt,name=foo,def=hello!"
	for len(tag) > 0 {
		i := strings.IndexByte(tag, ',')
		if i < 0 {
			i = len(tag)
		}
		switch s := tag[:i]; {
		case strings.HasPrefix(s, "name="):
			p.OrigName = s[len("name="):]
		case strings.HasPrefix(s, "json="):
			p.JSONName = s[len("json="):]
		case strings.HasPrefix(s, "enum="):
			p.Enum = s[len("enum="):]
		case strings.HasPrefix(s, "weak="):
			p.Weak = s[len("weak="):]
		case strings.Trim(s, "0123456789") == "":
			// An all-digit entry is the field number.
			n, _ := strconv.ParseUint(s, 10, 32)
			p.Tag = int(n)
		case s == "opt":
			p.Optional = true
		case s == "req":
			p.Required = true
		case s == "rep":
			p.Repeated = true
		case s == "varint" || s == "zigzag32" || s == "zigzag64":
			p.Wire = s
			p.WireType = WireVarint
		case s == "fixed32":
			p.Wire = s
			p.WireType = WireFixed32
		case s == "fixed64":
			p.Wire = s
			p.WireType = WireFixed64
		case s == "bytes":
			p.Wire = s
			p.WireType = WireBytes
		case s == "group":
			p.Wire = s
			p.WireType = WireStartGroup
		case s == "packed":
			p.Packed = true
		case s == "proto3":
			p.Proto3 = true
		case s == "oneof":
			p.Oneof = true
		case strings.HasPrefix(s, "def="):
			// The default tag is special in that everything afterwards is the
			// default regardless of the presence of commas.
			// Setting i to len(tag) makes the trailing TrimPrefix consume the
			// rest of the tag and terminate the loop.
			p.HasDefault = true
			p.Default, i = tag[len("def="):], len(tag)
		}
		tag = strings.TrimPrefix(tag[i:], ",")
	}
}
|||
|
|||
// Init populates the properties from a protocol buffer struct tag.
|
|||
//
|
|||
// Deprecated: Do not use.
|
|||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { |
|||
p.Name = name |
|||
p.OrigName = name |
|||
if tag == "" { |
|||
return |
|||
} |
|||
p.Parse(tag) |
|||
|
|||
if typ != nil && typ.Kind() == reflect.Map { |
|||
p.MapKeyProp = new(Properties) |
|||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) |
|||
p.MapValProp = new(Properties) |
|||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) |
|||
} |
|||
} |
|||
|
|||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
|||
|
|||
// GetProperties returns the list of properties for the type represented by t,
|
|||
// which must be a generated protocol buffer message in the open-struct API,
|
|||
// where protobuf message fields are represented by exported Go struct fields.
|
|||
//
|
|||
// Deprecated: Use protobuf reflection instead.
|
|||
func GetProperties(t reflect.Type) *StructProperties { |
|||
if p, ok := propertiesCache.Load(t); ok { |
|||
return p.(*StructProperties) |
|||
} |
|||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) |
|||
return p.(*StructProperties) |
|||
} |
|||
|
|||
// newProperties computes the StructProperties for t, which must be a struct
// type from a generated message in the open-struct API; it panics otherwise.
// Oneof wrapper types are discovered via the legacy XXX_OneofFuncs /
// XXX_OneofWrappers methods or, failing those, the v2 message info.
func newProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
	}

	var hasOneof bool
	prop := new(StructProperties)

	// Construct a list of properties for each field in the struct.
	for i := 0; i < t.NumField(); i++ {
		p := new(Properties)
		f := t.Field(i)
		tagField := f.Tag.Get("protobuf")
		p.Init(f.Type, f.Name, tagField, &f)

		tagOneof := f.Tag.Get("protobuf_oneof")
		if tagOneof != "" {
			hasOneof = true
			p.OrigName = tagOneof
		}

		// Rename unrelated struct fields with the "XXX_" prefix since so much
		// user code simply checks for this to exclude special fields.
		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
			p.Name = "XXX_" + p.Name
			p.OrigName = "XXX_" + p.OrigName
		} else if p.Weak != "" {
			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
		}

		prop.Prop = append(prop.Prop, p)
	}

	// Construct a mapping of oneof field names to properties.
	if hasOneof {
		var oneofWrappers []interface{}
		// Try the legacy generated accessors first, then consult the v2
		// message info; the last source that matches wins.
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
		}
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
		}
		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
			}
		}

		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, wrapper := range oneofWrappers {
			p := &OneofProperties{
				Type: reflect.ValueOf(wrapper).Type(), // *T
				Prop: new(Properties),
			}
			f := p.Type.Elem().Field(0)
			p.Prop.Name = f.Name
			p.Prop.Parse(f.Tag.Get("protobuf"))

			// Determine the struct field that contains this oneof.
			// Each wrapper is assignable to exactly one parent field.
			var foundOneof bool
			for i := 0; i < t.NumField() && !foundOneof; i++ {
				if p.Type.AssignableTo(t.Field(i).Type) {
					p.Field = i
					foundOneof = true
				}
			}
			if !foundOneof {
				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
			}
			prop.OneofTypes[p.Prop.OrigName] = p
		}
	}

	return prop
}
|||
|
|||
// Len, Less, and Swap implement sort.Interface as a deliberate no-op:
// Less always reports false and Swap does nothing, so sorting a
// StructProperties leaves the field order unchanged.
func (sp *StructProperties) Len() int { return len(sp.Prop) }

func (sp *StructProperties) Less(i, j int) bool { return false }

func (sp *StructProperties) Swap(i, j int) { return }
@ -0,0 +1,167 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Package proto provides functionality for handling protocol buffer messages.
|
|||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
|||
// message and the binary wire format.
|
|||
//
|
|||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
|||
// more information.
|
|||
//
|
|||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
|||
package proto |
|||
|
|||
import ( |
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/runtime/protoiface" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// Version constants historically asserted by generated code to require a
// minimum supported version of this package. All are permanently true.
const (
	ProtoPackageIsVersion1 = true
	ProtoPackageIsVersion2 = true
	ProtoPackageIsVersion3 = true
	ProtoPackageIsVersion4 = true
)

// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}

// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}

// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
|||
|
|||
// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil. The conversion is delegated to protoimpl.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
	return protoimpl.X.ProtoMessageV1Of(m)
}
|||
|
|||
// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil. The conversion is delegated to protoimpl.
func MessageV2(m GeneratedMessage) protoV2.Message {
	return protoimpl.X.ProtoMessageV2Of(m)
}
|||
|
|||
// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil. The view is obtained via protoimpl.
func MessageReflect(m Message) protoreflect.Message {
	return protoimpl.X.MessageOf(m)
}
|||
|
|||
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
	// Marshal formats the encoded bytes of the message.
	// It should be deterministic and emit valid protobuf wire data.
	// The caller takes ownership of the returned buffer.
	Marshal() ([]byte, error)
}

// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
	// Unmarshal parses the encoded bytes of the protobuf wire input.
	// The provided buffer is only valid for the duration of the method call.
	// It should not reset the receiver message.
	Unmarshal([]byte) error
}

// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
	// Merge merges the contents of src into the receiver message.
	// It clones all data structures in src such that it aliases no mutable
	// memory referenced by src.
	Merge(src Message)
}
|||
|
|||
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
	err error // underlying v2 error, if any
}

// Error returns the underlying error message, or a generic
// "required field not set" message when no underlying error is present.
func (e *RequiredNotSetError) Error() string {
	if e.err != nil {
		return e.err.Error()
	}
	return "proto: required field not set"
}

// RequiredNotSet always reports true, marking this error for callers
// that detect the required-field condition via an interface assertion.
func (e *RequiredNotSetError) RequiredNotSet() bool {
	return true
}
|||
|
|||
func checkRequiredNotSet(m protoV2.Message) error { |
|||
if err := protoV2.CheckInitialized(m); err != nil { |
|||
return &RequiredNotSetError{err: err} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Clone returns a deep copy of src.
|
|||
func Clone(src Message) Message { |
|||
return MessageV1(protoV2.Clone(MessageV2(src))) |
|||
} |
|||
|
|||
// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
	protoV2.Merge(MessageV2(dst), MessageV2(src))
}
|||
|
|||
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
//
// The comparison is delegated to the v2 proto.Equal after conversion.
func Equal(x, y Message) bool {
	return protoV2.Equal(MessageV2(x), MessageV2(y))
}
|||
|
|||
func isMessageSet(md protoreflect.MessageDescriptor) bool { |
|||
ms, ok := md.(interface{ IsMessageSet() bool }) |
|||
return ok && ms.IsMessageSet() |
|||
} |
@ -0,0 +1,317 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"bytes" |
|||
"compress/gzip" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"reflect" |
|||
"strings" |
|||
"sync" |
|||
|
|||
"google.golang.org/protobuf/reflect/protodesc" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
"google.golang.org/protobuf/runtime/protoimpl" |
|||
) |
|||
|
|||
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

// fileCache maps a file path to its gzip-compressed raw descriptor.
var fileCache sync.Map // map[filePath]fileDescGZIP

// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// It panics if d is not a valid gzip-compressed descriptor.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
	// Decompress the descriptor.
	zr, err := gzip.NewReader(bytes.NewReader(d))
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}
	b, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}

	// Construct a protoreflect.FileDescriptor from the raw descriptor.
	// Note that DescBuilder.Build automatically registers the constructed
	// file descriptor with the v2 registry.
	protoimpl.DescBuilder{RawDescriptor: b}.Build()

	// Locally cache the raw descriptor form for the file.
	fileCache.Store(s, d)
}
|||
|
|||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
	if v, ok := fileCache.Load(s); ok {
		return v.(fileDescGZIP)
	}

	// Find the descriptor in the v2 registry and re-encode it.
	var b []byte
	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
	}

	// Locally cache the raw descriptor form for the file, re-compressed
	// to match the representation stored by RegisterFile.
	if len(b) > 0 {
		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
		return v.(fileDescGZIP)
	}
	return nil
}
|||
|
|||
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

// enumCache maps an enumName to its name-to-number mapping.
var enumCache sync.Map // map[enumName]enumsByName

// numFilesCache records the number of files seen per proto package;
// EnumValueMap uses it to detect when its view of the registry is stale.
var numFilesCache sync.Map // map[protoreflect.FullName]int

// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// It panics if the same enum name is registered twice.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
	if _, ok := enumCache.Load(s); ok {
		panic("proto: duplicate enum registered: " + s)
	}
	enumCache.Store(s, m)

	// This does not forward registration to the v2 registry since this API
	// lacks sufficient information to construct a complete v2 enum descriptor.
}
|||
|
|||
// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}

	// Check whether the cache is stale. If the number of files in the current
	// package differs, then it means that some enums may have been recently
	// registered upstream that we do not know about.
	var protoPkg protoreflect.FullName
	if i := strings.LastIndexByte(s, '.'); i >= 0 {
		protoPkg = protoreflect.FullName(s[:i])
	}
	v, _ := numFilesCache.Load(protoPkg)
	numFiles, _ := v.(int)
	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
		return nil // cache is up-to-date; was not found earlier
	}

	// Update the enum cache for all enums declared in the given proto package,
	// recounting the files as we walk them.
	numFiles = 0
	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
			name := protoimpl.X.LegacyEnumName(ed)
			if _, ok := enumCache.Load(name); !ok {
				m := make(enumsByName)
				evs := ed.Values()
				for i := evs.Len() - 1; i >= 0; i-- {
					ev := evs.Get(i)
					m[string(ev.Name())] = int32(ev.Number())
				}
				enumCache.LoadOrStore(name, m)
			}
		})
		numFiles++
		return true
	})
	numFilesCache.Store(protoPkg, numFiles)

	// Check cache again for enum map.
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}
	return nil
}
|||
|
|||
// walkEnums recursively walks all enums declared in d.
|
|||
func walkEnums(d interface { |
|||
Enums() protoreflect.EnumDescriptors |
|||
Messages() protoreflect.MessageDescriptors |
|||
}, f func(protoreflect.EnumDescriptor)) { |
|||
eds := d.Enums() |
|||
for i := eds.Len() - 1; i >= 0; i-- { |
|||
f(eds.Get(i)) |
|||
} |
|||
mds := d.Messages() |
|||
for i := mds.Len() - 1; i >= 0; i-- { |
|||
walkEnums(mds.Get(i), f) |
|||
} |
|||
} |
|||
|
|||
// messageName is the full name of protobuf message.
type messageName = string

// messageTypeCache maps a message name to its concrete Go type.
var messageTypeCache sync.Map // map[messageName]reflect.Type

// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// It panics if the v2 registry rejects the registration.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
		panic(err)
	}
	messageTypeCache.Store(s, reflect.TypeOf(m))
}
|||
|
|||
// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// It panics if m is not of map kind or if the name is already registered.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
	t := reflect.TypeOf(m)
	if t.Kind() != reflect.Map {
		panic(fmt.Sprintf("invalid map kind: %v", t))
	}
	if _, ok := messageTypeCache.Load(s); ok {
		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
	}
	messageTypeCache.Store(s, t)
}
|||
|
|||
// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
	if v, ok := messageTypeCache.Load(s); ok {
		return v.(reflect.Type)
	}

	// Derive the message type from the v2 registry.
	var t reflect.Type
	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
		t = messageGoType(mt)
	}

	// If we could not get a concrete type, it is possible that it is a
	// pseudo-message for a map entry.
	if t == nil {
		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
			// Synthesize a Go map of the entry's key field (1) to its
			// value field (2).
			kt := goTypeForField(md.Fields().ByNumber(1))
			vt := goTypeForField(md.Fields().ByNumber(2))
			t = reflect.MapOf(kt, vt)
		}
	}

	// Locally cache the message type for the given name.
	if t != nil {
		v, _ := messageTypeCache.LoadOrStore(s, t)
		return v.(reflect.Type)
	}
	return nil
}
|||
|
|||
// goTypeForField returns the Go type used to represent a field of the given
// descriptor, falling back to generic protoreflect types when no concrete
// generated type is registered.
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
	switch k := fd.Kind(); k {
	case protoreflect.EnumKind:
		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
			return enumGoType(et)
		}
		return reflect.TypeOf(protoreflect.EnumNumber(0))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
			return messageGoType(mt)
		}
		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
	default:
		// Scalar kinds: derive the Go type from the field's default value.
		return reflect.TypeOf(fd.Default().Interface())
	}
}
|||
|
|||
// enumGoType returns the concrete Go type backing the enum type et.
func enumGoType(et protoreflect.EnumType) reflect.Type {
	return reflect.TypeOf(et.New(0))
}

// messageGoType returns the concrete Go type backing the message type mt,
// as observed through the v1 message interface.
func messageGoType(mt protoreflect.MessageType) reflect.Type {
	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}
|||
|
|||
// MessageName returns the full protobuf name for the given message type.
|
|||
//
|
|||
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
|
|||
func MessageName(m Message) messageName { |
|||
if m == nil { |
|||
return "" |
|||
} |
|||
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { |
|||
return m.XXX_MessageName() |
|||
} |
|||
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) |
|||
} |
|||
|
|||
// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// It panics if the v2 registry rejects the extension.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
		panic(err)
	}
}
|||
|
|||
// extensionsByNumber maps an extension field number to its descriptor.
type extensionsByNumber = map[int32]*ExtensionDesc

// extensionCache maps a message name to its known extensions by field number.
var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
	// Check whether the cache is stale. If the number of extensions for
	// the given message differs, then it means that some extensions were
	// recently registered upstream that we do not know about.
	s := MessageName(m)
	v, _ := extensionCache.Load(s)
	xs, _ := v.(extensionsByNumber)
	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
		return xs // cache is up-to-date
	}

	// Cache is stale, re-compute the extensions map.
	xs = make(extensionsByNumber)
	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
		if xd, ok := xt.(*ExtensionDesc); ok {
			xs[int32(xt.TypeDescriptor().Number())] = xd
		} else {
			// TODO: This implies that the protoreflect.ExtensionType is a
			// custom type not generated by protoc-gen-go. We could try and
			// convert the type to an ExtensionDesc.
		}
		return true
	})
	extensionCache.Store(s, xs)
	return xs
}
@ -0,0 +1,801 @@ |
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"encoding" |
|||
"errors" |
|||
"fmt" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"unicode/utf8" |
|||
|
|||
"google.golang.org/protobuf/encoding/prototext" |
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
) |
|||
|
|||
// wrapTextUnmarshalV2 selects between the legacy text parser in this file
// and the v2 prototext unmarshaler. It is currently disabled.
const wrapTextUnmarshalV2 = false

// ParseError is returned by UnmarshalText.
type ParseError struct {
	Message string

	// Deprecated: Do not use.
	Line, Offset int
}
|||
|
|||
func (e *ParseError) Error() string { |
|||
if wrapTextUnmarshalV2 { |
|||
return e.Message |
|||
} |
|||
if e.Line == 1 { |
|||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) |
|||
} |
|||
return fmt.Sprintf("line %d: %v", e.Line, e.Message) |
|||
} |
|||
|
|||
// UnmarshalText parses a proto text formatted string into m.
|
|||
func UnmarshalText(s string, m Message) error { |
|||
if u, ok := m.(encoding.TextUnmarshaler); ok { |
|||
return u.UnmarshalText([]byte(s)) |
|||
} |
|||
|
|||
m.Reset() |
|||
mi := MessageV2(m) |
|||
|
|||
if wrapTextUnmarshalV2 { |
|||
err := prototext.UnmarshalOptions{ |
|||
AllowPartial: true, |
|||
}.Unmarshal([]byte(s), mi) |
|||
if err != nil { |
|||
return &ParseError{Message: err.Error()} |
|||
} |
|||
return checkRequiredNotSet(mi) |
|||
} else { |
|||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { |
|||
return err |
|||
} |
|||
return checkRequiredNotSet(mi) |
|||
} |
|||
} |
|||
|
|||
// textParser holds the state of the legacy proto text-format parser.
type textParser struct {
	s            string // remaining input
	done         bool   // whether the parsing is finished (success or error)
	backed       bool   // whether back() was called
	offset, line int    // current position within the input
	cur          token  // most recently read token
}

// token is a single lexical token produced by textParser.
type token struct {
	value    string
	err      *ParseError
	line     int    // line number
	offset   int    // byte number from start of input, not start of line
	unquoted string // the unquoted version of value, if it was a quoted string
}
|||
|
|||
func newTextParser(s string) *textParser { |
|||
p := new(textParser) |
|||
p.s = s |
|||
p.line = 1 |
|||
p.cur.line = 1 |
|||
return p |
|||
} |
|||
|
|||
// unmarshalMessage parses a sequence of "name: value" entries into m
// until the given terminator token (or end of input when terminator is
// empty). It handles extension fields, expanded Any messages, group
// name matching, oneof conflicts, and duplicate-field detection.
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
	md := m.Descriptor()
	fds := md.Fields()

	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input. A name may also be
	// "[extension]" or "[type/url]".
	//
	// The whole struct can also be an expanded Any message, like:
	// [type/url] < ... struct contents ... >
	seen := make(map[protoreflect.FieldNumber]bool)
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Extension or Any; handled separately.
			if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
				return err
			}
			continue
		}

		// This is a normal, non-extension field.
		name := protoreflect.Name(tok.value)
		fd := fds.ByName(name)
		switch {
		case fd == nil:
			// Groups are named by their message type but registered
			// under the lowercased name; retry with that.
			gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
			if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
				fd = gd
			}
		case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
			fd = nil
		case fd.IsWeak() && fd.Message().IsPlaceholder():
			fd = nil
		}
		if fd == nil {
			// Prefer the generated Go type name in the error when available.
			typeName := string(md.FullName())
			if m, ok := m.Interface().(Message); ok {
				t := reflect.TypeOf(m)
				if t.Kind() == reflect.Ptr {
					typeName = t.Elem().String()
				}
			}
			return p.errorf("unknown field name %q in %v", name, typeName)
		}
		if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
			return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
		}
		if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
			return p.errorf("non-repeated field %q was repeated", fd.Name())
		}
		seen[fd.Number()] = true

		// Consume any colon.
		if err := p.checkForColon(fd); err != nil {
			return err
		}

		// Parse into the field.
		v := m.Get(fd)
		if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
			v = m.Mutable(fd)
		}
		if v, err = p.unmarshalValue(v, fd); err != nil {
			return err
		}
		m.Set(fd, v)

		if err := p.consumeOptionalSeparator(); err != nil {
			return err
		}
	}
	return nil
}
|||
|
|||
// unmarshalExtensionOrAny parses a bracketed name that has already had
// its leading '[' consumed. A name containing '/' is treated as an
// expanded google.protobuf.Any (parsed, re-marshaled, and stored into
// the type_url/value fields of m); otherwise it is resolved as an
// extension of m. seen tracks field numbers for duplicate detection.
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
	name, err := p.consumeExtensionOrAnyName()
	if err != nil {
		return err
	}

	// If it contains a slash, it's an Any type URL.
	if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		// consume an optional colon
		if tok.value == ":" {
			tok = p.next()
			if tok.err != nil {
				return tok.err
			}
		}

		var terminator string
		switch tok.value {
		case "<":
			terminator = ">"
		case "{":
			terminator = "}"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}

		mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
		if err != nil {
			return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
		}
		m2 := mt.New()
		if err := p.unmarshalMessage(m2, terminator); err != nil {
			return err
		}
		// Re-marshal the expanded message into the Any's value bytes.
		b, err := protoV2.Marshal(m2.Interface())
		if err != nil {
			return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
		}

		urlFD := m.Descriptor().Fields().ByName("type_url")
		valFD := m.Descriptor().Fields().ByName("value")
		if seen[urlFD.Number()] {
			return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
		}
		if seen[valFD.Number()] {
			return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
		}
		m.Set(urlFD, protoreflect.ValueOfString(name))
		m.Set(valFD, protoreflect.ValueOfBytes(b))
		seen[urlFD.Number()] = true
		seen[valFD.Number()] = true
		return nil
	}

	xname := protoreflect.FullName(name)
	xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
	if xt == nil && isMessageSet(m.Descriptor()) {
		// MessageSet extensions are registered under a nested name.
		xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
	}
	if xt == nil {
		return p.errorf("unrecognized extension %q", name)
	}
	fd := xt.TypeDescriptor()
	if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
		return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
	}

	if err := p.checkForColon(fd); err != nil {
		return err
	}

	v := m.Get(fd)
	if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
		v = m.Mutable(fd)
	}
	v, err = p.unmarshalValue(v, fd)
	if err != nil {
		return err
	}
	m.Set(fd, v)
	return p.consumeOptionalSeparator()
}
|||
|
|||
// unmarshalValue parses the text for one field value into v. Repeated
// fields accept bracketed list notation ([v1,v2,...]) or a single
// element; map fields parse one "< key: K value: V >" entry; all other
// fields delegate to unmarshalSingularValue.
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	tok := p.next()
	if tok.err != nil {
		return v, tok.err
	}
	if tok.value == "" {
		return v, p.errorf("unexpected EOF")
	}

	switch {
	case fd.IsList():
		lv := v.List()
		var err error
		if tok.value == "[" {
			// Repeated field with list notation, like [1,2,3].
			for {
				vv := lv.NewElement()
				vv, err = p.unmarshalSingularValue(vv, fd)
				if err != nil {
					return v, err
				}
				lv.Append(vv)

				tok := p.next()
				if tok.err != nil {
					return v, tok.err
				}
				if tok.value == "]" {
					break
				}
				if tok.value != "," {
					return v, p.errorf("Expected ']' or ',' found %q", tok.value)
				}
			}
			return v, nil
		}

		// One value of the repeated field.
		p.back()
		vv := lv.NewElement()
		vv, err = p.unmarshalSingularValue(vv, fd)
		if err != nil {
			return v, err
		}
		lv.Append(vv)
		return v, nil
	case fd.IsMap():
		// The map entry should be this sequence of tokens:
		//	< key : KEY value : VALUE >
		// However, implementations may omit key or value, and technically
		// we should support them in any order.
		var terminator string
		switch tok.value {
		case "<":
			terminator = ">"
		case "{":
			terminator = "}"
		default:
			return v, p.errorf("expected '{' or '<', found %q", tok.value)
		}

		keyFD := fd.MapKey()
		valFD := fd.MapValue()

		mv := v.Map()
		kv := keyFD.Default() // omitted key falls back to the default key
		vv := mv.NewValue()
		for {
			tok := p.next()
			if tok.err != nil {
				return v, tok.err
			}
			if tok.value == terminator {
				break
			}
			var err error
			switch tok.value {
			case "key":
				if err := p.consumeToken(":"); err != nil {
					return v, err
				}
				if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
					return v, err
				}
				if err := p.consumeOptionalSeparator(); err != nil {
					return v, err
				}
			case "value":
				if err := p.checkForColon(valFD); err != nil {
					return v, err
				}
				if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
					return v, err
				}
				if err := p.consumeOptionalSeparator(); err != nil {
					return v, err
				}
			default:
				p.back()
				return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
			}
		}
		mv.Set(kv.MapKey(), vv)
		return v, nil
	default:
		p.back()
		return p.unmarshalSingularValue(v, fd)
	}
}
|||
|
|||
// unmarshalSingularValue parses one scalar, enum, or message value for
// fd from the token stream, returning it as a protoreflect.Value. A
// token that does not match the field's kind produces a parse error.
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	tok := p.next()
	if tok.err != nil {
		return v, tok.err
	}
	if tok.value == "" {
		return v, p.errorf("unexpected EOF")
	}

	switch fd.Kind() {
	case protoreflect.BoolKind:
		switch tok.value {
		case "true", "1", "t", "True":
			return protoreflect.ValueOfBool(true), nil
		case "false", "0", "f", "False":
			return protoreflect.ValueOfBool(false), nil
		}
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfInt32(int32(x)), nil
		}

		// The C++ parser accepts large positive hex numbers that uses
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
		if strings.HasPrefix(tok.value, "0x") {
			if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
				return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
			}
		}
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			return protoreflect.ValueOfInt64(int64(x)), nil
		}

		// The C++ parser accepts large positive hex numbers that uses
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
		if strings.HasPrefix(tok.value, "0x") {
			if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
				return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
			}
		}
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfUint32(uint32(x)), nil
		}
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			return protoreflect.ValueOfUint64(uint64(x)), nil
		}
	case protoreflect.FloatKind:
		// Ignore 'f' for compatibility with output generated by C++,
		// but don't remove 'f' when the value is "-inf" or "inf".
		v := tok.value
		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
			v = v[:len(v)-len("f")]
		}
		if x, err := strconv.ParseFloat(v, 32); err == nil {
			return protoreflect.ValueOfFloat32(float32(x)), nil
		}
	case protoreflect.DoubleKind:
		// Ignore 'f' for compatibility with output generated by C++,
		// but don't remove 'f' when the value is "-inf" or "inf".
		v := tok.value
		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
			v = v[:len(v)-len("f")]
		}
		if x, err := strconv.ParseFloat(v, 64); err == nil {
			return protoreflect.ValueOfFloat64(float64(x)), nil
		}
	case protoreflect.StringKind:
		if isQuote(tok.value[0]) {
			return protoreflect.ValueOfString(tok.unquoted), nil
		}
	case protoreflect.BytesKind:
		if isQuote(tok.value[0]) {
			return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
		}
	case protoreflect.EnumKind:
		// Accept either a numeric value or a named enum constant.
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
		}
		vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
		if vd != nil {
			return protoreflect.ValueOfEnum(vd.Number()), nil
		}
	case protoreflect.MessageKind, protoreflect.GroupKind:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return v, p.errorf("expected '{' or '<', found %q", tok.value)
		}
		err := p.unmarshalMessage(v.Message(), terminator)
		return v, err
	default:
		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
	}
	return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
|||
|
|||
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present. The colon is
// optional before message-typed fields, required otherwise.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		if fd.Message() == nil {
			return p.errorf("expected ':', found %q", tok.value)
		}
		p.back()
	}
	return nil
}
|||
|
|||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
	tok := p.next()
	if tok.err != nil {
		return "", tok.err
	}

	// If extension name or type url is quoted, it's a single token.
	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
		if err != nil {
			return "", err
		}
		return name, p.consumeToken("]")
	}

	// Consume everything up to "]"
	var parts []string
	for tok.value != "]" {
		parts = append(parts, tok.value)
		tok = p.next()
		if tok.err != nil {
			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
		}
		if p.done && tok.value != "]" {
			return "", p.errorf("unclosed type_url or extension name")
		}
	}
	return strings.Join(parts, ""), nil
}
|||
|
|||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|||
// It is used in unmarshalMessage to provide backward compatibility.
|
|||
func (p *textParser) consumeOptionalSeparator() error { |
|||
tok := p.next() |
|||
if tok.err != nil { |
|||
return tok.err |
|||
} |
|||
if tok.value != ";" && tok.value != "," { |
|||
p.back() |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// errorf builds a ParseError at the current token's position, records it
// on the token, and marks parsing as finished so subsequent next() calls
// keep returning the failed token.
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
	p.cur.err = pe
	p.done = true
	return pe
}
|||
|
|||
// skipWhitespace advances past whitespace and '#' comments, updating the
// line counter and byte offset, and sets done when input is exhausted.
func (p *textParser) skipWhitespace() {
	i := 0
	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
		if p.s[i] == '#' {
			// comment; skip to end of line or input
			for i < len(p.s) && p.s[i] != '\n' {
				i++
			}
			if i == len(p.s) {
				break
			}
		}
		if p.s[i] == '\n' {
			p.line++
		}
		i++
	}
	p.offset += i
	p.s = p.s[i:len(p.s)]
	if len(p.s) == 0 {
		p.done = true
	}
}
|||
|
|||
// advance lexes the next token into p.cur: a single punctuation symbol,
// a quoted string (with its unquoted form), or an identifier/number run.
// Lexing errors are recorded via p.errorf on the current token.
func (p *textParser) advance() {
	// Skip whitespace
	p.skipWhitespace()
	if p.done {
		return
	}

	// Start of non-whitespace
	p.cur.err = nil
	p.cur.offset, p.cur.line = p.offset, p.line
	p.cur.unquoted = ""
	switch p.s[0] {
	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
		// Single symbol
		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
	case '"', '\'':
		// Quoted string
		i := 1
		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
			if p.s[i] == '\\' && i+1 < len(p.s) {
				// skip escaped char
				i++
			}
			i++
		}
		if i >= len(p.s) || p.s[i] != p.s[0] {
			p.errorf("unmatched quote")
			return
		}
		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
		if err != nil {
			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
			return
		}
		// value keeps the surrounding quotes; unquoted is the decoded text.
		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
		p.cur.unquoted = unq
	default:
		i := 0
		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
			i++
		}
		if i == 0 {
			p.errorf("unexpected byte %#x", p.s[0])
			return
		}
		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
	}
	p.offset += len(p.cur.value)
}
|||
|
|||
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op: next() will re-deliver p.cur.
func (p *textParser) back() { p.backed = true }
|||
|
|||
// Advances the parser and returns the new current token.
// A backed-up or finished parser re-delivers the current token; adjacent
// quoted strings are concatenated into a single token (C-style).
func (p *textParser) next() *token {
	if p.backed || p.done {
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done {
		// EOF is represented as an empty-valued token.
		p.cur.value = ""
	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
		// Look for multiple quoted strings separated by whitespace,
		// and concatenate them.
		cat := p.cur
		for {
			p.skipWhitespace()
			if p.done || !isQuote(p.s[0]) {
				break
			}
			p.advance()
			if p.cur.err != nil {
				return &p.cur
			}
			cat.value += " " + p.cur.value
			cat.unquoted += p.cur.unquoted
		}
		p.done = false // parser may have seen EOF, but we want to return cat
		p.cur = cat
	}
	return &p.cur
}
|||
|
|||
func (p *textParser) consumeToken(s string) error { |
|||
tok := p.next() |
|||
if tok.err != nil { |
|||
return tok.err |
|||
} |
|||
if tok.value != s { |
|||
p.back() |
|||
return p.errorf("expected %q, found %q", s, tok.value) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// errBadUTF8 reports malformed UTF-8 encountered while unquoting.
var errBadUTF8 = errors.New("proto: bad UTF-8")

// unquoteC interprets s as the body of a string quoted with the given
// quote rune and returns it with all escape sequences resolved.
// This is based on C++'s tokenizer.cc; despite its name, it is *not*
// parsing C syntax (for instance, "\0" is an invalid quoted string here).
func unquoteC(s string, quote rune) (string, error) {
	// Fast path: input without backslashes or embedded quote runes can
	// be returned verbatim, avoiding allocation.
	if !strings.ContainsRune(s, '\\') && !strings.ContainsRune(s, quote) {
		return s, nil
	}

	out := make([]byte, 0, 3*len(s)/2)
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if r == utf8.RuneError && n == 1 {
			return "", errBadUTF8
		}
		s = s[n:]
		if r != '\\' {
			if r < utf8.RuneSelf {
				out = append(out, byte(r))
			} else {
				out = append(out, string(r)...)
			}
			continue
		}

		decoded, rest, err := unescape(s)
		if err != nil {
			return "", err
		}
		out = append(out, decoded...)
		s = rest
	}
	return string(out), nil
}

// unescape resolves the escape sequence at the start of s (the leading
// backslash already consumed) and returns the decoded text together with
// the remaining input.
func unescape(s string) (ch string, tail string, err error) {
	r, n := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError && n == 1 {
		return "", "", errBadUTF8
	}
	s = s[n:]

	switch r {
	case 'a':
		return "\a", s, nil
	case 'b':
		return "\b", s, nil
	case 'f':
		return "\f", s, nil
	case 'n':
		return "\n", s, nil
	case 'r':
		return "\r", s, nil
	case 't':
		return "\t", s, nil
	case 'v':
		return "\v", s, nil
	case '?':
		// Trigraph workaround inherited from the C++ scanner.
		return "?", s, nil
	case '\'', '"', '\\':
		return string(r), s, nil
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// Octal escapes are exactly three digits (the first already read).
		if len(s) < 2 {
			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
		}
		digits := string(r) + s[:2]
		s = s[2:]
		b, err := strconv.ParseUint(digits, 8, 8)
		if err != nil {
			return "", "", fmt.Errorf(`\%s contains non-octal digits`, digits)
		}
		return string([]byte{byte(b)}), s, nil
	case 'x', 'X', 'u', 'U':
		// Fixed-width hex escapes: \xHH, \uHHHH, \UHHHHHHHH.
		width := 2
		switch r {
		case 'u':
			width = 4
		case 'U':
			width = 8
		}
		if len(s) < width {
			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, width)
		}
		digits := s[:width]
		s = s[width:]
		v, err := strconv.ParseUint(digits, 16, 64)
		if err != nil {
			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, digits)
		}
		if r == 'x' || r == 'X' {
			// Byte escape: emit the raw byte, not an encoded rune.
			return string([]byte{byte(v)}), s, nil
		}
		if v > utf8.MaxRune {
			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, digits)
		}
		return string(rune(v)), s, nil
	}
	return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
|||
|
|||
// isIdentOrNumberChar reports whether c may appear inside an identifier
// or number literal: ASCII letters, digits, and '-', '+', '.', '_'.
func isIdentOrNumberChar(c byte) bool {
	return ('A' <= c && c <= 'Z') ||
		('a' <= c && c <= 'z') ||
		('0' <= c && c <= '9') ||
		c == '-' || c == '+' || c == '.' || c == '_'
}
|||
|
|||
// isWhitespace reports whether c is one of the whitespace bytes the
// text-format lexer skips: space, tab, newline, or carriage return.
func isWhitespace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
|||
|
|||
// isQuote reports whether c opens a quoted string: a double or single quote.
func isQuote(c byte) bool {
	return c == '"' || c == '\''
}
@ -0,0 +1,560 @@ |
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding" |
|||
"fmt" |
|||
"io" |
|||
"math" |
|||
"sort" |
|||
"strings" |
|||
|
|||
"google.golang.org/protobuf/encoding/prototext" |
|||
"google.golang.org/protobuf/encoding/protowire" |
|||
"google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
) |
|||
|
|||
const wrapTextMarshalV2 = false |
|||
|
|||
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
	Compact   bool // use compact text format (one line)
	ExpandAny bool // expand google.protobuf.Any messages of known types
}
|||
|
|||
// Marshal writes the proto text format of m to w.
|
|||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { |
|||
b, err := tm.marshal(m) |
|||
if len(b) > 0 { |
|||
if _, err := w.Write(b); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// Text returns a proto text formatted string of m.
|
|||
func (tm *TextMarshaler) Text(m Message) string { |
|||
b, _ := tm.marshal(m) |
|||
return string(b) |
|||
} |
|||
|
|||
// marshal produces the text encoding of m, honoring the Compact and
// ExpandAny settings. Nil or invalid messages encode as "<nil>"; a
// message implementing encoding.TextMarshaler marshals itself.
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return []byte("<nil>"), nil
	}

	if wrapTextMarshalV2 {
		// Delegate to the protobuf v2 text encoder (disabled by the
		// package constant; kept for a future migration).
		if m, ok := m.(encoding.TextMarshaler); ok {
			return m.MarshalText()
		}

		opts := prototext.MarshalOptions{
			AllowPartial: true,
			EmitUnknown:  true,
		}
		if !tm.Compact {
			opts.Indent = "  "
		}
		if !tm.ExpandAny {
			// A nil resolver disables Any expansion.
			opts.Resolver = (*protoregistry.Types)(nil)
		}
		return opts.Marshal(mr.Interface())
	} else {
		w := &textWriter{
			compact:   tm.Compact,
			expandAny: tm.ExpandAny,
			complete:  true,
		}

		if m, ok := m.(encoding.TextMarshaler); ok {
			b, err := m.MarshalText()
			if err != nil {
				return nil, err
			}
			w.Write(b)
			return w.buf, nil
		}

		err := w.writeMessage(mr)
		return w.buf, err
	}
}
|||
|
|||
// Shared marshaler instances backing the package-level convenience functions.
var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)

// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }

// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }

// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }

// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|||
|
|||
// Byte literals reused by the writer to avoid repeated conversions.
var (
	newline         = []byte("\n")
	endBraceNewline = []byte("}\n")
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)

// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
	compact   bool // same as TextMarshaler.Compact
	expandAny bool // same as TextMarshaler.ExpandAny
	complete  bool // whether the current position is a complete line
	indent    int  // indentation level; never negative
	buf       []byte // accumulated output
}
|||
|
|||
// Write appends p to the buffer, inserting indentation at the start of
// each line (non-compact mode) or replacing newlines with spaces
// (compact mode). It never fails; the error result satisfies io.Writer.
func (w *textWriter) Write(p []byte) (n int, _ error) {
	newlines := bytes.Count(p, newline)
	if newlines == 0 {
		// Fast path: no line breaks, append directly.
		if !w.compact && w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, p...)
		w.complete = false
		return len(p), nil
	}

	frags := bytes.SplitN(p, newline, newlines+1)
	if w.compact {
		// Compact mode joins fragments with single spaces.
		for i, frag := range frags {
			if i > 0 {
				w.buf = append(w.buf, ' ')
				n++
			}
			w.buf = append(w.buf, frag...)
			n += len(frag)
		}
		return n, nil
	}

	for i, frag := range frags {
		if w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, frag...)
		n += len(frag)
		if i+1 < len(frags) {
			w.buf = append(w.buf, '\n')
			n++
		}
	}
	// The line is complete only if p ended with a newline.
	w.complete = len(frags[len(frags)-1]) == 0
	return n, nil
}
|||
|
|||
func (w *textWriter) WriteByte(c byte) error { |
|||
if w.compact && c == '\n' { |
|||
c = ' ' |
|||
} |
|||
if !w.compact && w.complete { |
|||
w.writeIndent() |
|||
} |
|||
w.buf = append(w.buf, c) |
|||
w.complete = c == '\n' |
|||
return nil |
|||
} |
|||
|
|||
// writeName emits the field name for fd followed by ':' (groups use
// their message type name and omit the colon), plus a trailing space in
// non-compact mode.
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.complete = false

	if fd.Kind() != protoreflect.GroupKind {
		w.buf = append(w.buf, fd.Name()...)
		w.WriteByte(':')
	} else {
		// Use message type name for group field name.
		w.buf = append(w.buf, fd.Message().Name()...)
	}

	if !w.compact {
		w.WriteByte(' ')
	}
}
|||
|
|||
// requiresQuotes reports whether the Any type URL u must be written as a
// quoted string: any character outside ASCII letters, digits, '.', '/'
// and '_' forces quoting.
func requiresQuotes(u string) bool {
	for _, ch := range u {
		if ch == '.' || ch == '/' || ch == '_' {
			continue
		}
		if ('0' <= ch && ch <= '9') ||
			('A' <= ch && ch <= 'Z') ||
			('a' <= ch && ch <= 'z') {
			continue
		}
		return true
	}
	return false
}
|||
|
|||
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
	md := m.Descriptor()
	fdURL := md.Fields().ByName("type_url")
	fdVal := md.Fields().ByName("value")

	url := m.Get(fdURL).String()
	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
	if err != nil {
		// Unknown type: caller falls back to the unexpanded form.
		return false, nil
	}

	b := m.Get(fdVal).Bytes()
	m2 := mt.New()
	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
		return false, nil
	}
	w.Write([]byte("["))
	if requiresQuotes(url) {
		w.writeQuotedString(url)
	} else {
		w.Write([]byte(url))
	}
	if w.compact {
		w.Write([]byte("]:<"))
	} else {
		w.Write([]byte("]: <\n"))
		w.indent++
	}
	if err := w.writeMessage(m2); err != nil {
		return true, err
	}
	if w.compact {
		w.Write([]byte("> "))
	} else {
		w.indent--
		w.Write([]byte(">\n"))
	}
	return true, nil
}
|||
|
|||
// writeMessage writes the text encoding of m, one field per line (space
// separated in compact mode): scalar and repeated fields in declaration
// order, map entries sorted by key for deterministic output, then
// unknown fields and extensions.
func (w *textWriter) writeMessage(m protoreflect.Message) error {
	md := m.Descriptor()
	if w.expandAny && md.FullName() == "google.protobuf.Any" {
		if canExpand, err := w.writeProto3Any(m); canExpand {
			return err
		}
	}

	fds := md.Fields()
	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			// Emit only the member of the oneof that is actually set,
			// then skip past the whole oneof group.
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
		} else {
			i++
		}
		if fd == nil || !m.Has(fd) {
			continue
		}

		switch {
		case fd.IsList():
			lv := m.Get(fd).List()
			for j := 0; j < lv.Len(); j++ {
				w.writeName(fd)
				v := lv.Get(j)
				if err := w.writeSingularValue(v, fd); err != nil {
					return err
				}
				w.WriteByte('\n')
			}
		case fd.IsMap():
			kfd := fd.MapKey()
			vfd := fd.MapValue()
			mv := m.Get(fd).Map()

			type entry struct{ key, val protoreflect.Value }
			var entries []entry
			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				entries = append(entries, entry{k.Value(), v})
				return true
			})
			// Map iteration order is random; sort by key so output is stable.
			sort.Slice(entries, func(i, j int) bool {
				switch kfd.Kind() {
				case protoreflect.BoolKind:
					return !entries[i].key.Bool() && entries[j].key.Bool()
				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
					return entries[i].key.Int() < entries[j].key.Int()
				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
					return entries[i].key.Uint() < entries[j].key.Uint()
				case protoreflect.StringKind:
					return entries[i].key.String() < entries[j].key.String()
				default:
					panic("invalid kind")
				}
			})
			for _, entry := range entries {
				w.writeName(fd)
				w.WriteByte('<')
				if !w.compact {
					w.WriteByte('\n')
				}
				w.indent++
				w.writeName(kfd)
				if err := w.writeSingularValue(entry.key, kfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.writeName(vfd)
				if err := w.writeSingularValue(entry.val, vfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.indent--
				w.WriteByte('>')
				w.WriteByte('\n')
			}
		default:
			w.writeName(fd)
			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
				return err
			}
			w.WriteByte('\n')
		}
	}

	if b := m.GetUnknown(); len(b) > 0 {
		w.writeUnknownFields(b)
	}
	return w.writeExtensions(m)
}
|||
|
|||
// writeSingularValue writes one scalar, string, enum, or nested message
// value for fd: floats spell out inf/-inf/nan, strings and bytes are
// quoted, messages recurse in '<...>' (groups in '{...}'), and enums
// prefer the value name over the number.
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch fd.Kind() {
	case protoreflect.FloatKind, protoreflect.DoubleKind:
		switch vf := v.Float(); {
		case math.IsInf(vf, +1):
			w.Write(posInf)
		case math.IsInf(vf, -1):
			w.Write(negInf)
		case math.IsNaN(vf):
			w.Write(nan)
		default:
			fmt.Fprint(w, v.Interface())
		}
	case protoreflect.StringKind:
		// NOTE: This does not validate UTF-8 for historical reasons.
		w.writeQuotedString(string(v.String()))
	case protoreflect.BytesKind:
		w.writeQuotedString(string(v.Bytes()))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		var bra, ket byte = '<', '>'
		if fd.Kind() == protoreflect.GroupKind {
			bra, ket = '{', '}'
		}
		w.WriteByte(bra)
		if !w.compact {
			w.WriteByte('\n')
		}
		w.indent++
		m := v.Message()
		// A message that marshals itself is written verbatim.
		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
			b, err := m2.MarshalText()
			if err != nil {
				return err
			}
			w.Write(b)
		} else {
			w.writeMessage(m)
		}
		w.indent--
		w.WriteByte(ket)
	case protoreflect.EnumKind:
		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
			fmt.Fprint(w, ev.Name())
		} else {
			fmt.Fprint(w, v.Enum())
		}
	default:
		fmt.Fprint(w, v.Interface())
	}
	return nil
}
|||
|
|||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
|||
func (w *textWriter) writeQuotedString(s string) { |
|||
w.WriteByte('"') |
|||
for i := 0; i < len(s); i++ { |
|||
switch c := s[i]; c { |
|||
case '\n': |
|||
w.buf = append(w.buf, `\n`...) |
|||
case '\r': |
|||
w.buf = append(w.buf, `\r`...) |
|||
case '\t': |
|||
w.buf = append(w.buf, `\t`...) |
|||
case '"': |
|||
w.buf = append(w.buf, `\"`...) |
|||
case '\\': |
|||
w.buf = append(w.buf, `\\`...) |
|||
default: |
|||
if isPrint := c >= 0x20 && c < 0x7f; isPrint { |
|||
w.buf = append(w.buf, c) |
|||
} else { |
|||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) |
|||
} |
|||
} |
|||
} |
|||
w.WriteByte('"') |
|||
} |
|||
|
|||
// writeUnknownFields renders the raw unknown-field bytes b in a best-effort
// textual form (field number, then value per wire type). Malformed input is
// not an error: parsing simply stops at the first truncated tag or value.
func (w *textWriter) writeUnknownFields(b []byte) {
	if !w.compact {
		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
	}

	for len(b) > 0 {
		num, wtyp, n := protowire.ConsumeTag(b)
		if n < 0 {
			// Truncated or invalid tag; give up silently.
			return
		}
		b = b[n:]

		if wtyp == protowire.EndGroupType {
			w.indent--
			w.Write(endBraceNewline)
			continue
		}
		fmt.Fprint(w, num)
		if wtyp != protowire.StartGroupType {
			w.WriteByte(':')
		}
		if !w.compact || wtyp == protowire.StartGroupType {
			w.WriteByte(' ')
		}
		switch wtyp {
		case protowire.VarintType:
			v, n := protowire.ConsumeVarint(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed32Type:
			v, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed64Type:
			v, n := protowire.ConsumeFixed64(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.BytesType:
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return
			}
			b = b[n:]
			// Length-delimited payloads are shown as a quoted Go string.
			fmt.Fprintf(w, "%q", v)
		case protowire.StartGroupType:
			w.WriteByte('{')
			w.indent++
		default:
			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
		}
		w.WriteByte('\n')
	}
}
|||
|
|||
// writeExtensions writes all the extensions in m, ordered deterministically
// by field number. Message-set extensions are printed under the name of the
// containing message.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
	md := m.Descriptor()
	if md.ExtensionRanges().Len() == 0 {
		return nil
	}

	type ext struct {
		desc protoreflect.FieldDescriptor
		val  protoreflect.Value
	}
	// Collect the populated extension fields...
	var exts []ext
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			exts = append(exts, ext{fd, v})
		}
		return true
	})
	// ...and sort them so the output is stable across runs.
	sort.Slice(exts, func(i, j int) bool {
		return exts[i].desc.Number() < exts[j].desc.Number()
	})

	for _, ext := range exts {
		// For message set, use the name of the message as the extension name.
		name := string(ext.desc.FullName())
		if isMessageSet(ext.desc.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		if !ext.desc.IsList() {
			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
				return err
			}
		} else {
			// Repeated extensions emit one "[name]: value" entry per element.
			lv := ext.val.List()
			for i := 0; i < lv.Len(); i++ {
				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|||
|
|||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { |
|||
fmt.Fprintf(w, "[%s]:", name) |
|||
if !w.compact { |
|||
w.WriteByte(' ') |
|||
} |
|||
if err := w.writeSingularValue(v, fd); err != nil { |
|||
return err |
|||
} |
|||
w.WriteByte('\n') |
|||
return nil |
|||
} |
|||
|
|||
func (w *textWriter) writeIndent() { |
|||
if !w.complete { |
|||
return |
|||
} |
|||
for i := 0; i < w.indent*2; i++ { |
|||
w.buf = append(w.buf, ' ') |
|||
} |
|||
w.complete = false |
|||
} |
@ -0,0 +1,78 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
protoV2 "google.golang.org/protobuf/proto" |
|||
"google.golang.org/protobuf/runtime/protoiface" |
|||
) |
|||
|
|||
// Size returns the size in bytes of the wire-format encoding of m.
|
|||
func Size(m Message) int { |
|||
if m == nil { |
|||
return 0 |
|||
} |
|||
mi := MessageV2(m) |
|||
return protoV2.Size(mi) |
|||
} |
|||
|
|||
// Marshal returns the wire-format encoding of m.
|
|||
func Marshal(m Message) ([]byte, error) { |
|||
b, err := marshalAppend(nil, m, false) |
|||
if b == nil { |
|||
b = zeroBytes |
|||
} |
|||
return b, err |
|||
} |
|||
|
|||
var zeroBytes = make([]byte, 0, 0) |
|||
|
|||
// marshalAppend appends the wire-format encoding of m to buf and returns the
// extended slice. It reports ErrNil for a nil or invalid message, and defers
// required-field checking to checkRequiredNotSet.
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	// An encoding that added no bytes is only acceptable for a valid
	// (non-nil) message; otherwise surface ErrNil as the v1 API did.
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}
|||
|
|||
// Unmarshal parses a wire-format message in b and places the decoded results in m.
|
|||
//
|
|||
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
|
|||
// removed. Use UnmarshalMerge to preserve and append to existing data.
|
|||
func Unmarshal(b []byte, m Message) error {
	// Reset discards any previously decoded state before merging in b.
	m.Reset()
	return UnmarshalMerge(b, m)
}
|||
|
|||
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
|
|||
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	// If the decoder already proved all required fields are set, skip the
	// explicit required-field check.
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		return nil
	}
	return checkRequiredNotSet(mi)
}
@ -0,0 +1,34 @@ |
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package proto |
|||
|
|||
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}
|||
|
|||
// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 {
	p := int32(v)
	return &p
}
|||
|
|||
// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 {
	p := new(int32)
	*p = v
	return p
}
|||
|
|||
// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}
|||
|
|||
// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 {
	p := new(uint32)
	*p = v
	return p
}
|||
|
|||
// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 {
	p := new(uint64)
	*p = v
	return p
}
|||
|
|||
// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 {
	p := new(float32)
	*p = v
	return p
}
|||
|
|||
// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}
|||
|
|||
// String stores v in a new string value and returns a pointer to it.
func String(v string) *string {
	p := new(string)
	*p = v
	return p
}
@ -0,0 +1,179 @@ |
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package ptypes |
|||
|
|||
import ( |
|||
"fmt" |
|||
"strings" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
"google.golang.org/protobuf/reflect/protoreflect" |
|||
"google.golang.org/protobuf/reflect/protoregistry" |
|||
|
|||
anypb "github.com/golang/protobuf/ptypes/any" |
|||
) |
|||
|
|||
const urlPrefix = "type.googleapis.com/" |
|||
|
|||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
|||
// Most type assertions should use the Is function instead.
|
|||
//
|
|||
// Deprecated: Call the any.MessageName method instead.
|
|||
func AnyMessageName(any *anypb.Any) (string, error) { |
|||
name, err := anyMessageName(any) |
|||
return string(name), err |
|||
} |
|||
// anyMessageName extracts the fully-qualified message name from the TypeUrl
// of any, stripping everything up to and including the last "/". It reports
// an error for a nil message or a syntactically invalid name.
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	name := protoreflect.FullName(any.TypeUrl)
	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
		// Keep only the portion after the final slash.
		name = name[i+len("/"):]
	}
	if !name.IsValid() {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return name, nil
}
|||
|
|||
// MarshalAny marshals the given message m into an anypb.Any message,
// setting its TypeUrl from the message's full name.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
	// Unwrap DynamicAny so the underlying concrete message is marshaled.
	switch dm := m.(type) {
	case DynamicAny:
		m = dm.Message
	case *DynamicAny:
		if dm == nil {
			return nil, proto.ErrNil
		}
		m = dm.Message
	}
	b, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	// The type URL is the global "type.googleapis.com/" prefix plus the
	// message's full name.
	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
|||
|
|||
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
	name, err := anyMessageName(any)
	if err != nil {
		return nil, err
	}
	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
	if err != nil {
		return nil, err
	}
	// Wrap the freshly created v2 message in the v1 API shape callers expect.
	return proto.MessageV1(mt.New().Interface()), nil
}
|||
|
|||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
	if dm, ok := m.(*DynamicAny); ok {
		if dm.Message == nil {
			// Lazily allocate the concrete message from the Any's type URL.
			var err error
			dm.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		m = dm.Message
	}

	// The Any's declared type must match the target message's type.
	anyName, err := AnyMessageName(any)
	if err != nil {
		return err
	}
	msgName := proto.MessageName(m)
	if anyName != msgName {
		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
	}
	return proto.Unmarshal(any.Value, m)
}
|||
|
|||
// Is reports whether the Any message contains a message of the specified type.
|
|||
//
|
|||
// Deprecated: Call the any.MessageIs method instead.
|
|||
func Is(any *anypb.Any, m proto.Message) bool { |
|||
if any == nil || m == nil { |
|||
return false |
|||
} |
|||
name := proto.MessageName(m) |
|||
if !strings.HasSuffix(any.TypeUrl, name) { |
|||
return false |
|||
} |
|||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' |
|||
} |
|||
|
|||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
|||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
|||
// The allocated message is stored in the embedded proto.Message.
|
|||
//
|
|||
// Example:
|
|||
// var x ptypes.DynamicAny
|
|||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
|||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
|||
//
|
|||
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
|
|||
// the any message contents into a new instance of the underlying message.
|
|||
type DynamicAny struct{ proto.Message }

// String formats the underlying message, or "<nil>" when unset.
func (m DynamicAny) String() string {
	if m.Message == nil {
		return "<nil>"
	}
	return m.Message.String()
}

// Reset clears the underlying message; a nil message is a no-op.
func (m DynamicAny) Reset() {
	if m.Message == nil {
		return
	}
	m.Message.Reset()
}

// ProtoMessage marks DynamicAny as implementing proto.Message.
func (m DynamicAny) ProtoMessage() {
	return
}

// ProtoReflect exposes the underlying message's reflective view, wrapped in
// dynamicAny so derived messages and types stay within these wrappers.
func (m DynamicAny) ProtoReflect() protoreflect.Message {
	if m.Message == nil {
		return nil
	}
	return dynamicAny{proto.MessageReflect(m.Message)}
}

// dynamicAny wraps a protoreflect.Message so that Type, New, and Interface
// keep returning DynamicAny-flavored values.
type dynamicAny struct{ protoreflect.Message }

func (m dynamicAny) Type() protoreflect.MessageType {
	return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
	return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
	return DynamicAny{proto.MessageV1(m.Message.Interface())}
}

// dynamicAnyType mirrors dynamicAny at the message-type level.
type dynamicAnyType struct{ protoreflect.MessageType }

func (t dynamicAnyType) New() protoreflect.Message {
	return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
	return dynamicAny{t.MessageType.Zero()}
}
@ -0,0 +1,62 @@ |
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
|||
|
|||
package any |
|||
|
|||
import ( |
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
anypb "google.golang.org/protobuf/types/known/anypb" |
|||
reflect "reflect" |
|||
) |
|||
|
|||
// Symbols defined in public import of google/protobuf/any.proto.
|
|||
|
|||
type Any = anypb.Any |
|||
|
|||
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ |
|||
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
|||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
|||
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, |
|||
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, |
|||
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, |
|||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, |
|||
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, |
|||
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, |
|||
0x74, 0x6f, 0x33, |
|||
} |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} |
|||
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ |
|||
0, // [0:0] is the sub-list for method output_type
|
|||
0, // [0:0] is the sub-list for method input_type
|
|||
0, // [0:0] is the sub-list for extension type_name
|
|||
0, // [0:0] is the sub-list for extension extendee
|
|||
0, // [0:0] is the sub-list for field type_name
|
|||
} |
|||
|
|||
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } |
|||
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { |
|||
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { |
|||
return |
|||
} |
|||
type x struct{} |
|||
out := protoimpl.TypeBuilder{ |
|||
File: protoimpl.DescBuilder{ |
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
|||
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, |
|||
NumEnums: 0, |
|||
NumMessages: 0, |
|||
NumExtensions: 0, |
|||
NumServices: 0, |
|||
}, |
|||
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, |
|||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, |
|||
}.Build() |
|||
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File |
|||
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil |
|||
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil |
|||
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil |
|||
} |
@ -0,0 +1,10 @@ |
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Package ptypes provides functionality for interacting with well-known types.
|
|||
//
|
|||
// Deprecated: Well-known types have specialized functionality directly
|
|||
// injected into the generated packages for each message type.
|
|||
// See the deprecation notice for each function for the suggested alternative.
|
|||
package ptypes |
@ -0,0 +1,76 @@ |
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package ptypes |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"time" |
|||
|
|||
durationpb "github.com/golang/protobuf/ptypes/duration" |
|||
) |
|||
|
|||
// Range of google.protobuf.Duration as specified in duration.proto.
|
|||
// This is about 10,000 years in seconds.
|
|||
const ( |
|||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) |
|||
minSeconds = -maxSeconds |
|||
) |
|||
|
|||
// Duration converts a durationpb.Duration to a time.Duration.
|
|||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
|||
//
|
|||
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
|
|||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
	if err := validateDuration(dur); err != nil {
		return 0, err
	}
	// Convert the seconds part first; detect int64 overflow by round-tripping.
	d := time.Duration(dur.Seconds) * time.Second
	if int64(d/time.Second) != dur.Seconds {
		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
	}
	if dur.Nanos != 0 {
		d += time.Duration(dur.Nanos) * time.Nanosecond
		// Adding nanos must not flip the sign; if it did, the sum overflowed.
		if (d < 0) != (dur.Nanos < 0) {
			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
		}
	}
	return d, nil
}
|||
|
|||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
|||
//
|
|||
// Deprecated: Call the durationpb.New function instead.
|
|||
func DurationProto(d time.Duration) *durationpb.Duration { |
|||
nanos := d.Nanoseconds() |
|||
secs := nanos / 1e9 |
|||
nanos -= secs * 1e9 |
|||
return &durationpb.Duration{ |
|||
Seconds: int64(secs), |
|||
Nanos: int32(nanos), |
|||
} |
|||
} |
|||
|
|||
// validateDuration determines whether the durationpb.Duration is valid
|
|||
// according to the definition in google/protobuf/duration.proto.
|
|||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
|||
// Note that the range of durationpb.Duration is about 10,000 years,
|
|||
// while the range of time.Duration is about 290 years.
|
|||
func validateDuration(dur *durationpb.Duration) error {
	if dur == nil {
		return errors.New("duration: nil Duration")
	}
	// Seconds must fall within the proto-defined ±10,000-year range.
	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
		return fmt.Errorf("duration: %v: seconds out of range", dur)
	}
	// Nanos must be a strict sub-second remainder: (-1e9, 1e9).
	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
		return fmt.Errorf("duration: %v: nanos out of range", dur)
	}
	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
	}
	return nil
}
@ -0,0 +1,63 @@ |
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
|||
|
|||
package duration |
|||
|
|||
import ( |
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
durationpb "google.golang.org/protobuf/types/known/durationpb" |
|||
reflect "reflect" |
|||
) |
|||
|
|||
// Symbols defined in public import of google/protobuf/duration.proto.
|
|||
|
|||
type Duration = durationpb.Duration |
|||
|
|||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ |
|||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
|||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
|||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, |
|||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, |
|||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, |
|||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, |
|||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, |
|||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, |
|||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, |
|||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, |
|||
} |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} |
|||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ |
|||
0, // [0:0] is the sub-list for method output_type
|
|||
0, // [0:0] is the sub-list for method input_type
|
|||
0, // [0:0] is the sub-list for extension type_name
|
|||
0, // [0:0] is the sub-list for extension extendee
|
|||
0, // [0:0] is the sub-list for field type_name
|
|||
} |
|||
|
|||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } |
|||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { |
|||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { |
|||
return |
|||
} |
|||
type x struct{} |
|||
out := protoimpl.TypeBuilder{ |
|||
File: protoimpl.DescBuilder{ |
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
|||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, |
|||
NumEnums: 0, |
|||
NumMessages: 0, |
|||
NumExtensions: 0, |
|||
NumServices: 0, |
|||
}, |
|||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, |
|||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, |
|||
}.Build() |
|||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File |
|||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil |
|||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil |
|||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil |
|||
} |
@ -0,0 +1,112 @@ |
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package ptypes |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"time" |
|||
|
|||
timestamppb "github.com/golang/protobuf/ptypes/timestamp" |
|||
) |
|||
|
|||
// Range of google.protobuf.Duration as specified in timestamp.proto.
|
|||
const ( |
|||
// Seconds field of the earliest valid Timestamp.
|
|||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|||
minValidSeconds = -62135596800 |
|||
// Seconds field just after the latest valid Timestamp.
|
|||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|||
maxValidSeconds = 253402300800 |
|||
) |
|||
|
|||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
|||
// It returns an error if the argument is invalid.
|
|||
//
|
|||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
|||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
|||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
|||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
|||
// do map to valid time.Times.
|
|||
//
|
|||
// A nil Timestamp returns an error. The first return value in that case is
|
|||
// undefined.
|
|||
//
|
|||
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
|
|||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	// Validate after converting so callers still get a best-effort time
	// alongside any error.
	return t, validateTimestamp(ts)
}
|
|||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
|||
//
|
|||
// Deprecated: Call the timestamppb.Now function instead.
|
|||
func TimestampNow() *timestamppb.Timestamp { |
|||
ts, err := TimestampProto(time.Now()) |
|||
if err != nil { |
|||
panic("ptypes: time.Now() out of Timestamp range") |
|||
} |
|||
return ts |
|||
} |
|||
|
|||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
|||
// It returns an error if the resulting Timestamp is invalid.
|
|||
//
|
|||
// Deprecated: Call the timestamppb.New function instead.
|
|||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { |
|||
ts := ×tamppb.Timestamp{ |
|||
Seconds: t.Unix(), |
|||
Nanos: int32(t.Nanosecond()), |
|||
} |
|||
if err := validateTimestamp(ts); err != nil { |
|||
return nil, err |
|||
} |
|||
return ts, nil |
|||
} |
|||
|
|||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
|||
// For invalid Timestamps, it returns an error message in parentheses.
|
|||
//
|
|||
// Deprecated: Call the ts.AsTime method instead,
|
|||
// followed by a call to the Format method on the time.Time value.
|
|||
func TimestampString(ts *timestamppb.Timestamp) string { |
|||
t, err := Timestamp(ts) |
|||
if err != nil { |
|||
return fmt.Sprintf("(%v)", err) |
|||
} |
|||
return t.Format(time.RFC3339Nano) |
|||
} |
|||
|
|||
// validateTimestamp determines whether a Timestamp is valid.
|
|||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
|||
// and has a Nanos field in the range [0, 1e9).
|
|||
//
|
|||
// If the Timestamp is valid, validateTimestamp returns nil.
|
|||
// Otherwise, it returns an error that describes the problem.
|
|||
//
|
|||
// Every valid Timestamp can be represented by a time.Time,
|
|||
// but the converse is not true.
|
|||
func validateTimestamp(ts *timestamppb.Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	// Seconds must lie in [0001-01-01, 10000-01-01) — see the package consts.
	if ts.Seconds < minValidSeconds {
		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
	}
	if ts.Seconds >= maxValidSeconds {
		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
	}
	// Unlike Duration, Timestamp nanos are always non-negative.
	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}
@ -0,0 +1,64 @@ |
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
|||
|
|||
package timestamp |
|||
|
|||
import ( |
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
|||
timestamppb "google.golang.org/protobuf/types/known/timestamppb" |
|||
reflect "reflect" |
|||
) |
|||
|
|||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
|||
|
|||
type Timestamp = timestamppb.Timestamp |
|||
|
|||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ |
|||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
|||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
|||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, |
|||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, |
|||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, |
|||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, |
|||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
|||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
|||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, |
|||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, |
|||
0x33, |
|||
} |
|||
|
|||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} |
|||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ |
|||
0, // [0:0] is the sub-list for method output_type
|
|||
0, // [0:0] is the sub-list for method input_type
|
|||
0, // [0:0] is the sub-list for extension type_name
|
|||
0, // [0:0] is the sub-list for extension extendee
|
|||
0, // [0:0] is the sub-list for field type_name
|
|||
} |
|||
|
|||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } |
|||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { |
|||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { |
|||
return |
|||
} |
|||
type x struct{} |
|||
out := protoimpl.TypeBuilder{ |
|||
File: protoimpl.DescBuilder{ |
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
|||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, |
|||
NumEnums: 0, |
|||
NumMessages: 0, |
|||
NumExtensions: 0, |
|||
NumServices: 0, |
|||
}, |
|||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, |
|||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, |
|||
}.Build() |
|||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File |
|||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil |
|||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil |
|||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil |
|||
} |
@ -0,0 +1,4 @@ |
|||
logrus |
|||
vendor |
|||
|
|||
.idea/ |
@ -0,0 +1,40 @@ |
|||
run: |
|||
# do not run on test files yet |
|||
tests: false |
|||
|
|||
# all available settings of specific linters |
|||
linters-settings: |
|||
errcheck: |
|||
# report about not checking of errors in type assertions: `a := b.(MyStruct)`; |
|||
# default is false: such cases aren't reported by default. |
|||
check-type-assertions: false |
|||
|
|||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; |
|||
# default is false: such cases aren't reported by default. |
|||
check-blank: false |
|||
|
|||
lll: |
|||
line-length: 100 |
|||
tab-width: 4 |
|||
|
|||
prealloc: |
|||
simple: false |
|||
range-loops: false |
|||
for-loops: false |
|||
|
|||
whitespace: |
|||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement |
|||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature |
|||
|
|||
linters: |
|||
enable: |
|||
- megacheck |
|||
- govet |
|||
disable: |
|||
- maligned |
|||
- prealloc |
|||
disable-all: false |
|||
presets: |
|||
- bugs |
|||
- unused |
|||
fast: false |
@ -0,0 +1,15 @@ |
|||
language: go |
|||
go_import_path: github.com/sirupsen/logrus |
|||
git: |
|||
depth: 1 |
|||
env: |
|||
- GO111MODULE=on |
|||
go: 1.15.x |
|||
os: linux |
|||
install: |
|||
- ./travis/install.sh |
|||
script: |
|||
- cd ci |
|||
- go run mage.go -v -w ../ crossBuild |
|||
- go run mage.go -v -w ../ lint |
|||
- go run mage.go -v -w ../ test |
@ -0,0 +1,259 @@ |
|||
# 1.8.1 |
|||
Code quality: |
|||
* move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer |
|||
* improve timestamp format documentation |
|||
|
|||
Fixes: |
|||
* fix race condition on logger hooks |
|||
|
|||
|
|||
# 1.8.0 |
|||
|
|||
Correct versioning number replacing v1.7.1. |
|||
|
|||
# 1.7.1 |
|||
|
|||
Beware this release has introduced a new public API and its semver is therefore incorrect. |
|||
|
|||
Code quality: |
|||
* use go 1.15 in travis |
|||
* use magefile as task runner |
|||
|
|||
Fixes: |
|||
* small fixes about new go 1.13 error formatting system |
|||
* Fix for long-standing race condition with mutating data hooks |
|||
|
|||
Features: |
|||
* build support for zos |
|||
|
|||
# 1.7.0 |
|||
Fixes: |
|||
* the dependency toward a windows terminal library has been removed |
|||
|
|||
Features: |
|||
* a new buffer pool management API has been added |
|||
* a set of `<LogLevel>Fn()` functions have been added |
|||
|
|||
# 1.6.0 |
|||
Fixes: |
|||
* end of line cleanup |
|||
* revert the entry concurrency bug fix which leads to deadlock under some circumstances |
|||
* update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 |
|||
|
|||
Features: |
|||
* add an option to the `TextFormatter` to completely disable fields quoting |
|||
|
|||
# 1.5.0 |
|||
Code quality: |
|||
* add golangci linter run on travis |
|||
|
|||
Fixes: |
|||
* add mutex for hooks concurrent access on `Entry` data |
|||
* caller function field for go1.14 |
|||
* fix build issue for gopherjs target |
|||
|
|||
Feature: |
|||
* add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level |
|||
* add a `DisableHTMLEscape` option in the `JSONFormatter` |
|||
* add `ForceQuote` and `PadLevelText` options in the `TextFormatter` |
|||
|
|||
# 1.4.2 |
|||
* Fixes build break for plan9, nacl, solaris |
|||
# 1.4.1 |
|||
This new release introduces: |
|||
* Enhance TextFormatter to not print caller information when they are empty (#944) |
|||
* Remove dependency on golang.org/x/crypto (#932, #943) |
|||
|
|||
Fixes: |
|||
* Fix Entry.WithContext method to return a copy of the initial entry (#941) |
|||
|
|||
# 1.4.0 |
|||
This new release introduces: |
|||
* Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). |
|||
* Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) |
|||
* Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). |
|||
|
|||
Fixes: |
|||
* Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). |
|||
* Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) |
|||
* Fix infinite recursion on unknown `Level.String()` (#907) |
|||
* Fix race condition in `getCaller` (#916). |
|||
|
|||
|
|||
# 1.3.0 |
|||
This new release introduces: |
|||
* Log, Logf, Logln functions for Logger and Entry that take a Level |
|||
|
|||
Fixes: |
|||
* Building prometheus node_exporter on AIX (#840) |
|||
* Race condition in TextFormatter (#468) |
|||
* Travis CI import path (#868) |
|||
* Remove coloured output on Windows (#862) |
|||
* Pointer to func as field in JSONFormatter (#870) |
|||
* Properly marshal Levels (#873) |
|||
|
|||
# 1.2.0 |
|||
This new release introduces: |
|||
* A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued |
|||
* A new trace level named `Trace` whose level is below `Debug` |
|||
* A configurable exit function to be called upon a Fatal trace |
|||
* The `Level` object now implements `encoding.TextUnmarshaler` interface |
|||
|
|||
# 1.1.1 |
|||
This is a bug fix release. |
|||
* fix the build break on Solaris |
|||
* don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized |
|||
|
|||
# 1.1.0 |
|||
This new release introduces: |
|||
* several fixes: |
|||
* a fix for a race condition on entry formatting |
|||
* proper cleanup of previously used entries before putting them back in the pool |
|||
* the extra new line at the end of message in text formatter has been removed |
|||
* a new global public API to check if a level is activated: IsLevelEnabled |
|||
* the following methods have been added to the Logger object |
|||
* IsLevelEnabled |
|||
* SetFormatter |
|||
* SetOutput |
|||
* ReplaceHooks |
|||
* introduction of go module |
|||
* an indent configuration for the json formatter |
|||
* output colour support for windows |
|||
* the field sort function is now configurable for text formatter |
|||
* the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter |
|||
|
|||
# 1.0.6 |
|||
|
|||
This new release introduces: |
|||
* a new api WithTime which allows to easily force the time of the log entry |
|||
which is mostly useful for logger wrapper |
|||
* a fix reverting the immutability of the entry given as parameter to the hooks |
|||
* a new configuration field of the json formatter in order to put all the fields |
|||
in a nested dictionary |
|||
* a new SetOutput method in the Logger |
|||
* a new configuration of the textformatter to configure the name of the default keys |
|||
* a new configuration of the text formatter to disable the level truncation |
|||
|
|||
# 1.0.5 |
|||
|
|||
* Fix hooks race (#707) |
|||
* Fix panic deadlock (#695) |
|||
|
|||
# 1.0.4 |
|||
|
|||
* Fix race when adding hooks (#612) |
|||
* Fix terminal check in AppEngine (#635) |
|||
|
|||
# 1.0.3 |
|||
|
|||
* Replace example files with testable examples |
|||
|
|||
# 1.0.2 |
|||
|
|||
* bug: quote non-string values in text formatter (#583) |
|||
* Make (*Logger) SetLevel a public method |
|||
|
|||
# 1.0.1 |
|||
|
|||
* bug: fix escaping in text formatter (#575) |
|||
|
|||
# 1.0.0 |
|||
|
|||
* Officially changed name to lower-case |
|||
* bug: colors on Windows 10 (#541) |
|||
* bug: fix race in accessing level (#512) |
|||
|
|||
# 0.11.5 |
|||
|
|||
* feature: add writer and writerlevel to entry (#372) |
|||
|
|||
# 0.11.4 |
|||
|
|||
* bug: fix undefined variable on solaris (#493) |
|||
|
|||
# 0.11.3 |
|||
|
|||
* formatter: configure quoting of empty values (#484) |
|||
* formatter: configure quoting character (default is `"`) (#484) |
|||
* bug: fix not importing io correctly in non-linux environments (#481) |
|||
|
|||
# 0.11.2 |
|||
|
|||
* bug: fix windows terminal detection (#476) |
|||
|
|||
# 0.11.1 |
|||
|
|||
* bug: fix tty detection with custom out (#471) |
|||
|
|||
# 0.11.0 |
|||
|
|||
* performance: Use bufferpool to allocate (#370) |
|||
* terminal: terminal detection for app-engine (#343) |
|||
* feature: exit handler (#375) |
|||
|
|||
# 0.10.0 |
|||
|
|||
* feature: Add a test hook (#180) |
|||
* feature: `ParseLevel` is now case-insensitive (#326) |
|||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) |
|||
* performance: avoid re-allocations on `WithFields` (#335) |
|||
|
|||
# 0.9.0 |
|||
|
|||
* logrus/text_formatter: don't emit empty msg |
|||
* logrus/hooks/airbrake: move out of main repository |
|||
* logrus/hooks/sentry: move out of main repository |
|||
* logrus/hooks/papertrail: move out of main repository |
|||
* logrus/hooks/bugsnag: move out of main repository |
|||
* logrus/core: run tests with `-race` |
|||
* logrus/core: detect TTY based on `stderr` |
|||
* logrus/core: support `WithError` on logger |
|||
* logrus/core: Solaris support |
|||
|
|||
# 0.8.7 |
|||
|
|||
* logrus/core: fix possible race (#216) |
|||
* logrus/doc: small typo fixes and doc improvements |
|||
|
|||
|
|||
# 0.8.6 |
|||
|
|||
* hooks/raven: allow passing an initialized client |
|||
|
|||
# 0.8.5 |
|||
|
|||
* logrus/core: revert #208 |
|||
|
|||
# 0.8.4 |
|||
|
|||
* formatter/text: fix data race (#218) |
|||
|
|||
# 0.8.3 |
|||
|
|||
* logrus/core: fix entry log level (#208) |
|||
* logrus/core: improve performance of text formatter by 40% |
|||
* logrus/core: expose `LevelHooks` type |
|||
* logrus/core: add support for DragonflyBSD and NetBSD |
|||
* formatter/text: print structs more verbosely |
|||
|
|||
# 0.8.2 |
|||
|
|||
* logrus: fix more Fatal family functions |
|||
|
|||
# 0.8.1 |
|||
|
|||
* logrus: fix not exiting on `Fatalf` and `Fatalln` |
|||
|
|||
# 0.8.0 |
|||
|
|||
* logrus: defaults to stderr instead of stdout |
|||
* hooks/sentry: add special field for `*http.Request` |
|||
* formatter/text: ignore Windows for colors |
|||
|
|||
# 0.7.3 |
|||
|
|||
* formatter/\*: allow configuration of timestamp layout |
|||
|
|||
# 0.7.2 |
|||
|
|||
* formatter/text: Add configuration option for time format (#158) |
@ -0,0 +1,21 @@ |
|||
The MIT License (MIT) |
|||
|
|||
Copyright (c) 2014 Simon Eskildsen |
|||
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy |
|||
of this software and associated documentation files (the "Software"), to deal |
|||
in the Software without restriction, including without limitation the rights |
|||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|||
copies of the Software, and to permit persons to whom the Software is |
|||
furnished to do so, subject to the following conditions: |
|||
|
|||
The above copyright notice and this permission notice shall be included in |
|||
all copies or substantial portions of the Software. |
|||
|
|||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
|||
THE SOFTWARE. |
@ -0,0 +1,515 @@ |
|||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) |
|||
|
|||
Logrus is a structured logger for Go (golang), completely API compatible with |
|||
the standard library logger. |
|||
|
|||
**Logrus is in maintenance-mode.** We will not be introducing new features. It's |
|||
simply too hard to do in a way that won't break many people's projects, which is |
|||
the last thing you want from your Logging library (again...). |
|||
|
|||
This does not mean Logrus is dead. Logrus will continue to be maintained for |
|||
security, (backwards compatible) bug fixes, and performance (where we are |
|||
limited by the interface). |
|||
|
|||
I believe Logrus' biggest contribution is to have played a part in today's |
|||
widespread use of structured logging in Golang. There doesn't seem to be a |
|||
reason to do a major, breaking iteration into Logrus V2, since the fantastic Go |
|||
community has built those independently. Many fantastic alternatives have sprung |
|||
up. Logrus would look like those, had it been re-designed with what we know |
|||
about structured logging in Go today. Check out, for example, |
|||
[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. |
|||
|
|||
[zerolog]: https://github.com/rs/zerolog |
|||
[zap]: https://github.com/uber-go/zap |
|||
[apex]: https://github.com/apex/log |
|||
|
|||
**Seeing weird case-sensitive problems?** It's in the past been possible to |
|||
import Logrus as both upper- and lower-case. Due to the Go package environment, |
|||
this caused issues in the community and we needed a standard. Some environments |
|||
experienced problems with the upper-case variant, so the lower-case was decided. |
|||
Everything using `logrus` will need to use the lower-case: |
|||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed. |
|||
|
|||
To fix Glide, see [these |
|||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). |
|||
For an in-depth explanation of the casing issue, see [this |
|||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). |
|||
|
|||
Nicely color-coded in development (when a TTY is attached, otherwise just |
|||
plain text): |
|||
|
|||
![Colored](http://i.imgur.com/PY7qMwd.png) |
|||
|
|||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash |
|||
or Splunk: |
|||
|
|||
```text |
|||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the |
|||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} |
|||
|
|||
{"level":"warning","msg":"The group's number increased tremendously!", |
|||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} |
|||
|
|||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!", |
|||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} |
|||
|
|||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", |
|||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} |
|||
|
|||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, |
|||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} |
|||
``` |
|||
|
|||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not |
|||
attached, the output is compatible with the |
|||
[logfmt](http://godoc.org/github.com/kr/logfmt) format: |
|||
|
|||
```text |
|||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 |
|||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 |
|||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true |
|||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 |
|||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 |
|||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true |
|||
``` |
|||
To ensure this behaviour even if a TTY is attached, set your formatter as follows: |
|||
|
|||
```go |
|||
log.SetFormatter(&log.TextFormatter{ |
|||
DisableColors: true, |
|||
FullTimestamp: true, |
|||
}) |
|||
``` |
|||
|
|||
#### Logging Method Name |
|||
|
|||
If you wish to add the calling method as a field, instruct the logger via: |
|||
```go |
|||
log.SetReportCaller(true) |
|||
``` |
|||
This adds the caller as 'method' like so: |
|||
|
|||
```json |
|||
{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", |
|||
"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} |
|||
``` |
|||
|
|||
```text |
|||
time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin |
|||
``` |
|||
Note that this does add measurable overhead - the cost will depend on the version of Go, but is |
|||
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your |
|||
environment via benchmarks: |
|||
``` |
|||
go test -bench=.*CallerTracing |
|||
``` |
|||
|
|||
|
|||
#### Case-sensitivity |
|||
|
|||
The organization's name was changed to lower-case--and this will not be changed |
|||
back. If you are getting import conflicts due to case sensitivity, please use |
|||
the lower-case import: `github.com/sirupsen/logrus`. |
|||
|
|||
#### Example |
|||
|
|||
The simplest way to use Logrus is simply the package-level exported logger: |
|||
|
|||
```go |
|||
package main |
|||
|
|||
import ( |
|||
log "github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
func main() { |
|||
log.WithFields(log.Fields{ |
|||
"animal": "walrus", |
|||
}).Info("A walrus appears") |
|||
} |
|||
``` |
|||
|
|||
Note that it's completely api-compatible with the stdlib logger, so you can |
|||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` |
|||
and you'll now have the flexibility of Logrus. You can customize it all you |
|||
want: |
|||
|
|||
```go |
|||
package main |
|||
|
|||
import ( |
|||
"os" |
|||
log "github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
func init() { |
|||
// Log as JSON instead of the default ASCII formatter. |
|||
log.SetFormatter(&log.JSONFormatter{}) |
|||
|
|||
// Output to stdout instead of the default stderr |
|||
// Can be any io.Writer, see below for File example |
|||
log.SetOutput(os.Stdout) |
|||
|
|||
// Only log the warning severity or above. |
|||
log.SetLevel(log.WarnLevel) |
|||
} |
|||
|
|||
func main() { |
|||
log.WithFields(log.Fields{ |
|||
"animal": "walrus", |
|||
"size": 10, |
|||
}).Info("A group of walrus emerges from the ocean") |
|||
|
|||
log.WithFields(log.Fields{ |
|||
"omg": true, |
|||
"number": 122, |
|||
}).Warn("The group's number increased tremendously!") |
|||
|
|||
log.WithFields(log.Fields{ |
|||
"omg": true, |
|||
"number": 100, |
|||
}).Fatal("The ice breaks!") |
|||
|
|||
// A common pattern is to re-use fields between logging statements by re-using |
|||
// the logrus.Entry returned from WithFields() |
|||
contextLogger := log.WithFields(log.Fields{ |
|||
"common": "this is a common field", |
|||
"other": "I also should be logged always", |
|||
}) |
|||
|
|||
contextLogger.Info("I'll be logged with common and other field") |
|||
contextLogger.Info("Me too") |
|||
} |
|||
``` |
|||
|
|||
For more advanced usage such as logging to multiple locations from the same |
|||
application, you can also create an instance of the `logrus` Logger: |
|||
|
|||
```go |
|||
package main |
|||
|
|||
import ( |
|||
"os" |
|||
"github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
// Create a new instance of the logger. You can have any number of instances. |
|||
var log = logrus.New() |
|||
|
|||
func main() { |
|||
// The API for setting attributes is a little different than the package level |
|||
// exported logger. See Godoc. |
|||
log.Out = os.Stdout |
|||
|
|||
// You could set this to any `io.Writer` such as a file |
|||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) |
|||
// if err == nil { |
|||
// log.Out = file |
|||
// } else { |
|||
// log.Info("Failed to log to file, using default stderr") |
|||
// } |
|||
|
|||
log.WithFields(logrus.Fields{ |
|||
"animal": "walrus", |
|||
"size": 10, |
|||
}).Info("A group of walrus emerges from the ocean") |
|||
} |
|||
``` |
|||
|
|||
#### Fields |
|||
|
|||
Logrus encourages careful, structured logging through logging fields instead of |
|||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed |
|||
to send event %s to topic %s with key %d")`, you should log the much more |
|||
discoverable: |
|||
|
|||
```go |
|||
log.WithFields(log.Fields{ |
|||
"event": event, |
|||
"topic": topic, |
|||
"key": key, |
|||
}).Fatal("Failed to send event") |
|||
``` |
|||
|
|||
We've found this API forces you to think about logging in a way that produces |
|||
much more useful logging messages. We've been in countless situations where just |
|||
a single added field to a log statement that was already there would've saved us |
|||
hours. The `WithFields` call is optional. |
|||
|
|||
In general, with Logrus using any of the `printf`-family functions should be |
|||
seen as a hint you should add a field, however, you can still use the |
|||
`printf`-family functions with Logrus. |
|||
|
|||
#### Default Fields |
|||
|
|||
Often it's helpful to have fields _always_ attached to log statements in an |
|||
application or parts of one. For example, you may want to always log the |
|||
`request_id` and `user_ip` in the context of a request. Instead of writing |
|||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on |
|||
every line, you can create a `logrus.Entry` to pass around instead: |
|||
|
|||
```go |
|||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) |
|||
requestLogger.Info("something happened on that request") // will log request_id and user_ip |
|||
requestLogger.Warn("something not great happened") |
|||
``` |
|||
|
|||
#### Hooks |
|||
|
|||
You can add hooks for logging levels. For example to send errors to an exception |
|||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to |
|||
multiple places simultaneously, e.g. syslog. |
|||
|
|||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in |
|||
`init`: |
|||
|
|||
```go |
|||
import ( |
|||
log "github.com/sirupsen/logrus" |
|||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" |
|||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" |
|||
"log/syslog" |
|||
) |
|||
|
|||
func init() { |
|||
|
|||
// Use the Airbrake hook to report errors that have Error severity or above to |
|||
// an exception tracker. You can create custom hooks, see the Hooks section. |
|||
log.AddHook(airbrake.NewHook(123, "xyz", "production")) |
|||
|
|||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") |
|||
if err != nil { |
|||
log.Error("Unable to connect to local syslog daemon") |
|||
} else { |
|||
log.AddHook(hook) |
|||
} |
|||
} |
|||
``` |
|||
Note: Syslog hook also supports connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). |
|||
|
|||
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) |
|||
|
|||
|
|||
#### Level logging |
|||
|
|||
Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. |
|||
|
|||
```go |
|||
log.Trace("Something very low level.") |
|||
log.Debug("Useful debugging information.") |
|||
log.Info("Something noteworthy happened!") |
|||
log.Warn("You should probably take a look at this.") |
|||
log.Error("Something failed but I'm not quitting.") |
|||
// Calls os.Exit(1) after logging |
|||
log.Fatal("Bye.") |
|||
// Calls panic() after logging |
|||
log.Panic("I'm bailing.") |
|||
``` |
|||
|
|||
You can set the logging level on a `Logger`, then it will only log entries with |
|||
that severity or anything above it: |
|||
|
|||
```go |
|||
// Will log anything that is info or above (warn, error, fatal, panic). Default. |
|||
log.SetLevel(log.InfoLevel) |
|||
``` |
|||
|
|||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose |
|||
environment if your application has that. |
|||
|
|||
Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). |
|||
|
|||
#### Entries |
|||
|
|||
Besides the fields added with `WithField` or `WithFields` some fields are |
|||
automatically added to all logging events: |
|||
|
|||
1. `time`. The timestamp when the entry was created. |
|||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after |
|||
the `AddFields` call. E.g. `Failed to send event.` |
|||
3. `level`. The logging level. E.g. `info`. |
|||
|
|||
#### Environments |
|||
|
|||
Logrus has no notion of environment. |
|||
|
|||
If you wish for hooks and formatters to only be used in specific environments, |
|||
you should handle that yourself. For example, if your application has a global |
|||
variable `Environment`, which is a string representation of the environment you |
|||
could do: |
|||
|
|||
```go |
|||
import ( |
|||
log "github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
func init() { |
|||
// do something here to set environment depending on an environment variable |
|||
// or command-line flag |
|||
if Environment == "production" { |
|||
log.SetFormatter(&log.JSONFormatter{}) |
|||
} else { |
|||
// The TextFormatter is default, you don't actually have to do this. |
|||
log.SetFormatter(&log.TextFormatter{}) |
|||
} |
|||
} |
|||
``` |
|||
|
|||
This configuration is how `logrus` was intended to be used, but JSON in |
|||
production is mostly only useful if you do log aggregation with tools like |
|||
Splunk or Logstash. |
|||
|
|||
#### Formatters |
|||
|
|||
The built-in logging formatters are: |
|||
|
|||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise |
|||
without colors. |
|||
* *Note:* to force colored output when there is no TTY, set the `ForceColors` |
|||
field to `true`. To force no colored output even if there is a TTY set the |
|||
`DisableColors` field to `true`. For Windows, see |
|||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). |
|||
* When colors are enabled, levels are truncated to 4 characters by default. To disable |
|||
truncation set the `DisableLevelTruncation` field to `true`. |
|||
* When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. |
|||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). |
|||
* `logrus.JSONFormatter`. Logs fields as JSON. |
|||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). |
|||
|
|||
Third party logging formatters: |
|||
|
|||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. |
|||
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). |
|||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. |
|||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. |
|||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. |
|||
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. |
|||
* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. |
|||
* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. |
|||
|
|||
You can define your formatter by implementing the `Formatter` interface, |
|||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a |
|||
`Fields` type (`map[string]interface{}`) with all your fields as well as the |
|||
default ones (see Entries section above): |
|||
|
|||
```go |
|||
type MyJSONFormatter struct { |
|||
} |
|||
|
|||
log.SetFormatter(new(MyJSONFormatter)) |
|||
|
|||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { |
|||
// Note this doesn't include Time, Level and Message which are available on |
|||
// the Entry. Consult `godoc` on information about those fields or read the |
|||
// source of the official loggers. |
|||
serialized, err := json.Marshal(entry.Data) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) |
|||
} |
|||
return append(serialized, '\n'), nil |
|||
} |
|||
``` |
|||
|
|||
#### Logger as an `io.Writer` |
|||
|
|||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. |
|||
|
|||
```go |
|||
w := logger.Writer() |
|||
defer w.Close() |
|||
|
|||
srv := http.Server{ |
|||
// create a stdlib log.Logger that writes to |
|||
// logrus.Logger. |
|||
ErrorLog: log.New(w, "", 0), |
|||
} |
|||
``` |
|||
|
|||
Each line written to that writer will be printed the usual way, using formatters |
|||
and hooks. The level for those entries is `info`. |
|||
|
|||
This means that we can override the standard library logger easily: |
|||
|
|||
```go |
|||
logger := logrus.New() |
|||
logger.Formatter = &logrus.JSONFormatter{} |
|||
|
|||
// Use logrus for standard log output |
|||
// Note that `log` here references stdlib's log |
|||
// Not logrus imported under the name `log`. |
|||
log.SetOutput(logger.Writer()) |
|||
``` |
|||
|
|||
#### Rotation |
|||
|
|||
Log rotation is not provided with Logrus. Log rotation should be done by an |
|||
external program (like `logrotate(8)`) that can compress and delete old log |
|||
entries. It should not be a feature of the application-level logger. |
|||
|
|||
#### Tools |
|||
|
|||
| Tool | Description | |
|||
| ---- | ----------- | |
|||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| |
|||
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | |
|||
|
|||
#### Testing |
|||
|
|||
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: |
|||
|
|||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook |
|||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): |
|||
|
|||
```go |
|||
import( |
|||
"github.com/sirupsen/logrus" |
|||
"github.com/sirupsen/logrus/hooks/test" |
|||
"github.com/stretchr/testify/assert" |
|||
"testing" |
|||
) |
|||
|
|||
func TestSomething(t *testing.T) {
|||
logger, hook := test.NewNullLogger() |
|||
logger.Error("Helloerror") |
|||
|
|||
assert.Equal(t, 1, len(hook.Entries)) |
|||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) |
|||
assert.Equal(t, "Helloerror", hook.LastEntry().Message) |
|||
|
|||
hook.Reset() |
|||
assert.Nil(t, hook.LastEntry()) |
|||
} |
|||
``` |
|||
|
|||
#### Fatal handlers |
|||
|
|||
Logrus can register one or more functions that will be called when any `fatal` |
|||
level message is logged. The registered handlers will be executed before |
|||
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need |
|||
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. |
|||
|
|||
``` |
|||
... |
|||
handler := func() { |
|||
// gracefully shutdown something... |
|||
} |
|||
logrus.RegisterExitHandler(handler) |
|||
... |
|||
``` |
|||
|
|||
#### Thread safety |
|||
|
|||
By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. |
|||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. |
|||
|
|||
Situations where locking is not needed include:
|||
|
|||
* You have no hooks registered, or hooks calling is already thread-safe. |
|||
|
|||
* Writing to logger.Out is already thread-safe, for example: |
|||
|
|||
1) logger.Out is protected by locks. |
|||
|
|||
2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) |
|||
|
|||
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) |
@ -0,0 +1,76 @@ |
|||
package logrus |
|||
|
|||
// The following code was sourced and modified from the
|
|||
// https://github.com/tebeka/atexit package governed by the following license:
|
|||
//
|
|||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
|||
//
|
|||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|||
// this software and associated documentation files (the "Software"), to deal in
|
|||
// the Software without restriction, including without limitation the rights to
|
|||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
|||
// subject to the following conditions:
|
|||
//
|
|||
// The above copyright notice and this permission notice shall be included in all
|
|||
// copies or substantial portions of the Software.
|
|||
//
|
|||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
|||
import ( |
|||
"fmt" |
|||
"os" |
|||
) |
|||
|
|||
// handlers is the ordered list of exit callbacks run by Exit (and by any
// Fatal log entry) before the process terminates.
var handlers = []func(){}

// runHandler invokes one exit callback. A panic raised inside the callback
// is recovered and reported on stderr so that later callbacks still run.
func runHandler(h func()) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", r)
		}
	}()
	h()
}

// runHandlers executes every registered exit callback in registration order.
func runHandlers() {
	for _, h := range handlers {
		runHandler(h)
	}
}
|||
|
|||
// Exit runs all the Logrus atexit handlers and then terminates the program
// using os.Exit(code). Handlers run in registration order; a panicking
// handler does not prevent the remaining handlers (or the exit) from running.
func Exit(code int) {
	runHandlers()
	os.Exit(code)
}
|||
|
|||
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
// call logrus.Exit to invoke all handlers. The handlers will also be invoked
// when any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shut down — for example closing
// database connections, or sending an alert that the application is closing.
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}
|||
|
|||
// DeferExitHandler prepends a Logrus Exit handler to the list of handlers
// (so it runs before previously registered ones); call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry
// is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shut down — for example closing
// database connections, or sending an alert that the application is closing.
func DeferExitHandler(handler func()) {
	handlers = append([]func(){handler}, handlers...)
}
@ -0,0 +1,14 @@ |
|||
version: "{build}" |
|||
platform: x64 |
|||
clone_folder: c:\gopath\src\github.com\sirupsen\logrus |
|||
environment: |
|||
GOPATH: c:\gopath |
|||
branches: |
|||
only: |
|||
- master |
|||
install: |
|||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH% |
|||
- go version |
|||
build_script: |
|||
- go get -t |
|||
- go test |
@ -0,0 +1,43 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"sync" |
|||
) |
|||
|
|||
var ( |
|||
bufferPool BufferPool |
|||
) |
|||
|
|||
type BufferPool interface { |
|||
Put(*bytes.Buffer) |
|||
Get() *bytes.Buffer |
|||
} |
|||
|
|||
type defaultPool struct { |
|||
pool *sync.Pool |
|||
} |
|||
|
|||
func (p *defaultPool) Put(buf *bytes.Buffer) { |
|||
p.pool.Put(buf) |
|||
} |
|||
|
|||
func (p *defaultPool) Get() *bytes.Buffer { |
|||
return p.pool.Get().(*bytes.Buffer) |
|||
} |
|||
|
|||
// SetBufferPool allows to replace the default logrus buffer pool
|
|||
// to better meets the specific needs of an application.
|
|||
func SetBufferPool(bp BufferPool) { |
|||
bufferPool = bp |
|||
} |
|||
|
|||
func init() { |
|||
SetBufferPool(&defaultPool{ |
|||
pool: &sync.Pool{ |
|||
New: func() interface{} { |
|||
return new(bytes.Buffer) |
|||
}, |
|||
}, |
|||
}) |
|||
} |
@ -0,0 +1,26 @@ |
|||
/* |
|||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger. |
|||
|
|||
|
|||
The simplest way to use Logrus is simply the package-level exported logger: |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
log "github.com/sirupsen/logrus" |
|||
) |
|||
|
|||
func main() { |
|||
log.WithFields(log.Fields{ |
|||
"animal": "walrus", |
|||
"number": 1, |
|||
"size": 10, |
|||
}).Info("A walrus appears") |
|||
} |
|||
|
|||
Output: |
|||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 |
|||
|
|||
For a full guide visit https://github.com/sirupsen/logrus
|
|||
*/ |
|||
package logrus |
@ -0,0 +1,442 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"context" |
|||
"fmt" |
|||
"os" |
|||
"reflect" |
|||
"runtime" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
) |
|||
|
|||
var (
	// logrusPackage is the fully qualified logrus package name, cached on
	// first use by getCaller.
	logrusPackage string

	// minimumCallerDepth is the position in the call stack where the walk
	// starts when tracing the calling method.
	minimumCallerDepth int

	// callerInitOnce guards the one-time caller-information initialisation.
	callerInitOnce sync.Once
)

const (
	// maximumCallerDepth bounds the stack walk in getCaller.
	maximumCallerDepth int = 25
	// knownLogrusFrames is the number of logrus-internal frames expected
	// between a logging call site and getCaller.
	knownLogrusFrames int = 4
)

func init() {
	// Start at the bottom of the stack before the package-name cache is
	// primed by the first getCaller invocation.
	minimumCallerDepth = 1
}
|||
|
|||
// ErrorKey defines the field key used when adding errors via WithError.
var ErrorKey = "error"
|||
|
|||
// An Entry is the final or intermediate Logrus logging entry. It contains
// all the fields passed with WithField{,s}. It's finally logged when Trace,
// Debug, Info, Warn, Error, Fatal or Panic is called on it. These objects
// can be reused and passed around as much as you wish to avoid field
// duplication.
type Entry struct {
	Logger *Logger

	// Data contains all the fields set by the user.
	Data Fields

	// Time at which the log entry was created.
	Time time.Time

	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error,
	// Fatal or Panic. This field will be set on entry firing and the value
	// will be equal to the one in the Logger struct field.
	Level Level

	// Caller is the calling method, with package name (populated in
	// Entry.log when the logger's ReportCaller flag is set).
	Caller *runtime.Frame

	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic.
	Message string

	// Buffer may be set on the entry while the formatter runs in
	// entry.log(); it is detached and returned to the pool afterwards.
	Buffer *bytes.Buffer

	// Context contains the context set by the user. Useful for hook
	// processing etc.
	Context context.Context

	// err may contain a field formatting error (e.g. a function value
	// rejected by WithFields).
	err string
}
|||
|
|||
// NewEntry returns a fresh Entry bound to the given logger, with a small
// pre-sized field map.
func NewEntry(logger *Logger) *Entry {
	return &Entry{
		Logger: logger,
		// Default is three fields, plus one optional. Give a little
		// extra room.
		Data: make(Fields, 6),
	}
}
|||
|
|||
// Dup returns a shallow copy of the entry with its own field map, so the
// copy can be mutated without affecting the original. Level, Message,
// Caller and Buffer are not copied — they are per-call state set inside
// Entry.log.
func (entry *Entry) Dup() *Entry {
	data := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		data[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
}
|||
|
|||
// Returns the bytes representation of this entry from the formatter.
|
|||
func (entry *Entry) Bytes() ([]byte, error) { |
|||
return entry.Logger.Formatter.Format(entry) |
|||
} |
|||
|
|||
// Returns the string representation from the reader and ultimately the
|
|||
// formatter.
|
|||
func (entry *Entry) String() (string, error) { |
|||
serialized, err := entry.Bytes() |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
str := string(serialized) |
|||
return str, nil |
|||
} |
|||
|
|||
// WithError adds an error as a single field (using the key defined in
// ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
	return entry.WithField(ErrorKey, err)
}
|||
|
|||
// Add a context to the Entry.
|
|||
func (entry *Entry) WithContext(ctx context.Context) *Entry { |
|||
dataCopy := make(Fields, len(entry.Data)) |
|||
for k, v := range entry.Data { |
|||
dataCopy[k] = v |
|||
} |
|||
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} |
|||
} |
|||
|
|||
// WithField adds a single field to the Entry; a convenience wrapper over
// WithFields.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
}
|||
|
|||
// Add a map of fields to the Entry.
|
|||
func (entry *Entry) WithFields(fields Fields) *Entry { |
|||
data := make(Fields, len(entry.Data)+len(fields)) |
|||
for k, v := range entry.Data { |
|||
data[k] = v |
|||
} |
|||
fieldErr := entry.err |
|||
for k, v := range fields { |
|||
isErrField := false |
|||
if t := reflect.TypeOf(v); t != nil { |
|||
switch { |
|||
case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: |
|||
isErrField = true |
|||
} |
|||
} |
|||
if isErrField { |
|||
tmp := fmt.Sprintf("can not add field %q", k) |
|||
if fieldErr != "" { |
|||
fieldErr = entry.err + ", " + tmp |
|||
} else { |
|||
fieldErr = tmp |
|||
} |
|||
} else { |
|||
data[k] = v |
|||
} |
|||
} |
|||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} |
|||
} |
|||
|
|||
// Overrides the time of the Entry.
|
|||
func (entry *Entry) WithTime(t time.Time) *Entry { |
|||
dataCopy := make(Fields, len(entry.Data)) |
|||
for k, v := range entry.Data { |
|||
dataCopy[k] = v |
|||
} |
|||
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} |
|||
} |
|||
|
|||
// getPackageName reduces a fully qualified function name (as returned by
// runtime.Func.Name, e.g. "github.com/x/y.(*T).m") to its package path by
// repeatedly stripping the trailing ".segment" until the last dot falls
// inside the path portion (i.e. before the last slash).
func getPackageName(f string) string {
	for {
		lastPeriod := strings.LastIndex(f, ".")
		lastSlash := strings.LastIndex(f, "/")
		if lastPeriod <= lastSlash {
			break
		}
		f = f[:lastPeriod]
	}
	return f
}
|||
|
|||
// getCaller retrieves the first non-logrus calling stack frame, used to
// populate Entry.Caller when ReportCaller is enabled. It returns nil when
// no frame outside the logrus package is found within the depth limit.
func getCaller() *runtime.Frame {
	// cache this package's fully-qualified name (one-time initialisation)
	callerInitOnce.Do(func() {
		pcs := make([]uintptr, maximumCallerDepth)
		_ = runtime.Callers(0, pcs)

		// Dynamically derive the logrus package name from our own frame
		// (the one whose name contains "getCaller"), then raise the
		// minimum caller depth so later walks skip known logrus frames.
		// NOTE(review): runtime.FuncForPC can return nil for an invalid
		// PC; this loop assumes the first frames are always resolvable —
		// confirm under unusual build modes.
		for i := 0; i < maximumCallerDepth; i++ {
			funcName := runtime.FuncForPC(pcs[i]).Name()
			if strings.Contains(funcName, "getCaller") {
				logrusPackage = getPackageName(funcName)
				break
			}
		}

		minimumCallerDepth = knownLogrusFrames
	})

	// Restrict the lookback frames to avoid runaway lookups
	pcs := make([]uintptr, maximumCallerDepth)
	depth := runtime.Callers(minimumCallerDepth, pcs)
	frames := runtime.CallersFrames(pcs[:depth])

	// Walk outward until the first frame that does not belong to logrus.
	for f, again := frames.Next(); again; f, again = frames.Next() {
		pkg := getPackageName(f.Function)

		// If the caller isn't part of this package, we're done
		if pkg != logrusPackage {
			return &f //nolint:scopelint
		}
	}

	// if we got here, we failed to find the caller's context
	return nil
}
|||
|
|||
func (entry Entry) HasCaller() (has bool) { |
|||
return entry.Logger != nil && |
|||
entry.Logger.ReportCaller && |
|||
entry.Caller != nil |
|||
} |
|||
|
|||
// log is the single unexported sink that all leveled logging funnels
// through. It stamps a duplicate of the entry with time/level/message,
// fires hooks, formats the entry into a pooled buffer and writes it out.
// Callers are expected to have already checked that the level is enabled.
// At PanicLevel (or more severe) the stamped entry itself is panicked.
func (entry *Entry) log(level Level, msg string) {
	var buffer *bytes.Buffer

	// Work on a copy so the user-held Entry can be safely reused.
	newEntry := entry.Dup()

	if newEntry.Time.IsZero() {
		newEntry.Time = time.Now()
	}

	newEntry.Level = level
	newEntry.Message = msg

	// Snapshot logger settings under the logger mutex.
	newEntry.Logger.mu.Lock()
	reportCaller := newEntry.Logger.ReportCaller
	bufPool := newEntry.getBufferPool()
	newEntry.Logger.mu.Unlock()

	if reportCaller {
		newEntry.Caller = getCaller()
	}

	newEntry.fireHooks()
	// Borrow a formatting buffer from the pool; the deferred func
	// guarantees it is detached from the entry and returned to the pool
	// even if write (or a formatter it calls) panics.
	buffer = bufPool.Get()
	defer func() {
		newEntry.Buffer = nil
		buffer.Reset()
		bufPool.Put(buffer)
	}()
	buffer.Reset()
	newEntry.Buffer = buffer

	newEntry.write()

	newEntry.Buffer = nil

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(newEntry)
	}
}
|||
|
|||
func (entry *Entry) getBufferPool() (pool BufferPool) { |
|||
if entry.Logger.BufferPool != nil { |
|||
return entry.Logger.BufferPool |
|||
} |
|||
return bufferPool |
|||
} |
|||
|
|||
func (entry *Entry) fireHooks() { |
|||
var tmpHooks LevelHooks |
|||
entry.Logger.mu.Lock() |
|||
tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) |
|||
for k, v := range entry.Logger.Hooks { |
|||
tmpHooks[k] = v |
|||
} |
|||
entry.Logger.mu.Unlock() |
|||
|
|||
err := tmpHooks.Fire(entry.Level, entry) |
|||
if err != nil { |
|||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) |
|||
} |
|||
} |
|||
|
|||
// write serializes the entry with the logger's formatter and writes the
// result to the logger's output, holding the logger mutex for the whole
// operation. Format or write failures are reported on stderr and otherwise
// swallowed so logging never fails the caller.
func (entry *Entry) write() {
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	serialized, err := entry.Logger.Formatter.Format(entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		return
	}
	if _, err := entry.Logger.Out.Write(serialized); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
	}
}
|||
|
|||
// Log will log a message at the level given as parameter.
// Warning: using Log at Fatal level will not run the logger's exit handlers
// nor call os.Exit — use Entry.Fatal for that. (At Panic level the panic is
// raised inside Entry.log once the message has been emitted, so Log does
// panic in that case.)
func (entry *Entry) Log(level Level, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.log(level, fmt.Sprint(args...))
	}
}
|||
|
|||
// Trace logs a message at level Trace.
func (entry *Entry) Trace(args ...interface{}) {
	entry.Log(TraceLevel, args...)
}

// Debug logs a message at level Debug.
func (entry *Entry) Debug(args ...interface{}) {
	entry.Log(DebugLevel, args...)
}

// Print logs a message at level Info (stdlib log compatibility alias).
func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

// Info logs a message at level Info.
func (entry *Entry) Info(args ...interface{}) {
	entry.Log(InfoLevel, args...)
}

// Warn logs a message at level Warn.
func (entry *Entry) Warn(args ...interface{}) {
	entry.Log(WarnLevel, args...)
}

// Warning is an alias for Warn.
func (entry *Entry) Warning(args ...interface{}) {
	entry.Warn(args...)
}

// Error logs a message at level Error.
func (entry *Entry) Error(args ...interface{}) {
	entry.Log(ErrorLevel, args...)
}

// Fatal logs a message at level Fatal, then runs the logger's exit
// handlers and terminates the process via Logger.Exit(1).
func (entry *Entry) Fatal(args ...interface{}) {
	entry.Log(FatalLevel, args...)
	entry.Logger.Exit(1)
}

// Panic logs a message at level Panic; the panic itself is raised inside
// Entry.log once the message has been emitted.
func (entry *Entry) Panic(args ...interface{}) {
	entry.Log(PanicLevel, args...)
}
|||
|
|||
// Entry Printf family functions

// Logf formats per fmt.Sprintf and logs at the given level. The level is
// checked here before formatting so disabled levels pay no Sprintf cost
// (Log re-checks it harmlessly).
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, fmt.Sprintf(format, args...))
	}
}

// Tracef formats and logs at level Trace.
func (entry *Entry) Tracef(format string, args ...interface{}) {
	entry.Logf(TraceLevel, format, args...)
}

// Debugf formats and logs at level Debug.
func (entry *Entry) Debugf(format string, args ...interface{}) {
	entry.Logf(DebugLevel, format, args...)
}

// Infof formats and logs at level Info.
func (entry *Entry) Infof(format string, args ...interface{}) {
	entry.Logf(InfoLevel, format, args...)
}

// Printf formats and logs at level Info (stdlib log compatibility alias).
func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

// Warnf formats and logs at level Warn.
func (entry *Entry) Warnf(format string, args ...interface{}) {
	entry.Logf(WarnLevel, format, args...)
}

// Warningf is an alias for Warnf.
func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

// Errorf formats and logs at level Error.
func (entry *Entry) Errorf(format string, args ...interface{}) {
	entry.Logf(ErrorLevel, format, args...)
}

// Fatalf formats and logs at level Fatal, then runs the logger's exit
// handlers and terminates the process via Logger.Exit(1).
func (entry *Entry) Fatalf(format string, args ...interface{}) {
	entry.Logf(FatalLevel, format, args...)
	entry.Logger.Exit(1)
}

// Panicf formats and logs at level Panic; the panic is raised inside
// Entry.log.
func (entry *Entry) Panicf(format string, args ...interface{}) {
	entry.Logf(PanicLevel, format, args...)
}
|||
|
|||
// Entry Println family functions

// Logln logs at the given level with fmt.Sprintln-style spacing between
// operands (but no trailing newline — see sprintlnn).
func (entry *Entry) Logln(level Level, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, entry.sprintlnn(args...))
	}
}

// Traceln logs at level Trace with Sprintln-style spacing.
func (entry *Entry) Traceln(args ...interface{}) {
	entry.Logln(TraceLevel, args...)
}

// Debugln logs at level Debug with Sprintln-style spacing.
func (entry *Entry) Debugln(args ...interface{}) {
	entry.Logln(DebugLevel, args...)
}

// Infoln logs at level Info with Sprintln-style spacing.
func (entry *Entry) Infoln(args ...interface{}) {
	entry.Logln(InfoLevel, args...)
}

// Println logs at level Info (stdlib log compatibility alias).
func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

// Warnln logs at level Warn with Sprintln-style spacing.
func (entry *Entry) Warnln(args ...interface{}) {
	entry.Logln(WarnLevel, args...)
}

// Warningln is an alias for Warnln.
func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

// Errorln logs at level Error with Sprintln-style spacing.
func (entry *Entry) Errorln(args ...interface{}) {
	entry.Logln(ErrorLevel, args...)
}

// Fatalln logs at level Fatal, then runs the logger's exit handlers and
// terminates the process via Logger.Exit(1).
func (entry *Entry) Fatalln(args ...interface{}) {
	entry.Logln(FatalLevel, args...)
	entry.Logger.Exit(1)
}

// Panicln logs at level Panic; the panic is raised inside Entry.log.
func (entry *Entry) Panicln(args ...interface{}) {
	entry.Logln(PanicLevel, args...)
}
|||
|
|||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
|||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
|||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
|||
// string allocation, we do the simplest thing.
|
|||
func (entry *Entry) sprintlnn(args ...interface{}) string { |
|||
msg := fmt.Sprintln(args...) |
|||
return msg[:len(msg)-1] |
|||
} |
@ -0,0 +1,270 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"context" |
|||
"io" |
|||
"time" |
|||
) |
|||
|
|||
var (
	// std is the package-level default logger, analogous to the standard
	// logger in stdlib `log`. All package-level logging functions below
	// delegate to it.
	std = New()
)

// StandardLogger returns the package-level standard logger.
func StandardLogger() *Logger {
	return std
}
|||
|
|||
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
	std.SetOutput(out)
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.SetFormatter(formatter)
}

// SetReportCaller sets whether the standard logger will include the calling
// method as a field.
func SetReportCaller(include bool) {
	std.SetReportCaller(include)
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.SetLevel(level)
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	return std.GetLevel()
}

// IsLevelEnabled checks if the log level of the standard logger is greater
// than the level param.
func IsLevelEnabled(level Level) bool {
	return std.IsLevelEnabled(level)
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.AddHook(hook)
}
|||
|
|||
// WithError creates an entry from the standard logger and adds an error to
// it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
	return std.WithField(ErrorKey, err)
}

// WithContext creates an entry from the standard logger and adds a context to it.
func WithContext(ctx context.Context) *Entry {
	return std.WithContext(ctx)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithTime(t time.Time) *Entry {
	return std.WithTime(t)
}
|||
|
|||
// Trace logs a message at level Trace on the standard logger.
func Trace(args ...interface{}) {
	std.Trace(args...)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger then the
// process will exit with status set to 1.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}
|||
|
|||
// TraceFn logs a message from a func at level Trace on the standard logger.
func TraceFn(fn LogFunction) {
	std.TraceFn(fn)
}

// DebugFn logs a message from a func at level Debug on the standard logger.
func DebugFn(fn LogFunction) {
	std.DebugFn(fn)
}

// PrintFn logs a message from a func at level Info on the standard logger.
func PrintFn(fn LogFunction) {
	std.PrintFn(fn)
}

// InfoFn logs a message from a func at level Info on the standard logger.
func InfoFn(fn LogFunction) {
	std.InfoFn(fn)
}

// WarnFn logs a message from a func at level Warn on the standard logger.
func WarnFn(fn LogFunction) {
	std.WarnFn(fn)
}

// WarningFn logs a message from a func at level Warn on the standard logger.
func WarningFn(fn LogFunction) {
	std.WarningFn(fn)
}

// ErrorFn logs a message from a func at level Error on the standard logger.
func ErrorFn(fn LogFunction) {
	std.ErrorFn(fn)
}

// PanicFn logs a message from a func at level Panic on the standard logger.
func PanicFn(fn LogFunction) {
	std.PanicFn(fn)
}

// FatalFn logs a message from a func at level Fatal on the standard logger
// then the process will exit with status set to 1.
func FatalFn(fn LogFunction) {
	std.FatalFn(fn)
}
|||
|
|||
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
	std.Tracef(format, args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger then the
// process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}
|||
|
|||
// Traceln logs a message at level Trace on the standard logger.
func Traceln(args ...interface{}) {
	std.Traceln(args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger then the
// process will exit with status set to 1.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}
@ -0,0 +1,78 @@ |
|||
package logrus |
|||
|
|||
import "time" |
|||
|
|||
// Default key names for the default fields.
const (
	defaultTimestampFormat = time.RFC3339
	FieldKeyMsg            = "msg"
	FieldKeyLevel          = "level"
	FieldKeyTime           = "time"
	FieldKeyLogrusError    = "logrus_error"
	FieldKeyFunc           = "func"
	FieldKeyFile           = "file"
)

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
//   - `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
//   - `entry.Data["time"]`. The timestamp.
//   - `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}
|||
|
|||
// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
|
|||
// dumping it. If this code wasn't there doing:
|
|||
//
|
|||
// logrus.WithField("level", 1).Info("hello")
|
|||
//
|
|||
// Would just silently drop the user provided level. Instead with this code
|
|||
// it'll logged as:
|
|||
//
|
|||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
|||
//
|
|||
// It's not exported because it's still using Data in an opinionated way. It's to
|
|||
// avoid code duplication between the two default formatters.
|
|||
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { |
|||
timeKey := fieldMap.resolve(FieldKeyTime) |
|||
if t, ok := data[timeKey]; ok { |
|||
data["fields."+timeKey] = t |
|||
delete(data, timeKey) |
|||
} |
|||
|
|||
msgKey := fieldMap.resolve(FieldKeyMsg) |
|||
if m, ok := data[msgKey]; ok { |
|||
data["fields."+msgKey] = m |
|||
delete(data, msgKey) |
|||
} |
|||
|
|||
levelKey := fieldMap.resolve(FieldKeyLevel) |
|||
if l, ok := data[levelKey]; ok { |
|||
data["fields."+levelKey] = l |
|||
delete(data, levelKey) |
|||
} |
|||
|
|||
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) |
|||
if l, ok := data[logrusErrKey]; ok { |
|||
data["fields."+logrusErrKey] = l |
|||
delete(data, logrusErrKey) |
|||
} |
|||
|
|||
// If reportCaller is not set, 'func' will not conflict.
|
|||
if reportCaller { |
|||
funcKey := fieldMap.resolve(FieldKeyFunc) |
|||
if l, ok := data[funcKey]; ok { |
|||
data["fields."+funcKey] = l |
|||
} |
|||
fileKey := fieldMap.resolve(FieldKeyFile) |
|||
if l, ok := data[fileKey]; ok { |
|||
data["fields."+fileKey] = l |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,34 @@ |
|||
package logrus |
|||
|
|||
// Hook is fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	// Levels returns the set of levels this hook should fire for.
	Levels() []Level
	// Fire is invoked with the entry being logged; a non-nil error aborts
	// the remaining hooks for that entry.
	Fire(*Entry) error
}
|||
|
|||
// LevelHooks is the internal type for storing the hooks on a logger instance,
// keyed by the level each hook fires for.
type LevelHooks map[Level][]Hook
|||
|
|||
// Add a hook to an instance of logger. This is called with
|
|||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
|||
func (hooks LevelHooks) Add(hook Hook) { |
|||
for _, level := range hook.Levels() { |
|||
hooks[level] = append(hooks[level], hook) |
|||
} |
|||
} |
|||
|
|||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
|||
// appropriate hooks for a log entry.
|
|||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error { |
|||
for _, hook := range hooks[level] { |
|||
if err := hook.Fire(entry); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
@ -0,0 +1,128 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/json" |
|||
"fmt" |
|||
"runtime" |
|||
) |
|||
|
|||
// fieldKey names one of the default log fields (time, msg, level, ...).
type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// resolve returns the user-provided replacement name for key, falling back
// to the key itself when no mapping is present.
func (f FieldMap) resolve(key fieldKey) string {
	mapped, ok := f[key]
	if !ok {
		return string(key)
	}
	return mapped
}
|||
|
|||
// JSONFormatter formats logs into parsable json.
type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	// The format to use is the same as for time.Format or time.Parse from the
	// standard library.
	// The standard library already provides a set of predefined formats.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output.
	DisableTimestamp bool

	// DisableHTMLEscape allows disabling html escaping in output.
	DisableHTMLEscape bool

	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
	DataKey string

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   	FieldMap: FieldMap{
	// 		 FieldKeyTime:  "@timestamp",
	// 		 FieldKeyLevel: "@level",
	// 		 FieldKeyMsg:   "@message",
	// 		 FieldKeyFunc:  "@caller",
	//    },
	// }
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the json data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from json fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// PrettyPrint will indent all json logs.
	PrettyPrint bool
}
|||
|
|||
// Format renders a single log entry as a JSON object (newline-terminated by
// json.Encoder.Encode).
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+4)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	// Optionally nest all user fields under DataKey; this happens before the
	// default keys are added below, so they cannot collide.
	if f.DataKey != "" {
		newData := make(Fields, 4)
		newData[f.DataKey] = data
		data = newData
	}

	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}

	if entry.err != "" {
		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
	}
	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
	if entry.HasCaller() {
		funcVal := entry.Caller.Function
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}
		// Empty strings returned by CallerPrettyfier suppress the key.
		if funcVal != "" {
			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
		}
		if fileVal != "" {
			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
		}
	}

	// Reuse the entry's buffer when available to avoid an allocation.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	encoder := json.NewEncoder(b)
	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
	if f.PrettyPrint {
		encoder.SetIndent("", "  ")
	}
	if err := encoder.Encode(data); err != nil {
		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
	}

	return b.Bytes(), nil
}
@ -0,0 +1,417 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"context" |
|||
"io" |
|||
"os" |
|||
"sync" |
|||
"sync/atomic" |
|||
"time" |
|||
) |
|||
|
|||
// LogFunction For big messages, it can be more efficient to pass a function
// and only call it if the log level is actually enabled rather than
// generating the log message and then checking if the level is enabled.
type LogFunction func() []interface{}

// Logger is the central logging object; the zero value is not usable, use
// New() or populate the exported fields directly.
type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stderr`. You can also set this to
	// something more adventurous, such as logging to Kafka.
	Out io.Writer
	// Hooks for the logger instance. These allow firing events based on logging
	// levels and log entries. For example, to send errors to an error tracking
	// service, log to StatsD or dump the core on fatal errors.
	Hooks LevelHooks
	// All log entries pass through the formatter before logged to Out. The
	// included formatters are `TextFormatter` and `JSONFormatter` for which
	// TextFormatter is the default. In development (when a TTY is attached) it
	// logs with colors, but to a file it wouldn't. You can easily implement your
	// own that implements the `Formatter` interface, see the `README` or included
	// formatters for examples.
	Formatter Formatter

	// Flag for whether to log caller info (off by default).
	ReportCaller bool

	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged.
	Level Level
	// Used to sync writing to the log. Locking is enabled by default.
	mu MutexWrap
	// Reusable empty entry pool.
	entryPool sync.Pool
	// Function to exit the application, defaults to `os.Exit()`.
	ExitFunc exitFunc
	// The buffer pool used to format the log. If it is nil, the default global
	// buffer pool will be used.
	BufferPool BufferPool
}
|||
|
|||
// exitFunc is the signature of the function used to terminate the process.
type exitFunc func(int)

// MutexWrap is a mutex whose locking can be switched off entirely, for
// callers that can safely write to the output without synchronization.
type MutexWrap struct {
	lock     sync.Mutex
	disabled bool
}

// Lock acquires the underlying mutex unless locking has been disabled.
func (mw *MutexWrap) Lock() {
	if mw.disabled {
		return
	}
	mw.lock.Lock()
}

// Unlock releases the underlying mutex unless locking has been disabled.
func (mw *MutexWrap) Unlock() {
	if mw.disabled {
		return
	}
	mw.lock.Unlock()
}

// Disable turns all subsequent Lock/Unlock calls into no-ops.
func (mw *MutexWrap) Disable() {
	mw.disabled = true
}
|||
|
|||
// New creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//	var log = &logrus.Logger{
//	  Out: os.Stderr,
//	  Formatter: new(logrus.TextFormatter),
//	  Hooks: make(logrus.LevelHooks),
//	  Level: logrus.DebugLevel,
//	}
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
	return &Logger{
		Out:          os.Stderr,
		Formatter:    new(TextFormatter),
		Hooks:        make(LevelHooks),
		Level:        InfoLevel,
		ExitFunc:     os.Exit,
		ReportCaller: false,
	}
}

// newEntry returns an Entry from the pool when one is available, otherwise a
// freshly allocated one.
func (logger *Logger) newEntry() *Entry {
	entry, ok := logger.entryPool.Get().(*Entry)
	if ok {
		return entry
	}
	return NewEntry(logger)
}

// releaseEntry clears the entry's user fields and returns it to the pool for
// reuse by a later newEntry call.
func (logger *Logger) releaseEntry(entry *Entry) {
	entry.Data = map[string]interface{}{}
	logger.entryPool.Put(entry)
}
|||
|
|||
// WithField allocates a new entry and adds a field to it.
// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
// this new returned entry.
// If you want multiple fields, use `WithFields`.
//
// NOTE(review): the pooled entry is released when this returns; this assumes
// entry.WithField returns a new derived Entry rather than mutating the
// receiver — confirm in entry.go.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithField(key, value)
}

// WithFields adds a struct of fields to the log entry. All it does is call
// `WithField` for each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithFields(fields)
}

// WithError adds an error as a single field to the log entry. All it does is
// call `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithError(err)
}

// WithContext adds a context to the log entry.
func (logger *Logger) WithContext(ctx context.Context) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithContext(ctx)
}

// WithTime overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithTime(t)
}
|||
|
|||
// Logf logs a formatted message at the given level, if that level is enabled.
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Logf(level, format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Tracef(format string, args ...interface{}) {
	logger.Logf(TraceLevel, format, args...)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
	logger.Logf(DebugLevel, format, args...)
}

func (logger *Logger) Infof(format string, args ...interface{}) {
	logger.Logf(InfoLevel, format, args...)
}

// Printf logs a formatted message unconditionally: unlike the level-specific
// variants it does not go through Logf and performs no level check.
func (logger *Logger) Printf(format string, args ...interface{}) {
	entry := logger.newEntry()
	entry.Printf(format, args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
	logger.Logf(WarnLevel, format, args...)
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
	logger.Warnf(format, args...)
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
	logger.Logf(ErrorLevel, format, args...)
}

// Fatalf logs at FatalLevel and then terminates the process via logger.Exit(1).
func (logger *Logger) Fatalf(format string, args ...interface{}) {
	logger.Logf(FatalLevel, format, args...)
	logger.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
	logger.Logf(PanicLevel, format, args...)
}
|||
|
|||
// Log will log a message at the level given as parameter.
// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
// For this behaviour Logger.Panic or Logger.Fatal should be used instead.
func (logger *Logger) Log(level Level, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Log(level, args...)
		logger.releaseEntry(entry)
	}
}

// LogFn logs the values produced by fn at the given level; fn is only
// evaluated when the level is enabled, avoiding the cost of building the
// message otherwise.
func (logger *Logger) LogFn(level Level, fn LogFunction) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Log(level, fn()...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Trace(args ...interface{}) {
	logger.Log(TraceLevel, args...)
}

func (logger *Logger) Debug(args ...interface{}) {
	logger.Log(DebugLevel, args...)
}

func (logger *Logger) Info(args ...interface{}) {
	logger.Log(InfoLevel, args...)
}

// Print logs unconditionally (no level check).
func (logger *Logger) Print(args ...interface{}) {
	entry := logger.newEntry()
	entry.Print(args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warn(args ...interface{}) {
	logger.Log(WarnLevel, args...)
}

func (logger *Logger) Warning(args ...interface{}) {
	logger.Warn(args...)
}

func (logger *Logger) Error(args ...interface{}) {
	logger.Log(ErrorLevel, args...)
}

// Fatal logs at FatalLevel and then terminates the process via logger.Exit(1).
func (logger *Logger) Fatal(args ...interface{}) {
	logger.Log(FatalLevel, args...)
	logger.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
	logger.Log(PanicLevel, args...)
}

func (logger *Logger) TraceFn(fn LogFunction) {
	logger.LogFn(TraceLevel, fn)
}

func (logger *Logger) DebugFn(fn LogFunction) {
	logger.LogFn(DebugLevel, fn)
}

func (logger *Logger) InfoFn(fn LogFunction) {
	logger.LogFn(InfoLevel, fn)
}

// PrintFn always evaluates fn and logs unconditionally (no level check).
func (logger *Logger) PrintFn(fn LogFunction) {
	entry := logger.newEntry()
	entry.Print(fn()...)
	logger.releaseEntry(entry)
}

func (logger *Logger) WarnFn(fn LogFunction) {
	logger.LogFn(WarnLevel, fn)
}

func (logger *Logger) WarningFn(fn LogFunction) {
	logger.WarnFn(fn)
}

func (logger *Logger) ErrorFn(fn LogFunction) {
	logger.LogFn(ErrorLevel, fn)
}

// FatalFn logs at FatalLevel and then terminates the process via logger.Exit(1).
func (logger *Logger) FatalFn(fn LogFunction) {
	logger.LogFn(FatalLevel, fn)
	logger.Exit(1)
}

func (logger *Logger) PanicFn(fn LogFunction) {
	logger.LogFn(PanicLevel, fn)
}
|||
|
|||
// Logln logs args in Println style at the given level, if that level is
// enabled.
func (logger *Logger) Logln(level Level, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Logln(level, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Traceln(args ...interface{}) {
	logger.Logln(TraceLevel, args...)
}

func (logger *Logger) Debugln(args ...interface{}) {
	logger.Logln(DebugLevel, args...)
}

func (logger *Logger) Infoln(args ...interface{}) {
	logger.Logln(InfoLevel, args...)
}

// Println logs unconditionally (no level check).
func (logger *Logger) Println(args ...interface{}) {
	entry := logger.newEntry()
	entry.Println(args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warnln(args ...interface{}) {
	logger.Logln(WarnLevel, args...)
}

func (logger *Logger) Warningln(args ...interface{}) {
	logger.Warnln(args...)
}

func (logger *Logger) Errorln(args ...interface{}) {
	logger.Logln(ErrorLevel, args...)
}

// Fatalln logs at FatalLevel and then terminates the process via logger.Exit(1).
func (logger *Logger) Fatalln(args ...interface{}) {
	logger.Logln(FatalLevel, args...)
	logger.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
	logger.Logln(PanicLevel, args...)
}
|||
|
|||
// Exit runs the registered exit handlers and then terminates the process via
// ExitFunc, defaulting to os.Exit when unset.
func (logger *Logger) Exit(code int) {
	runHandlers()
	if logger.ExitFunc == nil {
		logger.ExitFunc = os.Exit
	}
	logger.ExitFunc(code)
}

// SetNoLock disables the logger's write lock. When the file is opened with
// appending mode, it's safe to write concurrently to a file (within 4k
// message on Linux). In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
	logger.mu.Disable()
}
|||
|
|||
// level atomically reads the current level; SetLevel may run concurrently
// with logging calls, so the field is never read directly.
func (logger *Logger) level() Level {
	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}

// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) {
	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

// GetLevel returns the logger level.
func (logger *Logger) GetLevel() Level {
	return logger.level()
}
|||
|
|||
// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Hooks.Add(hook)
}

// IsLevelEnabled checks if the log level of the logger is greater than the
// level param.
func (logger *Logger) IsLevelEnabled(level Level) bool {
	return logger.level() >= level
}

// SetFormatter sets the logger formatter.
func (logger *Logger) SetFormatter(formatter Formatter) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Formatter = formatter
}

// SetOutput sets the logger output.
func (logger *Logger) SetOutput(output io.Writer) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Out = output
}

// SetReportCaller toggles inclusion of calling-method info in entries.
func (logger *Logger) SetReportCaller(reportCaller bool) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.ReportCaller = reportCaller
}

// ReplaceHooks replaces the logger hooks and returns the old ones.
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
	logger.mu.Lock()
	oldHooks := logger.Hooks
	logger.Hooks = hooks
	logger.mu.Unlock()
	return oldHooks
}

// SetBufferPool sets the logger buffer pool.
func (logger *Logger) SetBufferPool(pool BufferPool) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.BufferPool = pool
}
@ -0,0 +1,186 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"log" |
|||
"strings" |
|||
) |
|||
|
|||
// Fields type, used to pass to `WithFields`. Keys are the field names as they
// appear in the formatted output.
type Fields map[string]interface{}
|||
|
|||
// Level type
|
|||
type Level uint32 |
|||
|
|||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
|||
func (level Level) String() string { |
|||
if b, err := level.MarshalText(); err == nil { |
|||
return string(b) |
|||
} else { |
|||
return "unknown" |
|||
} |
|||
} |
|||
|
|||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
|||
func ParseLevel(lvl string) (Level, error) { |
|||
switch strings.ToLower(lvl) { |
|||
case "panic": |
|||
return PanicLevel, nil |
|||
case "fatal": |
|||
return FatalLevel, nil |
|||
case "error": |
|||
return ErrorLevel, nil |
|||
case "warn", "warning": |
|||
return WarnLevel, nil |
|||
case "info": |
|||
return InfoLevel, nil |
|||
case "debug": |
|||
return DebugLevel, nil |
|||
case "trace": |
|||
return TraceLevel, nil |
|||
} |
|||
|
|||
var l Level |
|||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl) |
|||
} |
|||
|
|||
// UnmarshalText implements encoding.TextUnmarshaler.
|
|||
func (level *Level) UnmarshalText(text []byte) error { |
|||
l, err := ParseLevel(string(text)) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
*level = l |
|||
|
|||
return nil |
|||
} |
|||
|
|||
func (level Level) MarshalText() ([]byte, error) { |
|||
switch level { |
|||
case TraceLevel: |
|||
return []byte("trace"), nil |
|||
case DebugLevel: |
|||
return []byte("debug"), nil |
|||
case InfoLevel: |
|||
return []byte("info"), nil |
|||
case WarnLevel: |
|||
return []byte("warning"), nil |
|||
case ErrorLevel: |
|||
return []byte("error"), nil |
|||
case FatalLevel: |
|||
return []byte("fatal"), nil |
|||
case PanicLevel: |
|||
return []byte("panic"), nil |
|||
} |
|||
|
|||
return nil, fmt.Errorf("not a valid logrus level %d", level) |
|||
} |
|||
|
|||
// A constant exposing all logging levels
|
|||
var AllLevels = []Level{ |
|||
PanicLevel, |
|||
FatalLevel, |
|||
ErrorLevel, |
|||
WarnLevel, |
|||
InfoLevel, |
|||
DebugLevel, |
|||
TraceLevel, |
|||
} |
|||
|
|||
// These are the different logging levels. You can set the logging level to log
|
|||
// on your instance of logger, obtained with `logrus.New()`.
|
|||
const ( |
|||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
|||
// message passed to Debug, Info, ...
|
|||
PanicLevel Level = iota |
|||
// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
|
|||
// logging level is set to Panic.
|
|||
FatalLevel |
|||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
|||
// Commonly used for hooks to send errors to an error tracking service.
|
|||
ErrorLevel |
|||
// WarnLevel level. Non-critical entries that deserve eyes.
|
|||
WarnLevel |
|||
// InfoLevel level. General operational entries about what's going on inside the
|
|||
// application.
|
|||
InfoLevel |
|||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
|||
DebugLevel |
|||
// TraceLevel level. Designates finer-grained informational events than the Debug.
|
|||
TraceLevel |
|||
) |
|||
|
|||
// Won't compile if StdLogger can't be realized by a log.Logger.
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types.
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})

	// IsDebugEnabled() bool
	// IsInfoEnabled() bool
	// IsWarnEnabled() bool
	// IsErrorEnabled() bool
	// IsFatalEnabled() bool
	// IsPanicEnabled() bool
}

// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
// here for consistency. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
	FieldLogger
	Tracef(format string, args ...interface{})
	Trace(args ...interface{})
	Traceln(args ...interface{})
}
@ -0,0 +1,11 @@ |
|||
// +build appengine

package logrus

import (
	"io"
)

// checkIfTerminal always reports true: App Engine offers no way to probe the
// writer, so terminal features are assumed available.
func checkIfTerminal(w io.Writer) bool {
	return true
}
@ -0,0 +1,13 @@ |
|||
// +build darwin dragonfly freebsd netbsd openbsd
// +build !js

package logrus

import "golang.org/x/sys/unix"

// ioctlReadTermios is the BSD-family ioctl for reading terminal attributes.
const ioctlReadTermios = unix.TIOCGETA

// isTerminal reports whether fd refers to a terminal: the termios ioctl only
// succeeds on a tty.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
@ -0,0 +1,7 @@ |
|||
// +build js

package logrus

// isTerminal always reports false: there is no terminal under js/wasm.
func isTerminal(fd int) bool {
	return false
}
@ -0,0 +1,11 @@ |
|||
// +build js nacl plan9

package logrus

import (
	"io"
)

// checkIfTerminal always reports false on platforms without terminal support.
func checkIfTerminal(w io.Writer) bool {
	return false
}
@ -0,0 +1,17 @@ |
|||
// +build !appengine,!js,!windows,!nacl,!plan9

package logrus

import (
	"io"
	"os"
)

// checkIfTerminal reports whether w is an *os.File backed by a terminal; any
// other writer type is never treated as a terminal.
func checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		return isTerminal(int(v.Fd()))
	default:
		return false
	}
}
@ -0,0 +1,11 @@ |
|||
package logrus

import (
	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal
// (the TCGETA termio ioctl only succeeds on a tty).
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
	return err == nil
}
@ -0,0 +1,13 @@ |
|||
// +build linux aix zos
// +build !js

package logrus

import "golang.org/x/sys/unix"

// ioctlReadTermios is the ioctl for reading terminal attributes on these
// platforms.
const ioctlReadTermios = unix.TCGETS

// isTerminal reports whether fd refers to a terminal: the termios ioctl only
// succeeds on a tty.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
@ -0,0 +1,27 @@ |
|||
// +build !appengine,!js,windows

package logrus

import (
	"io"
	"os"

	"golang.org/x/sys/windows"
)

// checkIfTerminal reports whether w is a Windows console. As a side effect it
// enables ENABLE_VIRTUAL_TERMINAL_PROCESSING on the console so ANSI colors
// render correctly; failure of either console call means "not a terminal".
func checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		handle := windows.Handle(v.Fd())
		var mode uint32
		if err := windows.GetConsoleMode(handle, &mode); err != nil {
			return false
		}
		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
		if err := windows.SetConsoleMode(handle, mode); err != nil {
			return false
		}
		return true
	}
	return false
}
@ -0,0 +1,339 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"fmt" |
|||
"os" |
|||
"runtime" |
|||
"sort" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
"unicode/utf8" |
|||
) |
|||
|
|||
// ANSI SGR color codes used by the colored terminal output.
const (
	red    = 31
	yellow = 33
	blue   = 36
	gray   = 37
)

// baseTimestamp is captured at package init; used as the reference point when
// printing time elapsed since the beginning of execution instead of a full
// timestamp (see FullTimestamp).
var baseTimestamp time.Time

func init() {
	baseTimestamp = time.Now()
}
|||
|
|||
// TextFormatter formats logs into text.
type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Force quoting of all values.
	ForceQuote bool

	// DisableQuote disables quoting for all values.
	// DisableQuote will have a lower priority than ForceQuote.
	// If both of them are set to true, quote will be forced on all values.
	DisableQuote bool

	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
	EnvironmentOverrideColors bool

	// Disable timestamp logging. Useful when output is redirected to a logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed.
	// The format to use is the same as for time.Format or time.Parse from the
	// standard library.
	// The standard library already provides a set of predefined formats.
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool

	// The keys sorting function, when uninitialized it uses sort.Strings.
	SortingFunc func([]string)

	// Disables the truncation of the level text to 4 characters.
	DisableLevelTruncation bool

	// PadLevelText adds padding to the level text so that all the levels output
	// at the same length. PadLevelText is a superset of the
	// DisableLevelTruncation option.
	PadLevelText bool

	// QuoteEmptyFields will wrap empty fields in quotes if true.
	QuoteEmptyFields bool

	// Whether the logger's out is to a terminal; set once in init().
	isTerminal bool

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &TextFormatter{
	//     FieldMap: FieldMap{
	//         FieldKeyTime:  "@timestamp",
	//         FieldKeyLevel: "@level",
	//         FieldKeyMsg:   "@message"}}
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// terminalInitOnce guards the one-time setup performed by init().
	terminalInitOnce sync.Once

	// The max length of the level text, generated dynamically on init.
	levelTextMaxLength int
}
|||
|
|||
// init performs one-time setup: it detects whether the logger's output is a
// terminal and computes the widest level-name length used for padding.
func (f *TextFormatter) init(entry *Entry) {
	if entry.Logger != nil {
		f.isTerminal = checkIfTerminal(entry.Logger.Out)
	}
	// Get the max length of the level text
	for _, level := range AllLevels {
		levelTextLength := utf8.RuneCount([]byte(level.String()))
		if levelTextLength > f.levelTextMaxLength {
			f.levelTextMaxLength = levelTextLength
		}
	}
}
|||
|
|||
func (f *TextFormatter) isColored() bool { |
|||
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) |
|||
|
|||
if f.EnvironmentOverrideColors { |
|||
switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { |
|||
case ok && force != "0": |
|||
isColored = true |
|||
case ok && force == "0", os.Getenv("CLICOLOR") == "0": |
|||
isColored = false |
|||
} |
|||
} |
|||
|
|||
return isColored && !f.DisableColors |
|||
} |
|||
|
|||
// Format renders a single log entry as a line of key=value text, or as
// colored terminal output when colors are enabled.
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields)
	for k, v := range entry.Data {
		data[k] = v
	}
	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}

	var funcVal, fileVal string

	// fixedKeys lists the default keys (time, level, msg, error, caller)
	// first; the user-supplied keys are appended afterwards.
	fixedKeys := make([]string, 0, 4+len(data))
	if !f.DisableTimestamp {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
	}
	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
	if entry.Message != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
	}
	if entry.err != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
	}
	if entry.HasCaller() {
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		} else {
			funcVal = entry.Caller.Function
			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		}

		// Empty strings returned by CallerPrettyfier suppress the key.
		if funcVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
		}
		if fileVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
		}
	}

	if !f.DisableSorting {
		if f.SortingFunc == nil {
			sort.Strings(keys)
			fixedKeys = append(fixedKeys, keys...)
		} else {
			// With a custom sorting function: for plain output all keys
			// (defaults included) are sorted together; for colored output only
			// the user keys are sorted, because printColored emits the default
			// fields itself.
			if !f.isColored() {
				fixedKeys = append(fixedKeys, keys...)
				f.SortingFunc(fixedKeys)
			} else {
				f.SortingFunc(keys)
			}
		}
	} else {
		fixedKeys = append(fixedKeys, keys...)
	}

	// Reuse the entry's buffer when available to avoid an allocation.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	// Terminal detection and level-width computation happen once per formatter.
	f.terminalInitOnce.Do(func() { f.init(entry) })

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}
	if f.isColored() {
		f.printColored(b, entry, keys, data, timestampFormat)
	} else {

		for _, key := range fixedKeys {
			var value interface{}
			switch {
			case key == f.FieldMap.resolve(FieldKeyTime):
				value = entry.Time.Format(timestampFormat)
			case key == f.FieldMap.resolve(FieldKeyLevel):
				value = entry.Level.String()
			case key == f.FieldMap.resolve(FieldKeyMsg):
				value = entry.Message
			case key == f.FieldMap.resolve(FieldKeyLogrusError):
				value = entry.err
			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
				value = funcVal
			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
				value = fileVal
			default:
				value = data[key]
			}
			f.appendKeyValue(b, key, value)
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}
|||
|
|||
// printColored renders entry into b with ANSI color escapes: a colored
// (possibly truncated or padded) level tag, then — depending on the
// formatter's settings — a timestamp, the caller description, the message,
// and finally each user field as a colored key=value pair.
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
	// Pick the ANSI color code for the level tag.
	var levelColor int
	switch entry.Level {
	case DebugLevel, TraceLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	case InfoLevel:
		levelColor = blue
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())
	if !f.DisableLevelTruncation && !f.PadLevelText {
		// Truncate to the conventional four-character tag ("INFO", "WARN", ...).
		levelText = levelText[0:4]
	}
	if f.PadLevelText {
		// Generates the format string used in the next line, for example "%-6s" or "%-7s".
		// Based on the max level text length.
		formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
		// Formats the level text by appending spaces up to the max length, for example:
		// - "INFO   "
		// - "WARNING"
		levelText = fmt.Sprintf(formatString, levelText)
	}

	// Remove a single newline if it already exists in the message to keep
	// the behavior of logrus text_formatter the same as the stdlib log package.
	entry.Message = strings.TrimSuffix(entry.Message, "\n")

	// Build the caller description: "file func()", or whichever part is
	// non-empty when a CallerPrettyfier suppresses one of them.
	caller := ""
	if entry.HasCaller() {
		funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)

		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}

		if fileVal == "" {
			caller = funcVal
		} else if funcVal == "" {
			caller = fileVal
		} else {
			caller = fileVal + " " + funcVal
		}
	}

	// Header: colored level, then either no timestamp, seconds since
	// program start, or a fully formatted timestamp.
	switch {
	case f.DisableTimestamp:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
	case !f.FullTimestamp:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
	default:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
	}
	// Trailing user fields, with colored keys.
	for _, k := range keys {
		v := data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
		f.appendValue(b, v)
	}
}
|||
|
|||
func (f *TextFormatter) needsQuoting(text string) bool { |
|||
if f.ForceQuote { |
|||
return true |
|||
} |
|||
if f.QuoteEmptyFields && len(text) == 0 { |
|||
return true |
|||
} |
|||
if f.DisableQuote { |
|||
return false |
|||
} |
|||
for _, ch := range text { |
|||
if !((ch >= 'a' && ch <= 'z') || |
|||
(ch >= 'A' && ch <= 'Z') || |
|||
(ch >= '0' && ch <= '9') || |
|||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { |
|||
if b.Len() > 0 { |
|||
b.WriteByte(' ') |
|||
} |
|||
b.WriteString(key) |
|||
b.WriteByte('=') |
|||
f.appendValue(b, value) |
|||
} |
|||
|
|||
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { |
|||
stringVal, ok := value.(string) |
|||
if !ok { |
|||
stringVal = fmt.Sprint(value) |
|||
} |
|||
|
|||
if !f.needsQuoting(stringVal) { |
|||
b.WriteString(stringVal) |
|||
} else { |
|||
b.WriteString(fmt.Sprintf("%q", stringVal)) |
|||
} |
|||
} |
@ -0,0 +1,102 @@ |
|||
package logrus |
|||
|
|||
import ( |
|||
"bufio" |
|||
"io" |
|||
"runtime" |
|||
"strings" |
|||
) |
|||
|
|||
// Writer returns an io.PipeWriter that logs each line written to it
// at INFO level. See WriterLevel for details.
func (logger *Logger) Writer() *io.PipeWriter {
	return logger.WriterLevel(InfoLevel)
}
|||
|
|||
// WriterLevel returns an io.Writer that can be used to write arbitrary text to
// the logger at the given log level. Each line written to the writer will be
// printed in the usual way using formatters and hooks. The writer is part of an
// io.Pipe and it is the caller's responsibility to close the writer when done.
// This can be used to override the standard library logger easily.
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	return NewEntry(logger).WriterLevel(level)
}
|||
|
|||
// Writer returns an io.Writer that writes to the logger at the info log level.
func (entry *Entry) Writer() *io.PipeWriter {
	return entry.WriterLevel(InfoLevel)
}
|||
|
|||
// WriterLevel returns an io.Writer that writes to the logger at the given log level
|
|||
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { |
|||
reader, writer := io.Pipe() |
|||
|
|||
var printFunc func(args ...interface{}) |
|||
|
|||
// Determine which log function to use based on the specified log level
|
|||
switch level { |
|||
case TraceLevel: |
|||
printFunc = entry.Trace |
|||
case DebugLevel: |
|||
printFunc = entry.Debug |
|||
case InfoLevel: |
|||
printFunc = entry.Info |
|||
case WarnLevel: |
|||
printFunc = entry.Warn |
|||
case ErrorLevel: |
|||
printFunc = entry.Error |
|||
case FatalLevel: |
|||
printFunc = entry.Fatal |
|||
case PanicLevel: |
|||
printFunc = entry.Panic |
|||
default: |
|||
printFunc = entry.Print |
|||
} |
|||
|
|||
// Start a new goroutine to scan the input and write it to the logger using the specified print function.
|
|||
// It splits the input into chunks of up to 64KB to avoid buffer overflows.
|
|||
go entry.writerScanner(reader, printFunc) |
|||
|
|||
// Set a finalizer function to close the writer when it is garbage collected
|
|||
runtime.SetFinalizer(writer, writerFinalizer) |
|||
|
|||
return writer |
|||
} |
|||
|
|||
// writerScanner reads from reader and forwards each line (or 64KB chunk,
// whichever comes first) to the logger via printFunc, closing the reader
// when the stream ends.
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
	scanner := bufio.NewScanner(reader)

	// Set the buffer size to the maximum token size to avoid buffer overflows.
	scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)

	// Define a split function to split the input into chunks of up to 64KB.
	chunkSize := bufio.MaxScanTokenSize // 64KB
	splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
		if len(data) >= chunkSize {
			// A full chunk is buffered: emit it immediately rather than
			// waiting for a newline that may never fit in the buffer.
			return chunkSize, data[:chunkSize], nil
		}

		// Otherwise fall back to ordinary newline-based splitting.
		return bufio.ScanLines(data, atEOF)
	}

	// Use the custom split function to split the input.
	scanner.Split(splitFunc)

	// Scan the input and write it to the logger using the specified print function.
	for scanner.Scan() {
		printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
	}

	// If there was an error while scanning the input, log an error.
	if err := scanner.Err(); err != nil {
		entry.Errorf("Error while reading from Writer: %s", err)
	}

	// Close the reader when we are done.
	reader.Close()
}
|||
|
|||
// writerFinalizer is a finalizer function that closes the given writer when
// it is garbage collected, so an abandoned writer does not leak the pipe or
// its pumping goroutine.
func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}
@ -0,0 +1,27 @@ |
|||
Copyright (c) 2009 The Go Authors. All rights reserved. |
|||
|
|||
Redistribution and use in source and binary forms, with or without |
|||
modification, are permitted provided that the following conditions are |
|||
met: |
|||
|
|||
* Redistributions of source code must retain the above copyright |
|||
notice, this list of conditions and the following disclaimer. |
|||
* Redistributions in binary form must reproduce the above |
|||
copyright notice, this list of conditions and the following disclaimer |
|||
in the documentation and/or other materials provided with the |
|||
distribution. |
|||
* Neither the name of Google Inc. nor the names of its |
|||
contributors may be used to endorse or promote products derived from |
|||
this software without specific prior written permission. |
|||
|
|||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
|||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
|||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
|||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
|||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
|||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,22 @@ |
|||
Additional IP Rights Grant (Patents) |
|||
|
|||
"This implementation" means the copyrightable works distributed by |
|||
Google as part of the Go project. |
|||
|
|||
Google hereby grants to You a perpetual, worldwide, non-exclusive, |
|||
no-charge, royalty-free, irrevocable (except as stated in this section) |
|||
patent license to make, have made, use, offer to sell, sell, import, |
|||
transfer and otherwise run, modify and propagate the contents of this |
|||
implementation of Go, where such license applies only to those patent |
|||
claims, both currently owned or controlled by Google and acquired in |
|||
the future, licensable by Google that are necessarily infringed by this |
|||
implementation of Go. This grant does not include claims that would be |
|||
infringed only as a consequence of further modification of this |
|||
implementation. If you or your agent or exclusive licensee institute or |
|||
order or agree to the institution of patent litigation against any |
|||
entity (including a cross-claim or counterclaim in a lawsuit) alleging |
|||
that this implementation of Go or any code incorporated within this |
|||
implementation of Go constitutes direct or contributory patent |
|||
infringement, or inducement of patent infringement, then any patent |
|||
rights granted to you under this License for this implementation of Go |
|||
shall terminate as of the date such litigation is filed. |
@ -0,0 +1,50 @@ |
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Package httpguts provides functions implementing various details
|
|||
// of the HTTP specification.
|
|||
//
|
|||
// This package is shared by the standard library (which vendors it)
|
|||
// and x/net/http2. It comes with no API stability promise.
|
|||
package httpguts |
|||
|
|||
import ( |
|||
"net/textproto" |
|||
"strings" |
|||
) |
|||
|
|||
// badTrailer is the set of canonical header names that must not appear in
// trailers. See RFC 7230, Section 4.1.2.
var badTrailer = map[string]bool{
	"Authorization":       true,
	"Cache-Control":       true,
	"Connection":          true,
	"Content-Encoding":    true,
	"Content-Length":      true,
	"Content-Range":       true,
	"Content-Type":        true,
	"Expect":              true,
	"Host":                true,
	"Keep-Alive":          true,
	"Max-Forwards":        true,
	"Pragma":              true,
	"Proxy-Authenticate":  true,
	"Proxy-Authorization": true,
	"Proxy-Connection":    true,
	"Range":               true,
	"Realm":               true,
	"Te":                  true,
	"Trailer":             true,
	"Transfer-Encoding":   true,
	"Www-Authenticate":    true,
}

// ValidTrailerHeader reports whether name is a valid header field name to
// appear in trailers: not a conditional ("If-") header and not one of the
// headers forbidden by RFC 7230, Section 4.1.2. The check is performed on
// the canonical MIME form of name.
func ValidTrailerHeader(name string) bool {
	canon := textproto.CanonicalMIMEHeaderKey(name)
	return !strings.HasPrefix(canon, "If-") && !badTrailer[canon]
}
@ -0,0 +1,352 @@ |
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package httpguts |
|||
|
|||
import ( |
|||
"net" |
|||
"strings" |
|||
"unicode/utf8" |
|||
|
|||
"golang.org/x/net/idna" |
|||
) |
|||
|
|||
// isTokenTable marks which ASCII bytes are valid RFC 7230 "tchar" token
// characters; index it by byte value (0-126). tchar is ALPHA / DIGIT /
// "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / "^" / "_" /
// "`" / "|" / "~".
var isTokenTable = [127]bool{
	'!': true, '#': true, '$': true, '%': true, '&': true, '\'': true,
	'*': true, '+': true, '-': true, '.': true,
	'0': true, '1': true, '2': true, '3': true, '4': true,
	'5': true, '6': true, '7': true, '8': true, '9': true,
	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true,
	'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true,
	'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true,
	'S': true, 'T': true, 'U': true, 'W': true, 'V': true, 'X': true,
	'Y': true, 'Z': true,
	'^': true, '_': true, '`': true,
	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true,
	'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true,
	'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true,
	's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,
	'|': true, '~': true,
}
|||
|
|||
func IsTokenRune(r rune) bool { |
|||
i := int(r) |
|||
return i < len(isTokenTable) && isTokenTable[i] |
|||
} |
|||
|
|||
func isNotToken(r rune) bool { |
|||
return !IsTokenRune(r) |
|||
} |
|||
|
|||
// HeaderValuesContainsToken reports whether any string in values
|
|||
// contains the provided token, ASCII case-insensitively.
|
|||
func HeaderValuesContainsToken(values []string, token string) bool { |
|||
for _, v := range values { |
|||
if headerValueContainsToken(v, token) { |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// isOWS reports whether b is an optional whitespace byte (SP or HTAB), as
// defined by RFC 7230 section 3.2.3.
func isOWS(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
|||
|
|||
// trimOWS returns x with all optional whitespace (SP/HTAB) removed from the
// beginning and end.
func trimOWS(x string) string {
	// ASCII-only scanning; deliberately avoids strings.Trim (see issue 10292).
	start := 0
	for start < len(x) && (x[start] == ' ' || x[start] == '\t') {
		start++
	}
	end := len(x)
	for end > start && (x[end-1] == ' ' || x[end-1] == '\t') {
		end--
	}
	return x[start:end]
}
|||
|
|||
// headerValueContainsToken reports whether v (assumed to be a
|
|||
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
|||
// contains token amongst its comma-separated tokens, ASCII
|
|||
// case-insensitively.
|
|||
func headerValueContainsToken(v string, token string) bool { |
|||
for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') { |
|||
if tokenEqual(trimOWS(v[:comma]), token) { |
|||
return true |
|||
} |
|||
v = v[comma+1:] |
|||
} |
|||
return tokenEqual(trimOWS(v), token) |
|||
} |
|||
|
|||
// lowerASCII returns the ASCII lowercase version of b; bytes outside
// 'A'-'Z' pass through unchanged.
func lowerASCII(b byte) byte {
	if b < 'A' || b > 'Z' {
		return b
	}
	return b - 'A' + 'a'
}
|||
|
|||
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
// Tokens are ASCII-only, so any byte >= 0x80 in t1 makes the comparison fail.
func tokenEqual(t1, t2 string) bool {
	if len(t1) != len(t2) {
		return false
	}
	for i := 0; i < len(t1); i++ {
		c1, c2 := t1[i], t2[i]
		if c1 >= 0x80 {
			// No UTF-8 or non-ASCII allowed in tokens.
			return false
		}
		if 'A' <= c1 && c1 <= 'Z' {
			c1 += 'a' - 'A'
		}
		if 'A' <= c2 && c2 <= 'Z' {
			c2 += 'a' - 'A'
		}
		if c1 != c2 {
			return false
		}
	}
	return true
}
|||
|
|||
// isLWS reports whether b is linear white space, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
//
//	LWS = [CRLF] 1*( SP | HT )
func isLWS(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	default:
		return false
	}
}
|||
|
|||
// isCTL reports whether b is a control byte, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2:
// octets 0-31 and DEL (127).
func isCTL(b byte) bool {
	return b < 0x20 || b == 0x7f
}
|||
|
|||
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
|||
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
|||
// letters are not allowed.
|
|||
//
|
|||
// RFC 7230 says:
|
|||
//
|
|||
// header-field = field-name ":" OWS field-value OWS
|
|||
// field-name = token
|
|||
// token = 1*tchar
|
|||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
|||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
|||
func ValidHeaderFieldName(v string) bool { |
|||
if len(v) == 0 { |
|||
return false |
|||
} |
|||
for _, r := range v { |
|||
if !IsTokenRune(r) { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// ValidHostHeader reports whether h is a valid host header.
|
|||
func ValidHostHeader(h string) bool { |
|||
// The latest spec is actually this:
|
|||
//
|
|||
// http://tools.ietf.org/html/rfc7230#section-5.4
|
|||
// Host = uri-host [ ":" port ]
|
|||
//
|
|||
// Where uri-host is:
|
|||
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
|||
//
|
|||
// But we're going to be much more lenient for now and just
|
|||
// search for any byte that's not a valid byte in any of those
|
|||
// expressions.
|
|||
for i := 0; i < len(h); i++ { |
|||
if !validHostByte[h[i]] { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// validHostByte marks the bytes permitted in a "host" or "host:port" header
// value. See the ValidHostHeader comment for the (deliberately lenient)
// grammar being approximated.
var validHostByte = [256]bool{
	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
	'8': true, '9': true,

	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
	'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
	'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,

	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
	'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
	'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
	'Y': true, 'Z': true,

	'!':  true, // sub-delims
	'$':  true, // sub-delims
	'%':  true, // pct-encoded (and used in IPv6 zones)
	'&':  true, // sub-delims
	'(':  true, // sub-delims
	')':  true, // sub-delims
	'*':  true, // sub-delims
	'+':  true, // sub-delims
	',':  true, // sub-delims
	'-':  true, // unreserved
	'.':  true, // unreserved
	':':  true, // IPv6address + Host expression's optional port
	';':  true, // sub-delims
	'=':  true, // sub-delims
	'[':  true, // IP-literal bracket
	'\'': true, // sub-delims
	']':  true, // IP-literal bracket
	'_':  true, // unreserved
	'~':  true, // unreserved
}
|||
|
|||
// ValidHeaderFieldValue reports whether v is a valid "field-value" according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2:
//
//	message-header = field-name ":" [ field-value ]
//	field-value    = *( field-content | LWS )
//	TEXT           = <any OCTET except CTLs, but including LWS>
//	CTL            = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
//
// RFC 7230 narrows this to field-vchar (VCHAR / obs-text) separated by
// SP/HTAB, and http2 requires that CR, LF and NUL in particular be rejected
// as malformed (they are CTLs, so this check covers them).
//
// This function does not (yet?) properly handle the rejection of
// strings that begin or end with SP or HTAB.
func ValidHeaderFieldValue(v string) bool {
	for i := 0; i < len(v); i++ {
		// Reject CTLs (octets 0-31 and DEL 127) except HTAB, which counts
		// as linear white space and is permitted.
		if b := v[i]; (b < 0x20 && b != '\t') || b == 0x7f {
			return false
		}
	}
	return true
}
|||
|
|||
// isASCII reports whether every byte of s is 7-bit ASCII.
func isASCII(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|||
|
|||
// PunycodeHostPort returns the IDNA Punycode version of the provided "host"
// or "host:port" string. Input that is already pure ASCII is returned
// unchanged; non-representable input yields an error.
func PunycodeHostPort(v string) (string, error) {
	if isASCII(v) {
		return v, nil
	}

	host, port, err := net.SplitHostPort(v)
	if err != nil {
		// The input 'v' argument was just a "host" argument,
		// without a port. This error should not be returned
		// to the caller.
		host = v
		port = ""
	}
	host, err = idna.ToASCII(host)
	if err != nil {
		// Non-UTF-8? Not representable in Punycode, in any
		// case.
		return "", err
	}
	if port == "" {
		return host, nil
	}
	return net.JoinHostPort(host, port), nil
}
@ -0,0 +1,2 @@ |
|||
*~ |
|||
h2i/h2i |
@ -0,0 +1,51 @@ |
|||
# |
|||
# This Dockerfile builds a recent curl with HTTP/2 client support, using |
|||
# a recent nghttp2 build. |
|||
# |
|||
# See the Makefile for how to tag it. If Docker and that image is found, the |
|||
# Go tests use this curl binary for integration tests. |
|||
# |
|||
|
|||
FROM ubuntu:trusty |
|||
|
|||
RUN apt-get update && \ |
|||
apt-get upgrade -y && \ |
|||
apt-get install -y git-core build-essential wget |
|||
|
|||
RUN apt-get install -y --no-install-recommends \ |
|||
autotools-dev libtool pkg-config zlib1g-dev \ |
|||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \ |
|||
automake autoconf |
|||
|
|||
# The list of packages nghttp2 recommends for h2load: |
|||
RUN apt-get install -y --no-install-recommends make binutils \ |
|||
autoconf automake autotools-dev \ |
|||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ |
|||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \ |
|||
cython python3.4-dev python-setuptools |
|||
|
|||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: |
|||
ENV NGHTTP2_VER 895da9a |
|||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git |
|||
|
|||
WORKDIR /root/nghttp2 |
|||
RUN git reset --hard $NGHTTP2_VER |
|||
RUN autoreconf -i |
|||
RUN automake |
|||
RUN autoconf |
|||
RUN ./configure |
|||
RUN make |
|||
RUN make install |
|||
|
|||
WORKDIR /root |
|||
RUN wget https://curl.se/download/curl-7.45.0.tar.gz |
|||
RUN tar -zxvf curl-7.45.0.tar.gz |
|||
WORKDIR /root/curl-7.45.0 |
|||
RUN ./configure --with-ssl --with-nghttp2=/usr/local |
|||
RUN make |
|||
RUN make install |
|||
RUN ldconfig |
|||
|
|||
CMD ["-h"] |
|||
ENTRYPOINT ["/usr/local/bin/curl"] |
|||
|
@ -0,0 +1,3 @@ |
|||
curlimage: |
|||
docker build -t gohttp2/curl . |
|||
|
@ -0,0 +1,53 @@ |
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package http2 |
|||
|
|||
import "strings" |
|||
|
|||
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
|
|||
// contains helper functions which may use Unicode-aware functions which would
|
|||
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
|
|||
|
|||
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively; non-ASCII bytes must match exactly.
func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	// Byte-wise comparison; order of iteration is irrelevant.
	for i := len(s) - 1; i >= 0; i-- {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

// lower returns the ASCII lowercase version of b; other bytes are unchanged.
func lower(b byte) byte {
	if b < 'A' || b > 'Z' {
		return b
	}
	// Setting bit 0x20 maps 'A'-'Z' onto 'a'-'z'.
	return b | 0x20
}
|||
|
|||
// isASCIIPrint reports whether s consists entirely of printable ASCII
// (0x20 ' ' through 0x7e '~'), according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
	for i := 0; i < len(s); i++ {
		if c := s[i]; c < 0x20 || c > 0x7e {
			return false
		}
	}
	return true
}
|||
|
|||
// asciiToLower returns the lowercase version of s when s is printable ASCII
// (0x20-0x7e), and reports whether it was; otherwise it returns "" and false.
func asciiToLower(s string) (lower string, ok bool) {
	for i := 0; i < len(s); i++ {
		if s[i] < ' ' || s[i] > '~' {
			return "", false
		}
	}
	return strings.ToLower(s), true
}
@ -0,0 +1,641 @@ |
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package http2 |
|||
|
|||
// A list of the possible cipher suite ids. Taken from
|
|||
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
|||
|
|||
const ( |
|||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 |
|||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 |
|||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 |
|||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 |
|||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 |
|||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 |
|||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 |
|||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 |
|||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 |
|||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 |
|||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A |
|||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B |
|||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C |
|||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D |
|||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E |
|||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F |
|||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 |
|||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 |
|||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 |
|||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 |
|||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 |
|||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 |
|||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 |
|||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 |
|||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 |
|||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 |
|||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A |
|||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B |
|||
// Reserved uint16 = 0x001C-1D
|
|||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E |
|||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F |
|||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 |
|||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 |
|||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 |
|||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 |
|||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 |
|||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 |
|||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 |
|||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 |
|||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 |
|||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 |
|||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A |
|||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B |
|||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C |
|||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D |
|||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E |
|||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F |
|||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 |
|||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 |
|||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 |
|||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 |
|||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 |
|||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 |
|||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 |
|||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 |
|||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 |
|||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 |
|||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A |
|||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B |
|||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C |
|||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D |
|||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E |
|||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F |
|||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 |
|||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 |
|||
// Reserved uint16 = 0x0047-4F
|
|||
// Reserved uint16 = 0x0050-58
|
|||
// Reserved uint16 = 0x0059-5C
|
|||
// Unassigned uint16 = 0x005D-5F
|
|||
// Reserved uint16 = 0x0060-66
|
|||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 |
|||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 |
|||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 |
|||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A |
|||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B |
|||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C |
|||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D |
|||
// Unassigned uint16 = 0x006E-83
|
|||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 |
|||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A |
|||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B |
|||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C |
|||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D |
|||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E |
|||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F |
|||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 |
|||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 |
|||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 |
|||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 |
|||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 |
|||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 |
|||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 |
|||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 |
|||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 |
|||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 |
|||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A |
|||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B |
|||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C |
|||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D |
|||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E |
|||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F |
|||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 |
|||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 |
|||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 |
|||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 |
|||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 |
|||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 |
|||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 |
|||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 |
|||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 |
|||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 |
|||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA |
|||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB |
|||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC |
|||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD |
|||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE |
|||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF |
|||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 |
|||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 |
|||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 |
|||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 |
|||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 |
|||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 |
|||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 |
|||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 |
|||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 |
|||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 |
|||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF |
|||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 |
|||
// Unassigned uint16 = 0x00C6-FE
|
|||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF |
|||
// Unassigned uint16 = 0x01-55,*
|
|||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 |
|||
// Unassigned uint16 = 0x5601 - 0xC000
|
|||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 |
|||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 |
|||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A |
|||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B |
|||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C |
|||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D |
|||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E |
|||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F |
|||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 |
|||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 |
|||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 |
|||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 |
|||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 |
|||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 |
|||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 |
|||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 |
|||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A |
|||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B |
|||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C |
|||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D |
|||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E |
|||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F |
|||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 |
|||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 |
|||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 |
|||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 |
|||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D |
|||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F |
|||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 |
|||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 |
|||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 |
|||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 |
|||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 |
|||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 |
|||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 |
|||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 |
|||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 |
|||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 |
|||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A |
|||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B |
|||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C |
|||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D |
|||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E |
|||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F |
|||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 |
|||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 |
|||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 |
|||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 |
|||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 |
|||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 |
|||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 |
|||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 |
|||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A |
|||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B |
|||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C |
|||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D |
|||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E |
|||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F |
|||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 |
|||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 |
|||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 |
|||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 |
|||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 |
|||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 |
|||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 |
|||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 |
|||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 |
|||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 |
|||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A |
|||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B |
|||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C |
|||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D |
|||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E |
|||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F |
|||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 |
|||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 |
|||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 |
|||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 |
|||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 |
|||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 |
|||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 |
|||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 |
|||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 |
|||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 |
|||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A |
|||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B |
|||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C |
|||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D |
|||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E |
|||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F |
|||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 |
|||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 |
|||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 |
|||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 |
|||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 |
|||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 |
|||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 |
|||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 |
|||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A |
|||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C |
|||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E |
|||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 |
|||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 |
|||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 |
|||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 |
|||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 |
|||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 |
|||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A |
|||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B |
|||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C |
|||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D |
|||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E |
|||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F |
|||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 |
|||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 |
|||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 |
|||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 |
|||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 |
|||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 |
|||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 |
|||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 |
|||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 |
|||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 |
|||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A |
|||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B |
|||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C |
|||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D |
|||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E |
|||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F |
|||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 |
|||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 |
|||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 |
|||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 |
|||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 |
|||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 |
|||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 |
|||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 |
|||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 |
|||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 |
|||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA |
|||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE |
|||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF |
|||
// Unassigned uint16 = 0xC0B0-FF
|
|||
// Unassigned uint16 = 0xC1-CB,*
|
|||
// Unassigned uint16 = 0xCC00-A7
|
|||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 |
|||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 |
|||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA |
|||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB |
|||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC |
|||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD |
|||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE |
|||
) |
|||
|
|||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
//
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
//
// The switch below is an exact transcription of that appendix: it returns
// true for every suite listed there and false for everything else. Any
// change to the case list changes which TLS connections the server will
// accept for HTTP/2, so entries must only be added or removed against the
// RFC text itself.
func isBadCipher(cipher uint16) bool {
	switch cipher {
	case cipher_TLS_NULL_WITH_NULL_NULL,
		cipher_TLS_RSA_WITH_NULL_MD5,
		cipher_TLS_RSA_WITH_NULL_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_RSA_WITH_RC4_128_MD5,
		cipher_TLS_RSA_WITH_RC4_128_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
		cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_DH_anon_WITH_RC4_128_MD5,
		cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
		cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_SHA,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_RC4_128_SHA,
		cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_MD5,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
		cipher_TLS_KRB5_WITH_RC4_128_MD5,
		cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_PSK_WITH_NULL_SHA,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_NULL_SHA256,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_PSK_WITH_RC4_128_SHA,
		cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
		cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_NULL_SHA256,
		cipher_TLS_PSK_WITH_NULL_SHA384,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
		cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_NULL_SHA,
		cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_AES_128_CCM,
		cipher_TLS_RSA_WITH_AES_256_CCM,
		cipher_TLS_RSA_WITH_AES_128_CCM_8,
		cipher_TLS_RSA_WITH_AES_256_CCM_8,
		cipher_TLS_PSK_WITH_AES_128_CCM,
		cipher_TLS_PSK_WITH_AES_256_CCM,
		cipher_TLS_PSK_WITH_AES_128_CCM_8,
		cipher_TLS_PSK_WITH_AES_256_CCM_8:
		return true
	default:
		return false
	}
}
@ -0,0 +1,311 @@ |
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Transport code's client connection pooling.
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"context" |
|||
"crypto/tls" |
|||
"errors" |
|||
"net/http" |
|||
"sync" |
|||
) |
|||
|
|||
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	// GetClientConn returns a specific HTTP/2 connection (usually
	// a TLS-TCP connection) to an HTTP/2 server. On success, the
	// returned ClientConn accounts for the upcoming RoundTrip
	// call, so the caller should not omit it. If the caller needs
	// to, ClientConn.RoundTrip can be called with a bogus
	// new(http.Request) to release the stream reservation.
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)

	// MarkDead marks the given connection as dead so the pool
	// stops handing it out for new requests.
	MarkDead(*ClientConn)
}
|||
|
|||
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
|
|||
// implementations which can close their idle connections.
|
|||
type clientConnPoolIdleCloser interface { |
|||
ClientConnPool |
|||
closeIdleConnections() |
|||
} |
|||
|
|||
var ( |
|||
_ clientConnPoolIdleCloser = (*clientConnPool)(nil) |
|||
_ clientConnPoolIdleCloser = noDialClientConnPool{} |
|||
) |
|||
|
|||
// TODO: use singleflight for dialing and addConnCalls?
|
|||
type clientConnPool struct { |
|||
t *Transport |
|||
|
|||
mu sync.Mutex // TODO: maybe switch to RWMutex
|
|||
// TODO: add support for sharing conns based on cert names
|
|||
// (e.g. share conn for googleapis.com and appspot.com)
|
|||
conns map[string][]*ClientConn // key is host:port
|
|||
dialing map[string]*dialCall // currently in-flight dials
|
|||
keys map[*ClientConn][]string |
|||
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
|
|||
} |
|||
|
|||
// GetClientConn returns a pooled HTTP/2 connection for req to addr,
// dialing a new one on a cache miss. It implements ClientConnPool.
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}
|||
|
|||
const ( |
|||
dialOnMiss = true |
|||
noDialOnMiss = false |
|||
) |
|||
|
|||
// getClientConn returns a connection for req to addr, optionally
// starting a new dial when no cached connection can take the request.
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	// TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
	if isConnectionCloseRequest(req) && dialOnMiss {
		// It gets its own connection.
		traceGetConn(req, addr)
		const singleUse = true
		cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
		if err != nil {
			return nil, err
		}
		return cc, nil
	}
	for {
		p.mu.Lock()
		// Prefer an existing connection that can still reserve a stream.
		for _, cc := range p.conns[addr] {
			if cc.ReserveNewRequest() {
				// When a connection is presented to us by the net/http package,
				// the GetConn hook has already been called.
				// Don't call it a second time here.
				if !cc.getConnCalled {
					traceGetConn(req, addr)
				}
				cc.getConnCalled = false
				p.mu.Unlock()
				return cc, nil
			}
		}
		if !dialOnMiss {
			p.mu.Unlock()
			return nil, ErrNoCachedConn
		}
		traceGetConn(req, addr)
		// Join (or start) the single in-flight dial for this addr,
		// then wait for it outside the lock.
		call := p.getStartDialLocked(req.Context(), addr)
		p.mu.Unlock()
		<-call.done
		// Retry when the dial failed only because some other request's
		// context was canceled (see shouldRetryDial).
		if shouldRetryDial(call, req) {
			continue
		}
		cc, err := call.res, call.err
		if err != nil {
			return nil, err
		}
		if cc.ReserveNewRequest() {
			return cc, nil
		}
		// The freshly dialed conn was exhausted by other goroutines
		// before we could reserve a stream; loop and try again.
	}
}
|||
|
|||
// dialCall is an in-flight Transport dial call to a host.
|
|||
type dialCall struct { |
|||
_ incomparable |
|||
p *clientConnPool |
|||
// the context associated with the request
|
|||
// that created this dialCall
|
|||
ctx context.Context |
|||
done chan struct{} // closed when done
|
|||
res *ClientConn // valid after done is closed
|
|||
err error // valid after done is closed
|
|||
} |
|||
|
|||
// requires p.mu is held.
|
|||
func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall { |
|||
if call, ok := p.dialing[addr]; ok { |
|||
// A dial is already in-flight. Don't start another.
|
|||
return call |
|||
} |
|||
call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx} |
|||
if p.dialing == nil { |
|||
p.dialing = make(map[string]*dialCall) |
|||
} |
|||
p.dialing[addr] = call |
|||
go call.dial(call.ctx, addr) |
|||
return call |
|||
} |
|||
|
|||
// run in its own goroutine.
|
|||
func (c *dialCall) dial(ctx context.Context, addr string) {
	const singleUse = false // shared conn
	c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)

	// Publish the result under the pool lock: drop the in-flight
	// marker and, on success, register the new conn in the pool.
	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()

	// Wake every getClientConn caller blocked on this dial.
	close(c.done)
}
|||
|
|||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
|||
// already exist. It coalesces concurrent calls with the same key.
|
|||
// This is used by the http1 Transport code when it creates a new connection. Because
|
|||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
|||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
|||
// This code decides which ones live or die.
|
|||
// The return value used is whether c was used.
|
|||
// c is never closed.
|
|||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	// If any existing conn for key can still take requests, c is redundant.
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return false, nil
		}
	}
	// Coalesce concurrent calls for the same key: only the first
	// starts the HTTP/2 upgrade; the rest wait on the same call.
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	<-call.done
	if call.err != nil {
		return false, call.err
	}
	// Only the caller whose goroutine actually consumed c reports it used.
	return !dup, nil
}
|||
|
|||
type addConnCall struct { |
|||
_ incomparable |
|||
p *clientConnPool |
|||
done chan struct{} // closed when done
|
|||
err error |
|||
} |
|||
|
|||
// run upgrades tc into an HTTP/2 ClientConn, records the outcome on c,
// and unblocks every addConnIfNeeded caller waiting on c.done.
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		cc.getConnCalled = true // already called by the net/http package
		p.addConnLocked(key, cc)
	}
	// Remove the coalescing marker so future calls start fresh.
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	close(c.done)
}
|||
|
|||
// p.mu must be held
|
|||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { |
|||
for _, v := range p.conns[key] { |
|||
if v == cc { |
|||
return |
|||
} |
|||
} |
|||
if p.conns == nil { |
|||
p.conns = make(map[string][]*ClientConn) |
|||
} |
|||
if p.keys == nil { |
|||
p.keys = make(map[*ClientConn][]string) |
|||
} |
|||
p.conns[key] = append(p.conns[key], cc) |
|||
p.keys[cc] = append(p.keys[cc], key) |
|||
} |
|||
|
|||
func (p *clientConnPool) MarkDead(cc *ClientConn) { |
|||
p.mu.Lock() |
|||
defer p.mu.Unlock() |
|||
for _, key := range p.keys[cc] { |
|||
vv, ok := p.conns[key] |
|||
if !ok { |
|||
continue |
|||
} |
|||
newList := filterOutClientConn(vv, cc) |
|||
if len(newList) > 0 { |
|||
p.conns[key] = newList |
|||
} else { |
|||
delete(p.conns, key) |
|||
} |
|||
} |
|||
delete(p.keys, cc) |
|||
} |
|||
|
|||
// closeIdleConnections closes every pooled connection that currently
// has no active streams.
func (p *clientConnPool) closeIdleConnections() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// TODO: don't close a cc if it was just added to the pool
	// milliseconds ago and has never been used. There's currently
	// a small race window with the HTTP/1 Transport's integration
	// where it can add an idle conn just before using it, and
	// somebody else can concurrently call CloseIdleConns and
	// break some caller's RoundTrip.
	for _, vv := range p.conns {
		for _, cc := range vv {
			cc.closeIfIdle()
		}
	}
}
|||
|
|||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { |
|||
out := in[:0] |
|||
for _, v := range in { |
|||
if v != exclude { |
|||
out = append(out, v) |
|||
} |
|||
} |
|||
// If we filtered it out, zero out the last item to prevent
|
|||
// the GC from seeing it.
|
|||
if len(in) != len(out) { |
|||
in[len(in)-1] = nil |
|||
} |
|||
return out |
|||
} |
|||
|
|||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
|||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
|||
// connection instead.
|
|||
type noDialClientConnPool struct{ *clientConnPool } |
|||
|
|||
// GetClientConn returns a cached connection for req/addr or
// ErrNoCachedConn; it never dials (the HTTP/1.1 client dials instead).
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}
|||
|
|||
// shouldRetryDial reports whether the current request should
|
|||
// retry dialing after the call finished unsuccessfully, for example
|
|||
// if the dial was canceled because of a context cancellation or
|
|||
// deadline expiry.
|
|||
func shouldRetryDial(call *dialCall, req *http.Request) bool { |
|||
if call.err == nil { |
|||
// No error, no need to retry
|
|||
return false |
|||
} |
|||
if call.ctx == req.Context() { |
|||
// If the call has the same context as the request, the dial
|
|||
// should not be retried, since any cancellation will have come
|
|||
// from this request.
|
|||
return false |
|||
} |
|||
if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) { |
|||
// If the call error is not because of a context cancellation or a deadline expiry,
|
|||
// the dial should not be retried.
|
|||
return false |
|||
} |
|||
// Only retry if the error is a context cancellation error or deadline expiry
|
|||
// and the context associated with the call was canceled or expired.
|
|||
return call.ctx.Err() != nil |
|||
} |
@ -0,0 +1,146 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"sync" |
|||
) |
|||
|
|||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
|
|||
// The maximum wasted space per dataBuffer is 2x the largest size class,
|
|||
// which happens when the dataBuffer has multiple chunks and there is
|
|||
// one unread byte in both the first and last chunks. We use a few size
|
|||
// classes to minimize overheads for servers that typically receive very
|
|||
// small request bodies.
|
|||
//
|
|||
// TODO: Benchmark to determine if the pools are necessary. The GC may have
|
|||
// improved enough that we can instead allocate chunks like this:
|
|||
// make([]byte, max(16<<10, expectedBytesRemaining))
|
|||
var ( |
|||
dataChunkSizeClasses = []int{ |
|||
1 << 10, |
|||
2 << 10, |
|||
4 << 10, |
|||
8 << 10, |
|||
16 << 10, |
|||
} |
|||
dataChunkPools = [...]sync.Pool{ |
|||
{New: func() interface{} { return make([]byte, 1<<10) }}, |
|||
{New: func() interface{} { return make([]byte, 2<<10) }}, |
|||
{New: func() interface{} { return make([]byte, 4<<10) }}, |
|||
{New: func() interface{} { return make([]byte, 8<<10) }}, |
|||
{New: func() interface{} { return make([]byte, 16<<10) }}, |
|||
} |
|||
) |
|||
|
|||
func getDataBufferChunk(size int64) []byte { |
|||
i := 0 |
|||
for ; i < len(dataChunkSizeClasses)-1; i++ { |
|||
if size <= int64(dataChunkSizeClasses[i]) { |
|||
break |
|||
} |
|||
} |
|||
return dataChunkPools[i].Get().([]byte) |
|||
} |
|||
|
|||
func putDataBufferChunk(p []byte) { |
|||
for i, n := range dataChunkSizeClasses { |
|||
if len(p) == n { |
|||
dataChunkPools[i].Put(p) |
|||
return |
|||
} |
|||
} |
|||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) |
|||
} |
|||
|
|||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
|
|||
// Each dataBuffer is used to read DATA frames on a single stream.
|
|||
// The buffer is divided into chunks so the server can limit the
|
|||
// total memory used by a single connection without limiting the
|
|||
// request body size on any single stream.
|
|||
type dataBuffer struct { |
|||
chunks [][]byte |
|||
r int // next byte to read is chunks[0][r]
|
|||
w int // next byte to write is chunks[len(chunks)-1][w]
|
|||
size int // total buffered bytes
|
|||
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
|
|||
} |
|||
|
|||
var errReadEmpty = errors.New("read from empty dataBuffer") |
|||
|
|||
// Read copies bytes from the buffer into p.
|
|||
// It is an error to read when no data is available.
|
|||
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			// Recycle the spent chunk, shift the rest down, and nil
			// the vacated tail slot so the GC can reclaim it.
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}
|||
|
|||
func (b *dataBuffer) bytesFromFirstChunk() []byte { |
|||
if len(b.chunks) == 1 { |
|||
return b.chunks[0][b.r:b.w] |
|||
} |
|||
return b.chunks[0][b.r:] |
|||
} |
|||
|
|||
// Len returns the number of bytes of the unread portion of the buffer.
|
|||
func (b *dataBuffer) Len() int { |
|||
return b.size |
|||
} |
|||
|
|||
// Write appends p to the buffer.
|
|||
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		// expected counts bytes promised by future writes; consume it
		// so the next allocation sizes itself accordingly.
		b.expected -= int64(n)
	}
	return ntotal, nil
}
|||
|
|||
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { |
|||
if len(b.chunks) != 0 { |
|||
last := b.chunks[len(b.chunks)-1] |
|||
if b.w < len(last) { |
|||
return last |
|||
} |
|||
} |
|||
chunk := getDataBufferChunk(want) |
|||
b.chunks = append(b.chunks, chunk) |
|||
b.w = 0 |
|||
return chunk |
|||
} |
@ -0,0 +1,145 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
) |
|||
|
|||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
|
|||
type ErrCode uint32 |
|||
|
|||
const ( |
|||
ErrCodeNo ErrCode = 0x0 |
|||
ErrCodeProtocol ErrCode = 0x1 |
|||
ErrCodeInternal ErrCode = 0x2 |
|||
ErrCodeFlowControl ErrCode = 0x3 |
|||
ErrCodeSettingsTimeout ErrCode = 0x4 |
|||
ErrCodeStreamClosed ErrCode = 0x5 |
|||
ErrCodeFrameSize ErrCode = 0x6 |
|||
ErrCodeRefusedStream ErrCode = 0x7 |
|||
ErrCodeCancel ErrCode = 0x8 |
|||
ErrCodeCompression ErrCode = 0x9 |
|||
ErrCodeConnect ErrCode = 0xa |
|||
ErrCodeEnhanceYourCalm ErrCode = 0xb |
|||
ErrCodeInadequateSecurity ErrCode = 0xc |
|||
ErrCodeHTTP11Required ErrCode = 0xd |
|||
) |
|||
|
|||
var errCodeName = map[ErrCode]string{ |
|||
ErrCodeNo: "NO_ERROR", |
|||
ErrCodeProtocol: "PROTOCOL_ERROR", |
|||
ErrCodeInternal: "INTERNAL_ERROR", |
|||
ErrCodeFlowControl: "FLOW_CONTROL_ERROR", |
|||
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", |
|||
ErrCodeStreamClosed: "STREAM_CLOSED", |
|||
ErrCodeFrameSize: "FRAME_SIZE_ERROR", |
|||
ErrCodeRefusedStream: "REFUSED_STREAM", |
|||
ErrCodeCancel: "CANCEL", |
|||
ErrCodeCompression: "COMPRESSION_ERROR", |
|||
ErrCodeConnect: "CONNECT_ERROR", |
|||
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", |
|||
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", |
|||
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", |
|||
} |
|||
|
|||
func (e ErrCode) String() string { |
|||
if s, ok := errCodeName[e]; ok { |
|||
return s |
|||
} |
|||
return fmt.Sprintf("unknown error code 0x%x", uint32(e)) |
|||
} |
|||
|
|||
func (e ErrCode) stringToken() string { |
|||
if s, ok := errCodeName[e]; ok { |
|||
return s |
|||
} |
|||
return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e)) |
|||
} |
|||
|
|||
// ConnectionError is an error that results in the termination of the
|
|||
// entire connection.
|
|||
type ConnectionError ErrCode |
|||
|
|||
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } |
|||
|
|||
// StreamError is an error that only affects one stream within an
|
|||
// HTTP/2 connection.
|
|||
type StreamError struct { |
|||
StreamID uint32 |
|||
Code ErrCode |
|||
Cause error // optional additional detail
|
|||
} |
|||
|
|||
// errFromPeer is a sentinel error value for StreamError.Cause to
|
|||
// indicate that the StreamError was sent from the peer over the wire
|
|||
// and wasn't locally generated in the Transport.
|
|||
var errFromPeer = errors.New("received from peer") |
|||
|
|||
func streamError(id uint32, code ErrCode) StreamError { |
|||
return StreamError{StreamID: id, Code: code} |
|||
} |
|||
|
|||
func (e StreamError) Error() string { |
|||
if e.Cause != nil { |
|||
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) |
|||
} |
|||
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) |
|||
} |
|||
|
|||
// 6.9.1 The Flow Control Window
|
|||
// "If a sender receives a WINDOW_UPDATE that causes a flow control
|
|||
// window to exceed this maximum it MUST terminate either the stream
|
|||
// or the connection, as appropriate. For streams, [...]; for the
|
|||
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
|
|||
type goAwayFlowError struct{} |
|||
|
|||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } |
|||
|
|||
// connError represents an HTTP/2 ConnectionError error code, along
|
|||
// with a string (for debugging) explaining why.
|
|||
//
|
|||
// Errors of this type are only returned by the frame parser functions
|
|||
// and converted into ConnectionError(Code), after stashing away
|
|||
// the Reason into the Framer's errDetail field, accessible via
|
|||
// the (*Framer).ErrorDetail method.
|
|||
type connError struct { |
|||
Code ErrCode // the ConnectionError error code
|
|||
Reason string // additional reason
|
|||
} |
|||
|
|||
func (e connError) Error() string { |
|||
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) |
|||
} |
|||
|
|||
type pseudoHeaderError string |
|||
|
|||
func (e pseudoHeaderError) Error() string { |
|||
return fmt.Sprintf("invalid pseudo-header %q", string(e)) |
|||
} |
|||
|
|||
type duplicatePseudoHeaderError string |
|||
|
|||
func (e duplicatePseudoHeaderError) Error() string { |
|||
return fmt.Sprintf("duplicate pseudo-header %q", string(e)) |
|||
} |
|||
|
|||
type headerFieldNameError string |
|||
|
|||
func (e headerFieldNameError) Error() string { |
|||
return fmt.Sprintf("invalid header field name %q", string(e)) |
|||
} |
|||
|
|||
type headerFieldValueError string |
|||
|
|||
func (e headerFieldValueError) Error() string { |
|||
return fmt.Sprintf("invalid header field value for %q", string(e)) |
|||
} |
|||
|
|||
var ( |
|||
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") |
|||
errPseudoAfterRegular = errors.New("pseudo header field after regular") |
|||
) |
@ -0,0 +1,52 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Flow control
|
|||
|
|||
package http2 |
|||
|
|||
// flow is the flow control window's size.
|
|||
type flow struct { |
|||
_ incomparable |
|||
|
|||
// n is the number of DATA bytes we're allowed to send.
|
|||
// A flow is kept both on a conn and a per-stream.
|
|||
n int32 |
|||
|
|||
// conn points to the shared connection-level flow that is
|
|||
// shared by all streams on that conn. It is nil for the flow
|
|||
// that's on the conn directly.
|
|||
conn *flow |
|||
} |
|||
|
|||
func (f *flow) setConnFlow(cf *flow) { f.conn = cf } |
|||
|
|||
func (f *flow) available() int32 { |
|||
n := f.n |
|||
if f.conn != nil && f.conn.n < n { |
|||
n = f.conn.n |
|||
} |
|||
return n |
|||
} |
|||
|
|||
func (f *flow) take(n int32) { |
|||
if n > f.available() { |
|||
panic("internal error: took too much") |
|||
} |
|||
f.n -= n |
|||
if f.conn != nil { |
|||
f.conn.n -= n |
|||
} |
|||
} |
|||
|
|||
// add adds n bytes (positive or negative) to the flow control window.
|
|||
// It returns false if the sum would exceed 2^31-1.
|
|||
func (f *flow) add(n int32) bool {
	sum := f.n + n
	// Single-comparison signed-overflow check: the sum is valid iff
	// it landed on the side of n that f.n's sign implies; this catches
	// both positive and negative int32 overflow.
	if (sum > n) == (f.n > 0) {
		f.n = sum
		return true
	}
	return false
}
File diff suppressed because it is too large
@ -0,0 +1,30 @@ |
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
//go:build go1.11
|
|||
// +build go1.11
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"net/http/httptrace" |
|||
"net/textproto" |
|||
) |
|||
|
|||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { |
|||
return trace != nil && trace.WroteHeaderField != nil |
|||
} |
|||
|
|||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { |
|||
if trace != nil && trace.WroteHeaderField != nil { |
|||
trace.WroteHeaderField(k, []string{v}) |
|||
} |
|||
} |
|||
|
|||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { |
|||
if trace != nil { |
|||
return trace.Got1xxResponse |
|||
} |
|||
return nil |
|||
} |
@ -0,0 +1,27 @@ |
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
//go:build go1.15
|
|||
// +build go1.15
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"context" |
|||
"crypto/tls" |
|||
) |
|||
|
|||
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
|
|||
// connection.
|
|||
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { |
|||
dialer := &tls.Dialer{ |
|||
Config: cfg, |
|||
} |
|||
cn, err := dialer.DialContext(ctx, network, addr) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
|
|||
return tlsCn, nil |
|||
} |
@ -0,0 +1,17 @@ |
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
//go:build go1.18
|
|||
// +build go1.18
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"crypto/tls" |
|||
"net" |
|||
) |
|||
|
|||
func tlsUnderlyingConn(tc *tls.Conn) net.Conn { |
|||
return tc.NetConn() |
|||
} |
@ -0,0 +1,170 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
// Defensive debug-only utility to track that functions run on the
|
|||
// goroutine that they're supposed to.
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"fmt" |
|||
"os" |
|||
"runtime" |
|||
"strconv" |
|||
"sync" |
|||
) |
|||
|
|||
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" |
|||
|
|||
type goroutineLock uint64 |
|||
|
|||
func newGoroutineLock() goroutineLock { |
|||
if !DebugGoroutines { |
|||
return 0 |
|||
} |
|||
return goroutineLock(curGoroutineID()) |
|||
} |
|||
|
|||
func (g goroutineLock) check() { |
|||
if !DebugGoroutines { |
|||
return |
|||
} |
|||
if curGoroutineID() != uint64(g) { |
|||
panic("running on the wrong goroutine") |
|||
} |
|||
} |
|||
|
|||
func (g goroutineLock) checkNotOn() { |
|||
if !DebugGoroutines { |
|||
return |
|||
} |
|||
if curGoroutineID() == uint64(g) { |
|||
panic("running on the wrong goroutine") |
|||
} |
|||
} |
|||
|
|||
var goroutineSpace = []byte("goroutine ") |
|||
|
|||
// curGoroutineID extracts the current goroutine's numeric ID from the
// first line of a runtime.Stack dump ("goroutine NNNN [...").
// It panics if the dump does not have the expected shape.
func curGoroutineID() uint64 {
	// Borrow a small scratch buffer from the pool; 64 bytes is enough
	// for the first stack line we need.
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}
|||
|
|||
var littleBuf = sync.Pool{ |
|||
New: func() interface{} { |
|||
buf := make([]byte, 64) |
|||
return &buf |
|||
}, |
|||
} |
|||
|
|||
// parseUintBytes is like strconv.ParseUint, but using a []byte.
|
|||
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}
	s0 := s
	// wrap attaches the standard ParseUint error context.
	wrap := func(e error) error {
		return &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: e}
	}

	switch {
	case len(s) < 1:
		return 0, wrap(strconv.ErrSyntax)

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Infer the base from an octal or hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				return 0, wrap(strconv.ErrSyntax)
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		return 0, wrap(errors.New("invalid base " + strconv.Itoa(base)))
	}

	// cutoff is the first value of n at which n*base would overflow
	// uint64 (base is guaranteed to be in [2,36] here).
	cutoff := (1<<64-1)/uint64(base) + 1
	maxVal := uint64(1)<<uint(bitSize) - 1

	for _, d := range s {
		var v byte
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			return 0, wrap(strconv.ErrSyntax)
		}
		if int(v) >= base {
			return 0, wrap(strconv.ErrSyntax)
		}
		if n >= cutoff {
			// n*base overflows
			return 1<<64 - 1, wrap(strconv.ErrRange)
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows bitSize
			return 1<<64 - 1, wrap(strconv.ErrRange)
		}
		n = n1
	}
	return n, nil
}
|||
|
|||
// Return the first number n such that n*base >= 1<<64.
|
|||
// cutoff64 returns the smallest n such that n*base overflows uint64
// (i.e. n*base >= 1<<64). It returns 0 for bases below 2.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	const maxUint64 = 1<<64 - 1
	return maxUint64/uint64(base) + 1
}
@ -0,0 +1,105 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package http2 |
|||
|
|||
import ( |
|||
"net/http" |
|||
"sync" |
|||
) |
|||
|
|||
var ( |
|||
commonBuildOnce sync.Once |
|||
commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
|
|||
commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
|
|||
) |
|||
|
|||
func buildCommonHeaderMapsOnce() { |
|||
commonBuildOnce.Do(buildCommonHeaderMaps) |
|||
} |
|||
|
|||
func buildCommonHeaderMaps() { |
|||
common := []string{ |
|||
"accept", |
|||
"accept-charset", |
|||
"accept-encoding", |
|||
"accept-language", |
|||
"accept-ranges", |
|||
"age", |
|||
"access-control-allow-credentials", |
|||
"access-control-allow-headers", |
|||
"access-control-allow-methods", |
|||
"access-control-allow-origin", |
|||
"access-control-expose-headers", |
|||
"access-control-max-age", |
|||
"access-control-request-headers", |
|||
"access-control-request-method", |
|||
"allow", |
|||
"authorization", |
|||
"cache-control", |
|||
"content-disposition", |
|||
"content-encoding", |
|||
"content-language", |
|||
"content-length", |
|||
"content-location", |
|||
"content-range", |
|||
"content-type", |
|||
"cookie", |
|||
"date", |
|||
"etag", |
|||
"expect", |
|||
"expires", |
|||
"from", |
|||
"host", |
|||
"if-match", |
|||
"if-modified-since", |
|||
"if-none-match", |
|||
"if-unmodified-since", |
|||
"last-modified", |
|||
"link", |
|||
"location", |
|||
"max-forwards", |
|||
"origin", |
|||
"proxy-authenticate", |
|||
"proxy-authorization", |
|||
"range", |
|||
"referer", |
|||
"refresh", |
|||
"retry-after", |
|||
"server", |
|||
"set-cookie", |
|||
"strict-transport-security", |
|||
"trailer", |
|||
"transfer-encoding", |
|||
"user-agent", |
|||
"vary", |
|||
"via", |
|||
"www-authenticate", |
|||
"x-forwarded-for", |
|||
"x-forwarded-proto", |
|||
} |
|||
commonLowerHeader = make(map[string]string, len(common)) |
|||
commonCanonHeader = make(map[string]string, len(common)) |
|||
for _, v := range common { |
|||
chk := http.CanonicalHeaderKey(v) |
|||
commonLowerHeader[chk] = v |
|||
commonCanonHeader[v] = chk |
|||
} |
|||
} |
|||
|
|||
func lowerHeader(v string) (lower string, ascii bool) { |
|||
buildCommonHeaderMapsOnce() |
|||
if s, ok := commonLowerHeader[v]; ok { |
|||
return s, true |
|||
} |
|||
return asciiToLower(v) |
|||
} |
|||
|
|||
func canonicalHeader(v string) string { |
|||
buildCommonHeaderMapsOnce() |
|||
if s, ok := commonCanonHeader[v]; ok { |
|||
return s |
|||
} |
|||
return http.CanonicalHeaderKey(v) |
|||
} |
@ -0,0 +1,245 @@ |
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// Use of this source code is governed by a BSD-style
|
|||
// license that can be found in the LICENSE file.
|
|||
|
|||
package hpack |
|||
|
|||
import ( |
|||
"io" |
|||
) |
|||
|
|||
const ( |
|||
uint32Max = ^uint32(0) |
|||
initialHeaderTableSize = 4096 |
|||
) |
|||
|
|||
type Encoder struct { |
|||
dynTab dynamicTable |
|||
// minSize is the minimum table size set by
|
|||
// SetMaxDynamicTableSize after the previous Header Table Size
|
|||
// Update.
|
|||
minSize uint32 |
|||
// maxSizeLimit is the maximum table size this encoder
|
|||
// supports. This will protect the encoder from too large
|
|||
// size.
|
|||
maxSizeLimit uint32 |
|||
// tableSizeUpdate indicates whether "Header Table Size
|
|||
// Update" is required.
|
|||
tableSizeUpdate bool |
|||
w io.Writer |
|||
buf []byte |
|||
} |
|||
|
|||
// NewEncoder returns a new Encoder which performs HPACK encoding. An
|
|||
// encoded data is written to w.
|
|||
func NewEncoder(w io.Writer) *Encoder { |
|||
e := &Encoder{ |
|||
minSize: uint32Max, |
|||
maxSizeLimit: initialHeaderTableSize, |
|||
tableSizeUpdate: false, |
|||
w: w, |
|||
} |
|||
e.dynTab.table.init() |
|||
e.dynTab.setMaxSize(initialHeaderTableSize) |
|||
return e |
|||
} |
|||
|
|||
// WriteField encodes f into a single Write to e's underlying Writer.
|
|||
// This function may also produce bytes for "Header Table Size Update"
|
|||
// if necessary. If produced, it is done before encoding f.
|
|||
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		// If the table size ever dipped below its final value since
		// the last emitted update, announce that minimum first, as
		// required before announcing the (larger) final size.
		if e.minSize < e.dynTab.maxSize {
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		// Exact name+value hit: emit a fully indexed representation.
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		// idx == 0 means not even the name matched a table entry.
		if idx == 0 {
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	// Emit everything in a single Write; treat a short write as an error.
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}
|||
|
|||
// searchTable searches f in both stable and dynamic header tables.
|
|||
// The static header table is searched first. Only when there is no
|
|||
// exact match for both name and value, the dynamic header table is
|
|||
// then searched. If there is no match, i is 0. If both name and value
|
|||
// match, i is the matched index and nameValueMatch becomes true. If
|
|||
// only name matches, i points to that index and nameValueMatch
|
|||
// becomes false.
|
|||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { |
|||
i, nameValueMatch = staticTable.search(f) |
|||
if nameValueMatch { |
|||
return i, true |
|||
} |
|||
|
|||
j, nameValueMatch := e.dynTab.table.search(f) |
|||
if nameValueMatch || (i == 0 && j != 0) { |
|||
return j + uint64(staticTable.len()), nameValueMatch |
|||
} |
|||
|
|||
return i, false |
|||
} |
|||
|
|||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
|||
// The actual size is bounded by the value passed to
|
|||
// SetMaxDynamicTableSizeLimit.
|
|||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) { |
|||
if v > e.maxSizeLimit { |
|||
v = e.maxSizeLimit |
|||
} |
|||
if v < e.minSize { |
|||
e.minSize = v |
|||
} |
|||
e.tableSizeUpdate = true |
|||
e.dynTab.setMaxSize(v) |
|||
} |
|||
|
|||
// MaxDynamicTableSize returns the current dynamic header table size.
|
|||
func (e *Encoder) MaxDynamicTableSize() (v uint32) { |
|||
return e.dynTab.maxSize |
|||
} |
|||
|
|||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
|||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
|||
// 4096, which is the same size of the default dynamic header table
|
|||
// size described in HPACK specification. If the current maximum
|
|||
// dynamic header table size is strictly greater than v, "Header Table
|
|||
// Size Update" will be done in the next WriteField call and the
|
|||
// maximum dynamic header table size is truncated to v.
|
|||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { |
|||
e.maxSizeLimit = v |
|||
if e.dynTab.maxSize > v { |
|||
e.tableSizeUpdate = true |
|||
e.dynTab.setMaxSize(v) |
|||
} |
|||
} |
|||
|
|||
// shouldIndex reports whether f should be indexed.
|
|||
func (e *Encoder) shouldIndex(f HeaderField) bool { |
|||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize |
|||
} |
|||
|
|||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
|||
// representation, to dst and returns the extended buffer.
|
|||
func appendIndexed(dst []byte, i uint64) []byte { |
|||
first := len(dst) |
|||
dst = appendVarInt(dst, 7, i) |
|||
dst[first] |= 0x80 |
|||
return dst |
|||
} |
|||
|
|||
// appendNewName appends f, as encoded in one of "Literal Header field
|
|||
// - New Name" representation variants, to dst and returns the
|
|||
// extended buffer.
|
|||
//
|
|||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
|||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
|||
// representation is used.
|
|||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { |
|||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) |
|||
dst = appendHpackString(dst, f.Name) |
|||
return appendHpackString(dst, f.Value) |
|||
} |
|||
|
|||
// appendIndexedName appends f and index i referring indexed name
|
|||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
|||
// representation variants, to dst and returns the extended buffer.
|
|||
//
|
|||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
|||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
|||
// representation is used.
|
|||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { |
|||
first := len(dst) |
|||
var n byte |
|||
if indexing { |
|||
n = 6 |
|||
} else { |
|||
n = 4 |
|||
} |
|||
dst = appendVarInt(dst, n, i) |
|||
dst[first] |= encodeTypeByte(indexing, f.Sensitive) |
|||
return appendHpackString(dst, f.Value) |
|||
} |
|||
|
|||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
|||
// representation, to dst and returns the extended buffer.
|
|||
func appendTableSize(dst []byte, v uint32) []byte { |
|||
first := len(dst) |
|||
dst = appendVarInt(dst, 5, uint64(v)) |
|||
dst[first] |= 0x20 |
|||
return dst |
|||
} |
|||
|
|||
// appendVarInt appends i, as encoded in variable integer form using n
// bit prefix, to dst and returns the extended buffer.
//
// See
// https://httpwg.org/specs/rfc7541.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	limit := uint64(1)<<n - 1
	if i < limit {
		// The value fits entirely inside the n-bit prefix.
		return append(dst, byte(i))
	}
	// Fill the prefix, then emit continuation octets carrying 7 bits
	// each with the high bit set, least-significant group first.
	dst = append(dst, byte(limit))
	for i -= limit; i >= 128; i >>= 7 {
		dst = append(dst, byte(i&0x7f)|0x80)
	}
	return append(dst, byte(i))
}
|||
|
|||
// appendHpackString appends s, as encoded in "String Literal"
|
|||
// representation, to dst and returns the extended buffer.
|
|||
//
|
|||
// s will be encoded in Huffman codes only when it produces strictly
|
|||
// shorter byte string.
|
|||
func appendHpackString(dst []byte, s string) []byte { |
|||
huffmanLength := HuffmanEncodeLength(s) |
|||
if huffmanLength < uint64(len(s)) { |
|||
first := len(dst) |
|||
dst = appendVarInt(dst, 7, huffmanLength) |
|||
dst = AppendHuffmanString(dst, s) |
|||
dst[first] |= 0x80 |
|||
} else { |
|||
dst = appendVarInt(dst, 7, uint64(len(s))) |
|||
dst = append(dst, s...) |
|||
} |
|||
return dst |
|||
} |
|||
|
|||
// encodeTypeByte returns the representation type byte: 0x10 ("Never
// Indexed") when sensitive is true; otherwise 0x40 ("Incremental
// Indexing") when indexing is true; otherwise 0 ("Without Indexing").
// Sensitivity takes precedence over indexing.
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10
	case indexing:
		return 0x40
	default:
		return 0
	}
}
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue