fix!: closing errChan channel
it also check if context is canceld and if its so, it will not trying to send to it work function error. BREAKING CHANGE: it also takes now logr.Logger and stores it in the Dispatcher
This commit is contained in:
parent
fee10a73b6
commit
48fac0237a
4
Makefile
4
Makefile
@ -39,6 +39,10 @@ test:
|
||||
./... \
|
||||
-timeout=120m
|
||||
|
||||
.PHONY: coverage
|
||||
coverage: test
|
||||
$(GO) tool cover -html coverage.out
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
$(GOLANGCI_LINT) run \
|
||||
|
@ -5,15 +5,18 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/stdr"
|
||||
"go.xsfx.dev/workgroups"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
d, ctx := workgroups.NewDispatcher(
|
||||
context.Background(),
|
||||
stdr.New(log.New(os.Stderr, "", log.Lshortfile)),
|
||||
runtime.GOMAXPROCS(0), // This starts as much worker as maximal processes are allowed for go.
|
||||
10, // Capacity of the queue.
|
||||
)
|
||||
@ -54,6 +57,7 @@ func Example() {
|
||||
func ExampleRetry() {
|
||||
d, ctx := workgroups.NewDispatcher(
|
||||
context.Background(),
|
||||
stdr.New(log.New(os.Stderr, "", log.Lshortfile)),
|
||||
runtime.GOMAXPROCS(0), // This starts as much worker as maximal processes are allowed for go.
|
||||
10, // Capacity of the queue.
|
||||
)
|
||||
|
4
go.mod
4
go.mod
@ -4,11 +4,13 @@ go 1.17
|
||||
|
||||
require (
|
||||
github.com/boumenot/gocover-cobertura v1.2.0
|
||||
github.com/go-logr/logr v1.2.3
|
||||
github.com/go-logr/stdr v1.2.2
|
||||
github.com/golangci/golangci-lint v1.42.1
|
||||
github.com/goreleaser/goreleaser v0.179.0
|
||||
github.com/posener/goreadme v1.4.1
|
||||
github.com/rs/zerolog v1.25.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/tonglil/buflogr v0.0.0-20220413082439-d4c2784244cd
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
)
|
||||
|
||||
|
10
go.sum
10
go.sum
@ -375,6 +375,11 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
@ -920,9 +925,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II=
|
||||
github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
@ -1042,6 +1044,8 @@ github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
github.com/tonglil/buflogr v0.0.0-20220413082439-d4c2784244cd h1:7qrz07BLrIySfuRh2o9Nt8P8me4HV5NlNrWPzvUW/Zo=
|
||||
github.com/tonglil/buflogr v0.0.0-20220413082439-d4c2784244cd/go.mod h1:yYWwvSpn/3uAaqjf6mJg/XMiAciaR0QcRJH2gJGDxNE=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
|
29
vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
Normal file
29
vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
run:
|
||||
timeout: 1m
|
||||
tests: true
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- deadcode
|
||||
- errcheck
|
||||
- forcetypeassert
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unused
|
||||
- varcheck
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 10
|
6
vendor/github.com/go-logr/logr/CHANGELOG.md
generated
vendored
Normal file
6
vendor/github.com/go-logr/logr/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
# CHANGELOG
|
||||
|
||||
## v1.0.0-rc1
|
||||
|
||||
This is the first logged release. Major changes (including breaking changes)
|
||||
have occurred since earlier tags.
|
17
vendor/github.com/go-logr/logr/CONTRIBUTING.md
generated
vendored
Normal file
17
vendor/github.com/go-logr/logr/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
# Contributing
|
||||
|
||||
Logr is open to pull-requests, provided they fit within the intended scope of
|
||||
the project. Specifically, this library aims to be VERY small and minimalist,
|
||||
with no external dependencies.
|
||||
|
||||
## Compatibility
|
||||
|
||||
This project intends to follow [semantic versioning](http://semver.org) and
|
||||
is very strict about compatibility. Any proposed changes MUST follow those
|
||||
rules.
|
||||
|
||||
## Performance
|
||||
|
||||
As a logging library, logr must be as light-weight as possible. Any proposed
|
||||
code change must include results of running the [benchmark](./benchmark)
|
||||
before and after the change.
|
201
vendor/github.com/go-logr/logr/LICENSE
generated
vendored
Normal file
201
vendor/github.com/go-logr/logr/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
282
vendor/github.com/go-logr/logr/README.md
generated
vendored
Normal file
282
vendor/github.com/go-logr/logr/README.md
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
# A minimal logging API for Go
|
||||
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
|
||||
|
||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
||||
without becoming coupled to a particular logging implementation. This is not
|
||||
an implementation of logging - it is an API. In fact it is two APIs with two
|
||||
different sets of users.
|
||||
|
||||
The `Logger` type is intended for application and library authors. It provides
|
||||
a relatively small API which can be used everywhere you want to emit logs. It
|
||||
defers the actual act of writing logs (to files, to stdout, or whatever) to the
|
||||
`LogSink` interface.
|
||||
|
||||
The `LogSink` interface is intended for logging library implementers. It is a
|
||||
pure interface which can be implemented by logging frameworks to provide the actual logging
|
||||
functionality.
|
||||
|
||||
This decoupling allows application and library developers to write code in
|
||||
terms of `logr.Logger` (which has very low dependency fan-out) while the
|
||||
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
|
||||
Application developers can then switch out implementations as necessary.
|
||||
|
||||
Many people assert that libraries should not be logging, and as such efforts
|
||||
like this are pointless. Those people are welcome to convince the authors of
|
||||
the tens-of-thousands of libraries that *DO* write logs that they are all
|
||||
wrong. In the meantime, logr takes a more practical approach.
|
||||
|
||||
## Typical usage
|
||||
|
||||
Somewhere, early in an application's life, it will make a decision about which
|
||||
logging library (implementation) it actually wants to use. Something like:
|
||||
|
||||
```
|
||||
func main() {
|
||||
// ... other setup code ...
|
||||
|
||||
// Create the "root" logger. We have chosen the "logimpl" implementation,
|
||||
// which takes some initial parameters and returns a logr.Logger.
|
||||
logger := logimpl.New(param1, param2)
|
||||
|
||||
// ... other setup code ...
|
||||
```
|
||||
|
||||
Most apps will call into other libraries, create structures to govern the flow,
|
||||
etc. The `logr.Logger` object can be passed to these other libraries, stored
|
||||
in structs, or even used as a package-global variable, if needed. For example:
|
||||
|
||||
```
|
||||
app := createTheAppObject(logger)
|
||||
app.Run()
|
||||
```
|
||||
|
||||
Outside of this early setup, no other packages need to know about the choice of
|
||||
implementation. They write logs in terms of the `logr.Logger` that they
|
||||
received:
|
||||
|
||||
```
|
||||
type appObject struct {
|
||||
// ... other fields ...
|
||||
logger logr.Logger
|
||||
// ... other fields ...
|
||||
}
|
||||
|
||||
func (app *appObject) Run() {
|
||||
app.logger.Info("starting up", "timestamp", time.Now())
|
||||
|
||||
// ... app code ...
|
||||
```
|
||||
|
||||
## Background
|
||||
|
||||
If the Go standard library had defined an interface for logging, this project
|
||||
probably would not be needed. Alas, here we are.
|
||||
|
||||
### Inspiration
|
||||
|
||||
Before you consider this package, please read [this blog post by the
|
||||
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
|
||||
he has to say, and it largely aligns with our own experiences.
|
||||
|
||||
### Differences from Dave's ideas
|
||||
|
||||
The main differences are:
|
||||
|
||||
1. Dave basically proposes doing away with the notion of a logging API in favor
|
||||
of `fmt.Printf()`. We disagree, especially when you consider things like output
|
||||
locations, timestamps, file and line decorations, and structured logging. This
|
||||
package restricts the logging API to just 2 types of logs: info and error.
|
||||
|
||||
Info logs are things you want to tell the user which are not errors. Error
|
||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
||||
function call and is logging that `error` *and not returning it*, use error
|
||||
logs.
|
||||
|
||||
2. Verbosity-levels on info logs. This gives developers a chance to indicate
|
||||
arbitrary grades of importance for info logs, without assigning names with
|
||||
semantic meaning such as "warning", "trace", and "debug." Superficially this
|
||||
may feel very similar, but the primary difference is the lack of semantics.
|
||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
||||
with higher verbosity means more (and less important) logs will be generated.
|
||||
|
||||
## Implementations (non-exhaustive)
|
||||
|
||||
There are implementations for the following logging libraries:
|
||||
|
||||
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
|
||||
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
|
||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
||||
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
|
||||
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
|
||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
||||
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
|
||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
||||
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
|
||||
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
|
||||
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
|
||||
|
||||
## FAQ
|
||||
|
||||
### Conceptual
|
||||
|
||||
#### Why structured logging?
|
||||
|
||||
- **Structured logs are more easily queryable**: Since you've got
|
||||
key-value pairs, it's much easier to query your structured logs for
|
||||
particular values by filtering on the contents of a particular key --
|
||||
think searching request logs for error codes, Kubernetes reconcilers for
|
||||
the name and namespace of the reconciled object, etc.
|
||||
|
||||
- **Structured logging makes it easier to have cross-referenceable logs**:
|
||||
Similarly to searchability, if you maintain conventions around your
|
||||
keys, it becomes easy to gather all log lines related to a particular
|
||||
concept.
|
||||
|
||||
- **Structured logs allow better dimensions of filtering**: if you have
|
||||
structure to your logs, you've got more precise control over how much
|
||||
information is logged -- you might choose in a particular configuration
|
||||
to log certain keys but not others, only log lines where a certain key
|
||||
matches a certain value, etc., instead of just having v-levels and names
|
||||
to key off of.
|
||||
|
||||
- **Structured logs better represent structured data**: sometimes, the
|
||||
data that you want to log is inherently structured (think tuple-link
|
||||
objects.) Structured logs allow you to preserve that structure when
|
||||
outputting.
|
||||
|
||||
#### Why V-levels?
|
||||
|
||||
**V-levels give operators an easy way to control the chattiness of log
|
||||
operations**. V-levels provide a way for a given package to distinguish
|
||||
the relative importance or verbosity of a given log message. Then, if
|
||||
a particular logger or package is logging too many messages, the user
|
||||
of the package can simply change the v-levels for that library.
|
||||
|
||||
#### Why not named levels, like Info/Warning/Error?
|
||||
|
||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
||||
from Dave's ideas](#differences-from-daves-ideas).
|
||||
|
||||
#### Why not allow format strings, too?
|
||||
|
||||
**Format strings negate many of the benefits of structured logs**:
|
||||
|
||||
- They're not easily searchable without resorting to fuzzy searching,
|
||||
regular expressions, etc.
|
||||
|
||||
- They don't store structured data well, since contents are flattened into
|
||||
a string.
|
||||
|
||||
- They're not cross-referenceable.
|
||||
|
||||
- They don't compress easily, since the message is not constant.
|
||||
|
||||
(Unless you turn positional parameters into key-value pairs with numerical
|
||||
keys, at which point you've gotten key-value logging with meaningless
|
||||
keys.)
|
||||
|
||||
### Practical
|
||||
|
||||
#### Why key-value pairs, and not a map?
|
||||
|
||||
Key-value pairs are *much* easier to optimize, especially around
|
||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
||||
that show this quite nicely.
|
||||
|
||||
While the interface ends up being a little less obvious, you get
|
||||
potentially better performance, plus avoid making users type
|
||||
`map[string]string{}` every time they want to log.
|
||||
|
||||
#### What if my V-levels differ between libraries?
|
||||
|
||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
||||
`WithName` method to pass different loggers to different libraries.
|
||||
|
||||
Generally, you should take care to ensure that you have relatively
|
||||
consistent V-levels within a given logger, however, as this makes deciding
|
||||
on what verbosity of logs to request easier.
|
||||
|
||||
#### But I really want to use a format string!
|
||||
|
||||
That's not actually a question. Assuming your question is "how do
|
||||
I convert my mental model of logging with format strings to logging with
|
||||
constant messages":
|
||||
|
||||
1. Figure out what the error actually is, as you'd write in a TL;DR style,
|
||||
and use that as a message.
|
||||
|
||||
2. For every place you'd write a format specifier, look to the word before
|
||||
it, and add that as a key value pair.
|
||||
|
||||
For instance, consider the following examples (all taken from spots in the
|
||||
Kubernetes codebase):
|
||||
|
||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
||||
error", "code", responseCode)`
|
||||
|
||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
||||
response when requesting url", "attempt", retries, "after
|
||||
seconds", seconds, "url", url)`
|
||||
|
||||
If you *really* must use a format string, use it in a key's value, and
|
||||
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
|
||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
||||
this is necessary should be few and far between.
|
||||
|
||||
#### How do I choose my V-levels?
|
||||
|
||||
This is basically the only hard constraint: increase V-levels to denote
|
||||
more verbose or more debug-y logs.
|
||||
|
||||
Otherwise, you can start out with `0` as "you always want to see this",
|
||||
`1` as "common logging that you might *possibly* want to turn off", and
|
||||
`10` as "I would like to performance-test your log collection stack."
|
||||
|
||||
Then gradually choose levels in between as you need them, working your way
|
||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
||||
info-type logs.)
|
||||
|
||||
#### How do I choose my keys?
|
||||
|
||||
Keys are fairly flexible, and can hold more or less any string
|
||||
value. For best compatibility with implementations and consistency
|
||||
with existing code in other projects, there are a few conventions you
|
||||
should consider.
|
||||
|
||||
- Make your keys human-readable.
|
||||
- Constant keys are generally a good idea.
|
||||
- Be consistent across your codebase.
|
||||
- Keys should naturally match parts of the message string.
|
||||
- Use lower case for simple keys and
|
||||
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
|
||||
more complex ones. Kubernetes is one example of a project that has
|
||||
[adopted that
|
||||
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
|
||||
|
||||
While key names are mostly unrestricted (and spaces are acceptable),
|
||||
it's generally a good idea to stick to printable ascii characters, or at
|
||||
least match the general character set of your log lines.
|
||||
|
||||
#### Why should keys be constant values?
|
||||
|
||||
The point of structured logging is to make later log processing easier. Your
|
||||
keys are, effectively, the schema of each log message. If you use different
|
||||
keys across instances of the same log line, you will make your structured logs
|
||||
much harder to use. `Sprintf()` is for values, not for keys!
|
||||
|
||||
#### Why is this not a pure interface?
|
||||
|
||||
The Logger type is implemented as a struct in order to allow the Go compiler to
|
||||
optimize things like high-V `Info` logs that are not triggered. Not all of
|
||||
these implementations are implemented yet, but this structure was suggested as
|
||||
a way to ensure they *can* be implemented. All of the real work is behind the
|
||||
`LogSink` interface.
|
||||
|
||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
54
vendor/github.com/go-logr/logr/discard.go
generated
vendored
Normal file
54
vendor/github.com/go-logr/logr/discard.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
/*
|
||||
Copyright 2020 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
// Discard returns a Logger that discards all messages logged to it. It can be
|
||||
// used whenever the caller is not interested in the logs. Logger instances
|
||||
// produced by this function always compare as equal.
|
||||
func Discard() Logger {
|
||||
return Logger{
|
||||
level: 0,
|
||||
sink: discardLogSink{},
|
||||
}
|
||||
}
|
||||
|
||||
// discardLogSink is a LogSink that discards all messages.
|
||||
type discardLogSink struct{}
|
||||
|
||||
// Verify that it actually implements the interface
|
||||
var _ LogSink = discardLogSink{}
|
||||
|
||||
func (l discardLogSink) Init(RuntimeInfo) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Enabled(int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (l discardLogSink) Info(int, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Error(error, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithValues(...interface{}) LogSink {
|
||||
return l
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithName(string) LogSink {
|
||||
return l
|
||||
}
|
787
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
Normal file
787
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
Normal file
@ -0,0 +1,787 @@
|
||||
/*
|
||||
Copyright 2021 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package funcr implements formatting of structured log messages and
|
||||
// optionally captures the call site and timestamp.
|
||||
//
|
||||
// The simplest way to use it is via its implementation of a
|
||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||
// "write" function. See New and NewJSON for details.
|
||||
//
|
||||
// Custom LogSinks
|
||||
//
|
||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||
// your own custom LogSink implementation. This is useful when the LogSink
|
||||
// needs to implement additional methods, for example.
|
||||
//
|
||||
// Formatting
|
||||
//
|
||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||
// standard JSON tags (all except "string").
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// New returns a logr.Logger which is implemented by an arbitrary function.
|
||||
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
||||
return logr.New(newSink(fn, NewFormatter(opts)))
|
||||
}
|
||||
|
||||
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
||||
// and produces JSON output.
|
||||
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
||||
fnWrapper := func(_, obj string) {
|
||||
fn(obj)
|
||||
}
|
||||
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging function. Since
|
||||
// callers only have a logr.Logger, they have to know which
|
||||
// implementation is in use, so this interface is less of an
|
||||
// abstraction and more of a way to test type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() func(prefix, args string)
|
||||
}
|
||||
|
||||
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
||||
l := &fnlogger{
|
||||
Formatter: formatter,
|
||||
write: fn,
|
||||
}
|
||||
// For skipping fnlogger.Info and fnlogger.Error.
|
||||
l.Formatter.AddCallDepth(1)
|
||||
return l
|
||||
}
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
|
||||
type Options struct {
|
||||
// LogCaller tells funcr to add a "caller" key to some or all log lines.
|
||||
// This has some overhead, so some users might not want it.
|
||||
LogCaller MessageClass
|
||||
|
||||
// LogCallerFunc tells funcr to also log the calling function name. This
|
||||
// has no effect if caller logging is not enabled (see Options.LogCaller).
|
||||
LogCallerFunc bool
|
||||
|
||||
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
|
||||
// overhead, so some users might not want it.
|
||||
LogTimestamp bool
|
||||
|
||||
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
|
||||
// is enabled. If not specified, a default format will be used. For more
|
||||
// details, see docs for Go's time.Layout.
|
||||
TimestampFormat string
|
||||
|
||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||
// more logs. Info logs at or below this level will be written, while logs
|
||||
// above this level will be discarded.
|
||||
Verbosity int
|
||||
|
||||
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
|
||||
// while a log line is being rendered. The kvList argument follows logr
|
||||
// conventions - each pair of slice elements is comprised of a string key
|
||||
// and an arbitrary value (verified and sanitized before calling this
|
||||
// hook). The value returned must follow the same conventions. This hook
|
||||
// can be used to audit or modify logged data. For example, you might want
|
||||
// to prefix all of funcr's built-in keys with some string. This hook is
|
||||
// only called for built-in (provided by funcr itself) key-value pairs.
|
||||
// Equivalent hooks are offered for key-value pairs saved via
|
||||
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
|
||||
// for user-provided pairs (see RenderArgsHook).
|
||||
RenderBuiltinsHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
|
||||
// only called for key-value pairs saved via logr.Logger.WithValues. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderValuesHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
|
||||
// called for key-value pairs passed directly to Info and Error. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderArgsHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
|
||||
// that contains a struct, etc.) it may log. Every time it finds a struct,
|
||||
// slice, array, or map the depth is increased by one. When the maximum is
|
||||
// reached, the value will be converted to a string indicating that the max
|
||||
// depth has been exceeded. If this field is not specified, a default
|
||||
// value will be used.
|
||||
MaxLogDepth int
|
||||
}
|
||||
|
||||
// MessageClass indicates which category or categories of messages to consider.
|
||||
type MessageClass int
|
||||
|
||||
const (
|
||||
// None ignores all message classes.
|
||||
None MessageClass = iota
|
||||
// All considers all message classes.
|
||||
All
|
||||
// Info only considers info messages.
|
||||
Info
|
||||
// Error only considers error messages.
|
||||
Error
|
||||
)
|
||||
|
||||
// fnlogger inherits some of its LogSink implementation from Formatter
|
||||
// and just needs to add some glue code.
|
||||
type fnlogger struct {
|
||||
Formatter
|
||||
write func(prefix, args string)
|
||||
}
|
||||
|
||||
func (l fnlogger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatError(err, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) GetUnderlying() func(prefix, args string) {
|
||||
return l.write
|
||||
}
|
||||
|
||||
// Assert conformance to the interfaces.
|
||||
var _ logr.LogSink = &fnlogger{}
|
||||
var _ logr.CallDepthLogSink = &fnlogger{}
|
||||
var _ Underlier = &fnlogger{}
|
||||
|
||||
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
|
||||
func NewFormatter(opts Options) Formatter {
|
||||
return newFormatter(opts, outputKeyValue)
|
||||
}
|
||||
|
||||
// NewFormatterJSON constructs a Formatter which emits strict JSON.
|
||||
func NewFormatterJSON(opts Options) Formatter {
|
||||
return newFormatter(opts, outputJSON)
|
||||
}
|
||||
|
||||
// Defaults for Options.
|
||||
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
|
||||
const defaultMaxLogDepth = 16
|
||||
|
||||
func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
if opts.TimestampFormat == "" {
|
||||
opts.TimestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if opts.MaxLogDepth == 0 {
|
||||
opts.MaxLogDepth = defaultMaxLogDepth
|
||||
}
|
||||
f := Formatter{
|
||||
outputFormat: outfmt,
|
||||
prefix: "",
|
||||
values: nil,
|
||||
depth: 0,
|
||||
opts: opts,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// Formatter is an opaque struct which can be embedded in a LogSink
|
||||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []interface{}
|
||||
valuesStr string
|
||||
depth int
|
||||
opts Options
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
type outputFormat int
|
||||
|
||||
const (
|
||||
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
|
||||
outputKeyValue outputFormat = iota
|
||||
// outputJSON emits strict JSON.
|
||||
outputJSON
|
||||
)
|
||||
|
||||
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
|
||||
type PseudoStruct []interface{}
|
||||
|
||||
// render produces a log line, ready to use.
|
||||
func (f Formatter) render(builtins, args []interface{}) string {
|
||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
if len(f.valuesStr) > 0 {
|
||||
if continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
continuing = true
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten renders a list of key-value pairs into a buffer. If continuing is
|
||||
// true, it assumes that the buffer has previous values and will emit a
|
||||
// separator (which depends on the output format) before the first pair it
|
||||
// writes. If escapeKeys is true, the keys are assumed to have
|
||||
// non-JSON-compatible characters in them and must be evaluated for escapes.
|
||||
//
|
||||
// This function returns a potentially modified version of kvList, which
|
||||
// ensures that there is a value for every key (adding a value if needed) and
|
||||
// that each key is a string (substituting a key if needed).
|
||||
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
|
||||
// This logic overlaps with sanitize() but saves one type-cast per key,
|
||||
// which can be measurable.
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
v := kvList[i+1]
|
||||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
if escapeKeys {
|
||||
buf.WriteString(prettyString(k))
|
||||
} else {
|
||||
// this is faster
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(':')
|
||||
} else {
|
||||
buf.WriteByte('=')
|
||||
}
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value interface{}) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
|
||||
const (
|
||||
flagRawStruct = 0x1 // do not print braces on structs
|
||||
)
|
||||
|
||||
// TODO: This is not fast. Most of the overhead goes here.
|
||||
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
|
||||
if depth > f.opts.MaxLogDepth {
|
||||
return `"<max-log-depth-exceeded>"`
|
||||
}
|
||||
|
||||
// Handle types that take full control of logging.
|
||||
if v, ok := value.(logr.Marshaler); ok {
|
||||
// Replace the value with what the type wants to get logged.
|
||||
// That then gets handled below via reflection.
|
||||
value = invokeMarshaler(v)
|
||||
}
|
||||
|
||||
// Handle types that want to format themselves.
|
||||
switch v := value.(type) {
|
||||
case fmt.Stringer:
|
||||
value = invokeStringer(v)
|
||||
case error:
|
||||
value = invokeError(v)
|
||||
}
|
||||
|
||||
// Handling the most common types without reflect is a small perf win.
|
||||
switch v := value.(type) {
|
||||
case bool:
|
||||
return strconv.FormatBool(v)
|
||||
case string:
|
||||
return prettyString(v)
|
||||
case int:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int32:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int64:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint64:
|
||||
return strconv.FormatUint(v, 10)
|
||||
case uintptr:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(v), 'f', -1, 32)
|
||||
case float64:
|
||||
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||
case complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
|
||||
case complex128:
|
||||
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
|
||||
case PseudoStruct:
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
v = f.sanitize(v)
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
t := reflect.TypeOf(value)
|
||||
if t == nil {
|
||||
return "null"
|
||||
}
|
||||
v := reflect.ValueOf(value)
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(v.Bool())
|
||||
case reflect.String:
|
||||
return prettyString(v.String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(int64(v.Int()), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return strconv.FormatUint(uint64(v.Uint()), 10)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
|
||||
case reflect.Complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
|
||||
case reflect.Complex128:
|
||||
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
|
||||
case reflect.Struct:
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fld := t.Field(i)
|
||||
if fld.PkgPath != "" {
|
||||
// reflect says this field is only defined for non-exported fields.
|
||||
continue
|
||||
}
|
||||
if !v.Field(i).CanInterface() {
|
||||
// reflect isn't clear exactly what this means, but we can't use it.
|
||||
continue
|
||||
}
|
||||
name := ""
|
||||
omitempty := false
|
||||
if tag, found := fld.Tag.Lookup("json"); found {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if comma := strings.Index(tag, ","); comma != -1 {
|
||||
if n := tag[:comma]; n != "" {
|
||||
name = n
|
||||
}
|
||||
rest := tag[comma:]
|
||||
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
|
||||
omitempty = true
|
||||
}
|
||||
} else {
|
||||
name = tag
|
||||
}
|
||||
}
|
||||
if omitempty && isEmpty(v.Field(i)) {
|
||||
continue
|
||||
}
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
|
||||
continue
|
||||
}
|
||||
if name == "" {
|
||||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(name)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
case reflect.Slice, reflect.Array:
|
||||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
case reflect.Map:
|
||||
buf.WriteByte('{')
|
||||
// This does not sort the map keys, for best perf.
|
||||
it := v.MapRange()
|
||||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
|
||||
txt, err := m.MarshalText()
|
||||
if err != nil {
|
||||
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
|
||||
} else {
|
||||
keystr = string(txt)
|
||||
}
|
||||
keystr = prettyString(keystr)
|
||||
} else {
|
||||
// prettyWithFlags will produce already-escaped values
|
||||
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
|
||||
if t.Key().Kind() != reflect.String {
|
||||
// JSON only does string keys. Unlike Go's standard JSON, we'll
|
||||
// convert just about anything to a string.
|
||||
keystr = prettyString(keystr)
|
||||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
return buf.String()
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return "null"
|
||||
}
|
||||
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
|
||||
}
|
||||
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
|
||||
}
|
||||
|
||||
func prettyString(s string) string {
|
||||
// Avoid escaping (which does allocations) if we can.
|
||||
if needsEscape(s) {
|
||||
return strconv.Quote(s)
|
||||
}
|
||||
b := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b.WriteByte('"')
|
||||
b.WriteString(s)
|
||||
b.WriteByte('"')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// needsEscape determines whether the input string needs to be escaped or not,
|
||||
// without doing any allocations.
|
||||
func needsEscape(s string) bool {
|
||||
for _, r := range s {
|
||||
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return v.Complex() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return m.MarshalLog()
|
||||
}
|
||||
|
||||
func invokeStringer(s fmt.Stringer) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func invokeError(e error) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// Caller represents the original call site for a log line, after considering
|
||||
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
|
||||
// Line fields will always be provided, while the Func field is optional.
|
||||
// Users can set the render hook fields in Options to examine logged key-value
|
||||
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
|
||||
// field is enabled for the given MessageClass.
|
||||
type Caller struct {
|
||||
// File is the basename of the file for this call site.
|
||||
File string `json:"file"`
|
||||
// Line is the line number in the file for this call site.
|
||||
Line int `json:"line"`
|
||||
// Func is the function name for this call site, or empty if
|
||||
// Options.LogCallerFunc is not enabled.
|
||||
Func string `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
func (f Formatter) caller() Caller {
|
||||
// +1 for this frame, +1 for Info/Error.
|
||||
pc, file, line, ok := runtime.Caller(f.depth + 2)
|
||||
if !ok {
|
||||
return Caller{"<unknown>", 0, ""}
|
||||
}
|
||||
fn := ""
|
||||
if f.opts.LogCallerFunc {
|
||||
if fp := runtime.FuncForPC(pc); fp != nil {
|
||||
fn = fp.Name()
|
||||
}
|
||||
}
|
||||
|
||||
return Caller{filepath.Base(file), line, fn}
|
||||
}
|
||||
|
||||
const noValue = "<no-value>"
|
||||
|
||||
func (f Formatter) nonStringKey(v interface{}) string {
|
||||
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
|
||||
}
|
||||
|
||||
// snippet produces a short snippet string of an arbitrary value.
|
||||
func (f Formatter) snippet(v interface{}) string {
|
||||
const snipLen = 16
|
||||
|
||||
snip := f.pretty(v)
|
||||
if len(snip) > snipLen {
|
||||
snip = snip[:snipLen]
|
||||
}
|
||||
return snip
|
||||
}
|
||||
|
||||
// sanitize ensures that a list of key-value pairs has a value for every key
|
||||
// (adding a value if needed) and that each key is a string (substituting a key
|
||||
// if needed).
|
||||
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
_, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
kvList[i] = f.nonStringKey(kvList[i])
|
||||
}
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
func (f *Formatter) Init(info logr.RuntimeInfo) {
|
||||
f.depth += info.CallDepth
|
||||
}
|
||||
|
||||
// Enabled checks whether an info message at the given level should be logged.
|
||||
func (f Formatter) Enabled(level int) bool {
|
||||
return level <= f.opts.Verbosity
|
||||
}
|
||||
|
||||
// GetDepth returns the current depth of this Formatter. This is useful for
|
||||
// implementations which do their own caller attribution.
|
||||
func (f Formatter) GetDepth() int {
|
||||
return f.depth
|
||||
}
|
||||
|
||||
// FormatInfo renders an Info log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "level", level, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// FormatError renders an Error log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Error {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
var loggableErr interface{}
|
||||
if err != nil {
|
||||
loggableErr = err.Error()
|
||||
}
|
||||
args = append(args, "error", loggableErr)
|
||||
return f.prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// AddName appends the specified name. funcr uses '/' characters to separate
|
||||
// name elements. Callers should not pass '/' in the provided name string, but
|
||||
// this library does not actually enforce that.
|
||||
func (f *Formatter) AddName(name string) {
|
||||
if len(f.prefix) > 0 {
|
||||
f.prefix += "/"
|
||||
}
|
||||
f.prefix += name
|
||||
}
|
||||
|
||||
// AddValues adds key-value pairs to the set of saved values to be logged with
|
||||
// each log line.
|
||||
func (f *Formatter) AddValues(kvList []interface{}) {
|
||||
// Three slice args forces a copy.
|
||||
n := len(f.values)
|
||||
f.values = append(f.values[:n:n], kvList...)
|
||||
|
||||
vals := f.values
|
||||
if hook := f.opts.RenderValuesHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
|
||||
// Pre-render values, so we don't have to do it on each Info/Error call.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
f.flatten(buf, vals, false, true) // escape user-provided keys
|
||||
f.valuesStr = buf.String()
|
||||
}
|
||||
|
||||
// AddCallDepth increases the number of stack-frames to skip when attributing
|
||||
// the log line to a file and line.
|
||||
func (f *Formatter) AddCallDepth(depth int) {
|
||||
f.depth += depth
|
||||
}
|
510
vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
510
vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
@ -0,0 +1,510 @@
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This design derives from Dave Cheney's blog:
|
||||
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
||||
|
||||
// Package logr defines a general-purpose logging API and abstract interfaces
|
||||
// to back that API. Packages in the Go ecosystem can depend on this package,
|
||||
// while callers can implement logging with whatever backend is appropriate.
|
||||
//
|
||||
// Usage
|
||||
//
|
||||
// Logging is done using a Logger instance. Logger is a concrete type with
|
||||
// methods, which defers the actual logging to a LogSink interface. The main
|
||||
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
|
||||
// are key/value pairs rather than printf-style formatted strings, emphasizing
|
||||
// "structured logging".
|
||||
//
|
||||
// With Go's standard log package, we might write:
|
||||
// log.Printf("setting target value %s", targetValue)
|
||||
//
|
||||
// With logr's structured logging, we'd write:
|
||||
// logger.Info("setting target", "value", targetValue)
|
||||
//
|
||||
// Errors are much the same. Instead of:
|
||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
||||
//
|
||||
// We'd write:
|
||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
||||
//
|
||||
// Info() and Error() are very similar, but they are separate methods so that
|
||||
// LogSink implementations can choose to do things like attach additional
|
||||
// information (such as stack traces) on calls to Error(). Error() messages are
|
||||
// always logged, regardless of the current verbosity. If there is no error
|
||||
// instance available, passing nil is valid.
|
||||
//
|
||||
// Verbosity
|
||||
//
|
||||
// Often we want to log information only when the application in "verbose
|
||||
// mode". To write log lines that are more verbose, Logger has a V() method.
|
||||
// The higher the V-level of a log line, the less critical it is considered.
|
||||
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
|
||||
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
|
||||
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
|
||||
// Error messages do not have a verbosity level and are always logged.
|
||||
//
|
||||
// Where we might have written:
|
||||
// if flVerbose >= 2 {
|
||||
// log.Printf("an unusual thing happened")
|
||||
// }
|
||||
//
|
||||
// We can write:
|
||||
// logger.V(2).Info("an unusual thing happened")
|
||||
//
|
||||
// Logger Names
|
||||
//
|
||||
// Logger instances can have name strings so that all messages logged through
|
||||
// that instance have additional context. For example, you might want to add
|
||||
// a subsystem name:
|
||||
//
|
||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
||||
//
|
||||
// The WithName() method returns a new Logger, which can be passed to
|
||||
// constructors or other functions for further use. Repeated use of WithName()
|
||||
// will accumulate name "segments". These name segments will be joined in some
|
||||
// way by the LogSink implementation. It is strongly recommended that name
|
||||
// segments contain simple identifiers (letters, digits, and hyphen), and do
|
||||
// not contain characters that could muddle the log output or confuse the
|
||||
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
|
||||
// quotes, etc).
|
||||
//
|
||||
// Saved Values
|
||||
//
|
||||
// Logger instances can store any number of key/value pairs, which will be
|
||||
// logged alongside all messages logged through that instance. For example,
|
||||
// you might want to create a Logger instance per managed object:
|
||||
//
|
||||
// With the standard log package, we might write:
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// With logr we'd write:
|
||||
// // Elsewhere: set up the logger to log the object name.
|
||||
// obj.logger = mainLogger.WithValues(
|
||||
// "name", obj.name, "namespace", obj.namespace)
|
||||
//
|
||||
// // later on...
|
||||
// obj.logger.Info("setting foo", "value", targetValue)
|
||||
//
|
||||
// Best Practices
|
||||
//
|
||||
// Logger has very few hard rules, with the goal that LogSink implementations
|
||||
// might have a lot of freedom to differentiate. There are, however, some
|
||||
// things to consider.
|
||||
//
|
||||
// The log message consists of a constant message attached to the log line.
|
||||
// This should generally be a simple description of what's occurring, and should
|
||||
// never be a format string. Variable information can then be attached using
|
||||
// named values.
|
||||
//
|
||||
// Keys are arbitrary strings, but should generally be constant values. Values
|
||||
// may be any Go value, but how the value is formatted is determined by the
|
||||
// LogSink implementation.
|
||||
//
|
||||
// Logger instances are meant to be passed around by value. Code that receives
|
||||
// such a value can call its methods without having to check whether the
|
||||
// instance is ready for use.
|
||||
//
|
||||
// Calling methods with the null logger (Logger{}) as instance will crash
|
||||
// because it has no LogSink. Therefore this null logger should never be passed
|
||||
// around. For cases where passing a logger is optional, a pointer to Logger
|
||||
// should be used.
|
||||
//
|
||||
// Key Naming Conventions
|
||||
//
|
||||
// Keys are not strictly required to conform to any specification or regex, but
|
||||
// it is recommended that they:
|
||||
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// * be constant (not dependent on input data)
|
||||
// * contain only printable characters
|
||||
// * not contain whitespace or punctuation
|
||||
// * use lower case for simple keys and lowerCamelCase for more complex ones
|
||||
//
|
||||
// These guidelines help ensure that log data is processed properly regardless
|
||||
// of the log implementation. For example, log implementations will try to
|
||||
// output JSON data or will store data for later database (e.g. SQL) queries.
|
||||
//
|
||||
// While users are generally free to use key names of their choice, it's
|
||||
// generally best to avoid using the following keys, as they're frequently used
|
||||
// by implementations:
|
||||
// * "caller": the calling information (file/line) of a particular log line
|
||||
// * "error": the underlying error value in the `Error` method
|
||||
// * "level": the log level
|
||||
// * "logger": the name of the associated logger
|
||||
// * "msg": the log message
|
||||
// * "stacktrace": the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message)
|
||||
// * "ts": the timestamp for a log line
|
||||
//
|
||||
// Implementations are encouraged to make use of these keys to represent the
|
||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
||||
// would be necessary to represent at least message and timestamp as ordinary
|
||||
// named values).
|
||||
//
|
||||
// Break Glass
|
||||
//
|
||||
// Implementations may choose to give callers access to the underlying
|
||||
// logging implementation. The recommended pattern for this is:
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
//
|
||||
// Logger grants access to the sink to enable type assertions like this:
|
||||
// func DoSomethingWithImpl(log logr.Logger) {
|
||||
// if underlier, ok := log.GetSink()(impl.Underlier) {
|
||||
// implLogger := underlier.GetUnderlying()
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Custom `With*` functions can be implemented by copying the complete
|
||||
// Logger struct and replacing the sink in the copy:
|
||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
||||
// // new logger with that modified sink. It does nothing for loggers where
|
||||
// // the sink doesn't support that parameter.
|
||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
||||
// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
|
||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
||||
// }
|
||||
// return log
|
||||
// }
|
||||
//
|
||||
// Don't use New to construct a new Logger with a LogSink retrieved from an
|
||||
// existing Logger. Source code attribution might not work correctly and
|
||||
// unexported fields in Logger get lost.
|
||||
//
|
||||
// Beware that the same LogSink instance may be shared by different logger
|
||||
// instances. Calling functions that modify the LogSink will affect all of
|
||||
// those.
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users.
|
||||
func New(sink LogSink) Logger {
|
||||
logger := Logger{}
|
||||
logger.setSink(sink)
|
||||
sink.Init(runtimeInfo)
|
||||
return logger
|
||||
}
|
||||
|
||||
// setSink stores the sink and updates any related fields. It mutates the
|
||||
// logger and thus is only safe to use for loggers that are not currently being
|
||||
// used concurrently.
|
||||
func (l *Logger) setSink(sink LogSink) {
|
||||
l.sink = sink
|
||||
}
|
||||
|
||||
// GetSink returns the stored sink.
|
||||
func (l Logger) GetSink() LogSink {
|
||||
return l.sink
|
||||
}
|
||||
|
||||
// WithSink returns a copy of the logger with the new sink.
|
||||
func (l Logger) WithSink(sink LogSink) Logger {
|
||||
l.setSink(sink)
|
||||
return l
|
||||
}
|
||||
|
||||
// Logger is an interface to an abstract logging implementation. This is a
|
||||
// concrete type for performance reasons, but all the real work is passed on to
|
||||
// a LogSink. Implementations of LogSink should provide their own constructors
|
||||
// that return Logger, not LogSink.
|
||||
//
|
||||
// The underlying sink can be accessed through GetSink and be modified through
|
||||
// WithSink. This enables the implementation of custom extensions (see "Break
|
||||
// Glass" in the package documentation). Normally the sink should be used only
|
||||
// indirectly.
|
||||
type Logger struct {
|
||||
sink LogSink
|
||||
level int
|
||||
}
|
||||
|
||||
// Enabled tests whether this Logger is enabled. For example, commandline
|
||||
// flags might be used to set the logging verbosity and disable some info logs.
|
||||
func (l Logger) Enabled() bool {
|
||||
return l.sink.Enabled(l.level)
|
||||
}
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
//
|
||||
// The msg argument should be used to add some constant description to the log
|
||||
// line. The key/value pairs can then be used to add additional variable
|
||||
// information. The key/value pairs must alternate string keys and arbitrary
|
||||
// values.
|
||||
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
|
||||
if l.Enabled() {
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Info(l.level, msg, keysAndValues...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as context.
|
||||
// It functions similarly to Info, but may have unique behavior, and should be
|
||||
// preferred for logging errors (see the package documentations for more
|
||||
// information). The log message will always be emitted, regardless of
|
||||
// verbosity level.
|
||||
//
|
||||
// The msg argument should be used to add context to any underlying error,
|
||||
// while the err argument should be used to attach the actual error that
|
||||
// triggered this log line, if present. The err parameter is optional
|
||||
// and nil may be passed instead of an error instance.
|
||||
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Error(err, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// V returns a new Logger instance for a specific verbosity level, relative to
|
||||
// this Logger. In other words, V-levels are additive. A higher verbosity
|
||||
// level means a log message is less important. Negative V-levels are treated
|
||||
// as 0.
|
||||
func (l Logger) V(level int) Logger {
|
||||
if level < 0 {
|
||||
level = 0
|
||||
}
|
||||
l.level += level
|
||||
return l
|
||||
}
|
||||
|
||||
// WithValues returns a new Logger instance with additional key/value pairs.
|
||||
// See Info for documentation on how key/value pairs work.
|
||||
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
|
||||
l.setSink(l.sink.WithValues(keysAndValues...))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithName returns a new Logger instance with the specified name element added
|
||||
// to the Logger's name. Successive calls with WithName append additional
|
||||
// suffixes to the Logger's name. It's strongly recommended that name segments
|
||||
// contain only letters, digits, and hyphens (see the package documentation for
|
||||
// more information).
|
||||
func (l Logger) WithName(name string) Logger {
|
||||
l.setSink(l.sink.WithName(name))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallDepth returns a Logger instance that offsets the call stack by the
|
||||
// specified number of frames when logging call site information, if possible.
|
||||
// This is useful for users who have helper functions between the "real" call
|
||||
// site and the actual calls to Logger methods. If depth is 0 the attribution
|
||||
// should be to the direct caller of this function. If depth is 1 the
|
||||
// attribution should skip 1 call frame, and so on. Successive calls to this
|
||||
// are additive.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// it will be called and the result returned. If the implementation does not
|
||||
// support CallDepthLogSink, the original Logger will be returned.
|
||||
//
|
||||
// To skip one level, WithCallStackHelper() should be used instead of
|
||||
// WithCallDepth(1) because it works with implementions that support the
|
||||
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
|
||||
func (l Logger) WithCallDepth(depth int) Logger {
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(depth))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallStackHelper returns a new Logger instance that skips the direct
|
||||
// caller when logging call site information, if possible. This is useful for
|
||||
// users who have helper functions between the "real" call site and the actual
|
||||
// calls to Logger methods and want to support loggers which depend on marking
|
||||
// each individual helper function, like loggers based on testing.T.
|
||||
//
|
||||
// In addition to using that new logger instance, callers also must call the
|
||||
// returned function.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// WithCallDepth(1) will be called to produce a new logger. If it supports a
|
||||
// WithCallStackHelper() method, that will be also called. If the
|
||||
// implementation does not support either of these, the original Logger will be
|
||||
// returned.
|
||||
func (l Logger) WithCallStackHelper() (func(), Logger) {
|
||||
var helper func()
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(1))
|
||||
}
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
helper = withHelper.GetCallStackHelper()
|
||||
} else {
|
||||
helper = func() {}
|
||||
}
|
||||
return helper, l
|
||||
}
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context.
|
||||
type contextKey struct{}
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// RuntimeInfo holds information that the logr "core" library knows which
|
||||
// LogSinks might want to know.
|
||||
type RuntimeInfo struct {
|
||||
// CallDepth is the number of call frames the logr library adds between the
|
||||
// end-user and the LogSink. LogSink implementations which choose to print
|
||||
// the original logging site (e.g. file & line) should climb this many
|
||||
// additional frames to find it.
|
||||
CallDepth int
|
||||
}
|
||||
|
||||
// runtimeInfo is a static global. It must not be changed at run time.
|
||||
var runtimeInfo = RuntimeInfo{
|
||||
CallDepth: 1,
|
||||
}
|
||||
|
||||
// LogSink represents a logging implementation. End-users will generally not
|
||||
// interact with this type.
|
||||
type LogSink interface {
|
||||
// Init receives optional information about the logr library for LogSink
|
||||
// implementations that need it.
|
||||
Init(info RuntimeInfo)
|
||||
|
||||
// Enabled tests whether this LogSink is enabled at the specified V-level.
|
||||
// For example, commandline flags might be used to set the logging
|
||||
// verbosity and disable some info logs.
|
||||
Enabled(level int) bool
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
// The level argument is provided for optional logging. This method will
|
||||
// only be called when Enabled(level) is true. See Logger.Info for more
|
||||
// details.
|
||||
Info(level int, msg string, keysAndValues ...interface{})
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as
|
||||
// context. See Logger.Error for more details.
|
||||
Error(err error, msg string, keysAndValues ...interface{})
|
||||
|
||||
// WithValues returns a new LogSink with additional key/value pairs. See
|
||||
// Logger.WithValues for more details.
|
||||
WithValues(keysAndValues ...interface{}) LogSink
|
||||
|
||||
// WithName returns a new LogSink with the specified name appended. See
|
||||
// Logger.WithName for more details.
|
||||
WithName(name string) LogSink
|
||||
}
|
||||
|
||||
// CallDepthLogSink represents a Logger that knows how to climb the call stack
|
||||
// to identify the original call site and can offset the depth by a specified
|
||||
// number of frames. This is useful for users who have helper functions
|
||||
// between the "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as file,
|
||||
// function, or line) would otherwise log information about the intermediate
|
||||
// helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required to
|
||||
// support it.
|
||||
type CallDepthLogSink interface {
|
||||
// WithCallDepth returns a LogSink that will offset the call
|
||||
// stack by the specified number of frames when logging call
|
||||
// site information.
|
||||
//
|
||||
// If depth is 0, the LogSink should skip exactly the number
|
||||
// of call frames defined in RuntimeInfo.CallDepth when Info
|
||||
// or Error are called, i.e. the attribution should be to the
|
||||
// direct caller of Logger.Info or Logger.Error.
|
||||
//
|
||||
// If depth is 1 the attribution should skip 1 call frame, and so on.
|
||||
// Successive calls to this are additive.
|
||||
WithCallDepth(depth int) LogSink
|
||||
}
|
||||
|
||||
// CallStackHelperLogSink represents a Logger that knows how to climb
|
||||
// the call stack to identify the original call site and can skip
|
||||
// intermediate helper functions if they mark themselves as
|
||||
// helper. Go's testing package uses that approach.
|
||||
//
|
||||
// This is useful for users who have helper functions between the
|
||||
// "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as
|
||||
// file, function, or line) would otherwise log information about the
|
||||
// intermediate helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required
|
||||
// to support it. Implementations that choose to support this must not
|
||||
// simply implement it as WithCallDepth(1), because
|
||||
// Logger.WithCallStackHelper will call both methods if they are
|
||||
// present. This should only be implemented for LogSinks that actually
|
||||
// need it, as with testing.T.
|
||||
type CallStackHelperLogSink interface {
|
||||
// GetCallStackHelper returns a function that must be called
|
||||
// to mark the direct caller as helper function when logging
|
||||
// call site information.
|
||||
GetCallStackHelper() func()
|
||||
}
|
||||
|
||||
// Marshaler is an optional interface that logged values may choose to
|
||||
// implement. Loggers with structured output, such as JSON, should
|
||||
// log the object return by the MarshalLog method instead of the
|
||||
// original value.
|
||||
type Marshaler interface {
|
||||
// MarshalLog can be used to:
|
||||
// - ensure that structs are not logged as strings when the original
|
||||
// value has a String method: return a different type without a
|
||||
// String method
|
||||
// - select which fields of a complex type should get logged:
|
||||
// return a simpler struct with fewer fields
|
||||
// - log unexported fields: return a different struct
|
||||
// with exported fields
|
||||
//
|
||||
// It may return any value of any type.
|
||||
MarshalLog() interface{}
|
||||
}
|
201
vendor/github.com/go-logr/stdr/LICENSE
generated
vendored
Normal file
201
vendor/github.com/go-logr/stdr/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
6
vendor/github.com/go-logr/stdr/README.md
generated
vendored
Normal file
6
vendor/github.com/go-logr/stdr/README.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
# Minimal Go logging using logr and Go's standard library
|
||||
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
|
||||
|
||||
This package implements the [logr interface](https://github.com/go-logr/logr)
|
||||
in terms of Go's standard log package(https://pkg.go.dev/log).
|
170
vendor/github.com/go-logr/stdr/stdr.go
generated
vendored
Normal file
170
vendor/github.com/go-logr/stdr/stdr.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package stdr implements github.com/go-logr/logr.Logger in terms of
|
||||
// Go's standard log package.
|
||||
package stdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/logr/funcr"
|
||||
)
|
||||
|
||||
// The global verbosity level. See SetVerbosity().
|
||||
var globalVerbosity int
|
||||
|
||||
// SetVerbosity sets the global level against which all info logs will be
|
||||
// compared. If this is greater than or equal to the "V" of the logger, the
|
||||
// message will be logged. A higher value here means more logs will be written.
|
||||
// The previous verbosity value is returned. This is not concurrent-safe -
|
||||
// callers must be sure to call it from only one goroutine.
|
||||
func SetVerbosity(v int) int {
|
||||
old := globalVerbosity
|
||||
globalVerbosity = v
|
||||
return old
|
||||
}
|
||||
|
||||
// New returns a logr.Logger which is implemented by Go's standard log package,
|
||||
// or something like it. If std is nil, this will use a default logger
|
||||
// instead.
|
||||
//
|
||||
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
|
||||
func New(std StdLogger) logr.Logger {
|
||||
return NewWithOptions(std, Options{})
|
||||
}
|
||||
|
||||
// NewWithOptions returns a logr.Logger which is implemented by Go's standard
|
||||
// log package, or something like it. See New for details.
|
||||
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
|
||||
if std == nil {
|
||||
// Go's log.Default() is only available in 1.16 and higher.
|
||||
std = log.New(os.Stderr, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
if opts.Depth < 0 {
|
||||
opts.Depth = 0
|
||||
}
|
||||
|
||||
fopts := funcr.Options{
|
||||
LogCaller: funcr.MessageClass(opts.LogCaller),
|
||||
}
|
||||
|
||||
sl := &logger{
|
||||
Formatter: funcr.NewFormatter(fopts),
|
||||
std: std,
|
||||
}
|
||||
|
||||
// For skipping our own logger.Info/Error.
|
||||
sl.Formatter.AddCallDepth(1 + opts.Depth)
|
||||
|
||||
return logr.New(sl)
|
||||
}
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
|
||||
type Options struct {
|
||||
// Depth biases the assumed number of call frames to the "true" caller.
|
||||
// This is useful when the calling code calls a function which then calls
|
||||
// stdr (e.g. a logging shim to another API). Values less than zero will
|
||||
// be treated as zero.
|
||||
Depth int
|
||||
|
||||
// LogCaller tells stdr to add a "caller" key to some or all log lines.
|
||||
// Go's log package has options to log this natively, too.
|
||||
LogCaller MessageClass
|
||||
|
||||
// TODO: add an option to log the date/time
|
||||
}
|
||||
|
||||
// MessageClass indicates which category or categories of messages to consider.
|
||||
type MessageClass int
|
||||
|
||||
const (
|
||||
// None ignores all message classes.
|
||||
None MessageClass = iota
|
||||
// All considers all message classes.
|
||||
All
|
||||
// Info only considers info messages.
|
||||
Info
|
||||
// Error only considers error messages.
|
||||
Error
|
||||
)
|
||||
|
||||
// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
|
||||
// this adapter.
|
||||
type StdLogger interface {
|
||||
// Output is the same as log.Output and log.Logger.Output.
|
||||
Output(calldepth int, logline string) error
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
funcr.Formatter
|
||||
std StdLogger
|
||||
}
|
||||
|
||||
var _ logr.LogSink = &logger{}
|
||||
var _ logr.CallDepthLogSink = &logger{}
|
||||
|
||||
func (l logger) Enabled(level int) bool {
|
||||
return globalVerbosity >= level
|
||||
}
|
||||
|
||||
func (l logger) Info(level int, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||
if prefix != "" {
|
||||
args = prefix + ": " + args
|
||||
}
|
||||
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
|
||||
}
|
||||
|
||||
func (l logger) Error(err error, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatError(err, msg, kvList)
|
||||
if prefix != "" {
|
||||
args = prefix + ": " + args
|
||||
}
|
||||
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
|
||||
}
|
||||
|
||||
func (l logger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l logger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
return &l
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging implementation. Since
|
||||
// callers only have a logr.Logger, they have to know which implementation is
|
||||
// in use, so this interface is less of an abstraction and more of way to test
|
||||
// type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() StdLogger
|
||||
}
|
||||
|
||||
// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
|
||||
// is itself an interface, the result may or may not be a Go log.Logger.
|
||||
func (l logger) GetUnderlying() StdLogger {
|
||||
return l.std
|
||||
}
|
25
vendor/github.com/rs/zerolog/.gitignore
generated
vendored
25
vendor/github.com/rs/zerolog/.gitignore
generated
vendored
@ -1,25 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
tmp
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
1
vendor/github.com/rs/zerolog/CNAME
generated
vendored
1
vendor/github.com/rs/zerolog/CNAME
generated
vendored
@ -1 +0,0 @@
|
||||
zerolog.io
|
21
vendor/github.com/rs/zerolog/LICENSE
generated
vendored
21
vendor/github.com/rs/zerolog/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 Olivier Poitrey
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
695
vendor/github.com/rs/zerolog/README.md
generated
vendored
695
vendor/github.com/rs/zerolog/README.md
generated
vendored
@ -1,695 +0,0 @@
|
||||
# Zero Allocation JSON Logger
|
||||
|
||||
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)
|
||||
|
||||
The zerolog package provides a fast and simple logger dedicated to JSON output.
|
||||
|
||||
Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection.
|
||||
|
||||
Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance.
|
||||
|
||||
To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging).
|
||||
|
||||
![Pretty Logging Image](pretty.png)
|
||||
|
||||
## Who uses zerolog
|
||||
|
||||
Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list.
|
||||
|
||||
## Features
|
||||
|
||||
* [Blazing fast](#benchmarks)
|
||||
* [Low to zero allocation](#benchmarks)
|
||||
* [Leveled logging](#leveled-logging)
|
||||
* [Sampling](#log-sampling)
|
||||
* [Hooks](#hooks)
|
||||
* [Contextual fields](#contextual-logging)
|
||||
* `context.Context` integration
|
||||
* [Integration with `net/http`](#integration-with-nethttp)
|
||||
* [JSON and CBOR encoding formats](#binary-encoding)
|
||||
* [Pretty logging for development](#pretty-logging)
|
||||
* [Error Logging (with optional Stacktrace)](#error-logging)
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get -u github.com/rs/zerolog/log
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Simple Logging Example
|
||||
|
||||
For simple logging, import the global logger package **github.com/rs/zerolog/log**
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// UNIX Time is faster and smaller than most timestamps
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
|
||||
log.Print("hello world")
|
||||
}
|
||||
|
||||
// Output: {"time":1516134303,"level":"debug","message":"hello world"}
|
||||
```
|
||||
> Note: By default log writes to `os.Stderr`
|
||||
> Note: The default log level for `log.Print` is *debug*
|
||||
|
||||
### Contextual Logging
|
||||
|
||||
**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
|
||||
log.Debug().
|
||||
Str("Scale", "833 cents").
|
||||
Float64("Interval", 833.09).
|
||||
Msg("Fibonacci is everywhere")
|
||||
|
||||
log.Debug().
|
||||
Str("Name", "Tom").
|
||||
Send()
|
||||
}
|
||||
|
||||
// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"}
|
||||
// Output: {"level":"debug","Name":"Tom","time":1562212768}
|
||||
```
|
||||
|
||||
> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types)
|
||||
|
||||
### Leveled Logging
|
||||
|
||||
#### Simple Leveled Logging Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
|
||||
log.Info().Msg("hello world")
|
||||
}
|
||||
|
||||
// Output: {"time":1516134303,"level":"info","message":"hello world"}
|
||||
```
|
||||
|
||||
> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this.
|
||||
|
||||
**zerolog** allows for logging at the following levels (from highest to lowest):
|
||||
|
||||
* panic (`zerolog.PanicLevel`, 5)
|
||||
* fatal (`zerolog.FatalLevel`, 4)
|
||||
* error (`zerolog.ErrorLevel`, 3)
|
||||
* warn (`zerolog.WarnLevel`, 2)
|
||||
* info (`zerolog.InfoLevel`, 1)
|
||||
* debug (`zerolog.DebugLevel`, 0)
|
||||
* trace (`zerolog.TraceLevel`, -1)
|
||||
|
||||
You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant.
|
||||
|
||||
#### Setting Global Log Level

This example uses command-line flags to demonstrate various outputs depending on the chosen log level.

```go
package main

import (
    "flag"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
    debug := flag.Bool("debug", false, "sets log level to debug")

    flag.Parse()

    // Default level for this example is info, unless debug flag is present
    zerolog.SetGlobalLevel(zerolog.InfoLevel)
    if *debug {
        zerolog.SetGlobalLevel(zerolog.DebugLevel)
    }

    log.Debug().Msg("This message appears only when log level set to Debug")
    log.Info().Msg("This message appears when log level set to Debug or Info")

    if e := log.Debug(); e.Enabled() {
        // Compute log output only if enabled.
        value := "bar"
        e.Str("foo", value).Msg("some debug message")
    }
}
```

Info Output (no flag)

```bash
$ ./logLevelExample
{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"}
```

Debug Output (debug flag set)

```bash
$ ./logLevelExample -debug
{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"}
{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"}
{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"}
```

#### Logging without Level or Message

You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below.

```go
package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Log().
        Str("foo", "bar").
        Msg("")
}

// Output: {"time":1494567715,"foo":"bar"}
```

### Error Logging

You can log errors using the `Err` method:

```go
package main

import (
    "errors"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    err := errors.New("seems we have an error here")
    log.Error().Err(err).Msg("")
}

// Output: {"level":"error","error":"seems we have an error here","time":1609085256}
```

> The default field name for errors is `error`; you can change it by setting `zerolog.ErrorFieldName` to meet your needs.

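As a small illustration (a sketch, not part of the upstream examples; the replacement field name `err` is an arbitrary choice), renaming the error field via `zerolog.ErrorFieldName` looks like this:

```go
package main

import (
    "errors"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    // Rename the error field from the default "error" to "err" for all subsequent events.
    zerolog.ErrorFieldName = "err"

    log.Error().Err(errors.New("seems we have an error here")).Msg("")
}

// Output (timestamp will differ): {"level":"error","err":"seems we have an error here","time":1609085256}
```
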
#### Error Logging with Stacktrace

Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors.

```go
package main

import (
    "github.com/pkg/errors"
    "github.com/rs/zerolog/pkgerrors"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
    zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

    err := outer()
    log.Error().Stack().Err(err).Msg("")
}

func inner() error {
    return errors.New("seems we have an error here")
}

func middle() error {
    err := inner()
    if err != nil {
        return err
    }
    return nil
}

func outer() error {
    err := middle()
    if err != nil {
        return err
    }
    return nil
}

// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683}
```

> `zerolog.ErrorStackMarshaler` must be set in order for the stack to output anything.

#### Logging Fatal Messages

```go
package main

import (
    "errors"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    err := errors.New("A repo man spends his life getting into tense situations")
    service := "myservice"

    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Fatal().
        Err(err).
        Str("service", service).
        Msgf("Cannot start %s", service)
}

// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"}
// exit status 1
```

> NOTE: Using `Msgf` generates one allocation even when the logger is disabled.

### Create logger instance to manage different outputs

```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

logger.Info().Str("foo", "bar").Msg("hello world")

// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"}
```

### Sub-loggers let you chain loggers with additional context

```go
sublogger := log.With().
    Str("component", "foo").
    Logger()
sublogger.Info().Msg("hello world")

// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"}
```

### Pretty logging

To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`:

```go
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})

log.Info().Str("foo", "bar").Msg("Hello World")

// Output: 3:04PM INF Hello World foo=bar
```

To customize the configuration and formatting:

```go
output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}
output.FormatLevel = func(i interface{}) string {
    return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
}
output.FormatMessage = func(i interface{}) string {
    return fmt.Sprintf("***%s****", i)
}
output.FormatFieldName = func(i interface{}) string {
    return fmt.Sprintf("%s:", i)
}
output.FormatFieldValue = func(i interface{}) string {
    return strings.ToUpper(fmt.Sprintf("%s", i))
}

log := zerolog.New(output).With().Timestamp().Logger()

log.Info().Str("foo", "bar").Msg("Hello World")

// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR
```

### Sub dictionary

```go
log.Info().
    Str("foo", "bar").
    Dict("dict", zerolog.Dict().
        Str("bar", "baz").
        Int("n", 1),
    ).Msg("hello world")

// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
```

### Customize automatic field names

```go
zerolog.TimestampFieldName = "t"
zerolog.LevelFieldName = "l"
zerolog.MessageFieldName = "m"

log.Info().Msg("hello world")

// Output: {"l":"info","t":1494567715,"m":"hello world"}
```

### Add contextual fields to the global logger

```go
log.Logger = log.With().Str("foo", "bar").Logger()
```

### Add file and line number to log

```go
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")

// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
```

### Thread-safe, lock-free, non-blocking writer

If your writer might be slow or is not thread-safe and you need your log producers to never be blocked by it, you can use a `diode.Writer` as follows:

```go
wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
    fmt.Printf("Logger Dropped %d messages", missed)
})
log := zerolog.New(wr)
log.Print("test")
```

You will need to install `code.cloudfoundry.org/go-diodes` (for example, via `go get code.cloudfoundry.org/go-diodes`) to use this feature.

### Log Sampling

```go
sampled := log.Sample(&zerolog.BasicSampler{N: 10})
sampled.Info().Msg("will be logged every 10 messages")

// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"}
```

More advanced sampling:

```go
// Allow 5 debug messages per 1-second period.
// Beyond that burst, only 1 in every 100 debug messages is logged.
// Other levels are not sampled.
sampled := log.Sample(zerolog.LevelSampler{
    DebugSampler: &zerolog.BurstSampler{
        Burst:       5,
        Period:      1 * time.Second,
        NextSampler: &zerolog.BasicSampler{N: 100},
    },
})
sampled.Debug().Msg("hello world")

// Output: {"time":1494567715,"level":"debug","message":"hello world"}
```

### Hooks

```go
type SeverityHook struct{}

func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
    if level != zerolog.NoLevel {
        e.Str("severity", level.String())
    }
}

hooked := log.Hook(SeverityHook{})
hooked.Warn().Msg("")

// Output: {"level":"warn","severity":"warn"}
```

### Pass a sub-logger by context

```go
ctx := log.With().Str("component", "module").Logger().WithContext(ctx)

log.Ctx(ctx).Info().Msg("hello world")

// Output: {"component":"module","level":"info","message":"hello world"}
```

### Set as standard logger output

```go
log := zerolog.New(os.Stdout).With().
    Str("foo", "bar").
    Logger()

stdlog.SetFlags(0)
stdlog.SetOutput(log)

stdlog.Print("hello world")

// Output: {"foo":"bar","message":"hello world"}
```

### Integration with `net/http`

The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.

In this example we use [alice](https://github.com/justinas/alice) to install the logger middleware, for better readability.

```go
log := zerolog.New(os.Stdout).With().
    Timestamp().
    Str("role", "my-service").
    Str("host", host).
    Logger()

c := alice.New()

// Install the logger handler with default output on the console
c = c.Append(hlog.NewHandler(log))

// Install some provided extra handlers to set some request's context fields.
// Thanks to those handlers, all our logs will come with some prepopulated fields.
c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
    hlog.FromRequest(r).Info().
        Str("method", r.Method).
        Stringer("url", r.URL).
        Int("status", status).
        Int("size", size).
        Dur("duration", duration).
        Msg("")
}))
c = c.Append(hlog.RemoteAddrHandler("ip"))
c = c.Append(hlog.UserAgentHandler("user_agent"))
c = c.Append(hlog.RefererHandler("referer"))
c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))

// Here is your final handler
h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    // Get the logger from the request's context. You can safely assume it
    // will always be there: if the handler is removed, hlog.FromRequest
    // will return a no-op logger.
    hlog.FromRequest(r).Info().
        Str("user", "current user").
        Str("status", "ok").
        Msg("Something happened")

    // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
}))
http.Handle("/", h)

if err := http.ListenAndServe(":8080", nil); err != nil {
    log.Fatal().Err(err).Msg("Startup failed")
}
```

## Multiple Log Output

`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs.

In this example, we send the log message to both `os.Stdout` and the built-in `ConsoleWriter`.

```go
func main() {
    consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout}

    multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout)

    logger := zerolog.New(multi).With().Timestamp().Logger()

    logger.Info().Msg("Hello World!")
}

// Output (Line 1: Console; Line 2: Stdout)
// 12:36PM INF Hello World!
// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"}
```

## Global Settings

Some settings can be changed and will be applied to all loggers (a short example sketch follows the list):

* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamps.
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for `time.Duration` type fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed to stderr. This handler must be thread-safe and non-blocking.

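A minimal sketch (not from the upstream README; the chosen values are illustrative only) that combines a few of these globals:

```go
package main

import (
    "time"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // Raise the minimum level and tweak field conventions for every logger.
    zerolog.SetGlobalLevel(zerolog.WarnLevel)
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs
    zerolog.DurationFieldUnit = time.Second
    zerolog.DurationFieldInteger = true

    log.Info().Msg("filtered out") // below WarnLevel, so never written

    log.Warn().Dur("elapsed", 3*time.Second).Msg("slow request")
}

// Output (timestamp will differ): {"level":"warn","elapsed":3,"time":1609086683000,"message":"slow request"}
```
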
## Field Types

### Standard Types

* `Str`
* `Bool`
* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
* `Float32`, `Float64`

### Advanced Fields

* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
* `Func`: Run a `func` only if the level is enabled.
* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`.
* `Dur`: Adds a field with `time.Duration`.
* `Dict`: Adds a sub-key/value as a field of the event.
* `RawJSON`: Adds a field with an already encoded JSON (`[]byte`)
* `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`)
* `Interface`: Uses reflection to marshal the type.

Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error`, etc.); a short sketch combining several of these fields follows.

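The snippet below is a sketch (field names and values are assumptions, not upstream examples) that puts several advanced field types on one event; output is shown with a placeholder timestamp:

```go
log.Info().
    Str("request_id", "abc123").
    Strs("tags", []string{"api", "v2"}).
    Dur("elapsed", 37*time.Millisecond).
    Hex("checksum", []byte{0xde, 0xad, 0xbe, 0xef}).
    RawJSON("payload", []byte(`{"ok":true}`)).
    Dict("peer", zerolog.Dict().
        Str("host", "10.0.0.1").
        Int("port", 443),
    ).
    Msg("request handled")

// Output: {"level":"info","request_id":"abc123","tags":["api","v2"],"elapsed":37,"checksum":"deadbeef","payload":{"ok":true},"peer":{"host":"10.0.0.1","port":443},"time":1494567715,"message":"request handled"}
```
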
## Binary Encoding

In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:

```bash
go build -tags binary_log .
```

To decode binary-encoded log files you can use any CBOR decoder. One that has been tested to work with the zerolog library is [CSD](https://github.com/toravir/csd/).

## Related Projects

* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog`
* [zerologr](https://github.com/hn8/zerologr): Implementation of `logr.LogSink` interface using `zerolog`

## Benchmarks

See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.

All operations are allocation free (those numbers *include* JSON encoding):

```text
BenchmarkLogEmpty-8        100000000    19.1 ns/op    0 B/op    0 allocs/op
BenchmarkDisabled-8        500000000    4.07 ns/op    0 B/op    0 allocs/op
BenchmarkInfo-8            30000000     42.5 ns/op    0 B/op    0 allocs/op
BenchmarkContextFields-8   30000000     44.9 ns/op    0 B/op    0 allocs/op
BenchmarkLogFields-8       10000000     184 ns/op     0 B/op    0 allocs/op
```

There are a few Go logging benchmarks and comparisons that include zerolog.

* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
* [uber-common/zap](https://github.com/uber-go/zap#performance)

Using Uber's zap comparison benchmark:

Log a message and 10 fields:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |

Log a message with a logger that already has 10 fields of context:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |

Log a static string, without any context or `printf`-style templating:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |

## Caveats

Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in the final JSON:

```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
logger.Info().
    Timestamp().
    Msg("dup")
// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
```

In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.

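A minimal sketch of the usual way to avoid the duplicate (an illustration, not from the upstream README): attach `Timestamp()` either to the logger context or to the individual event, but not to both.

```go
// Timestamp() is already part of the logger context, so it is not called again per event.
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
logger.Info().Msg("no dup")

// Output: {"level":"info","time":1494567715,"message":"no dup"}
```
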
1
vendor/github.com/rs/zerolog/_config.yml
generated
vendored
@ -1 +0,0 @@
|
||||
remote_theme: rs/gh-readme
|
233
vendor/github.com/rs/zerolog/array.go
generated
vendored
@ -1,233 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var arrayPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &Array{
|
||||
buf: make([]byte, 0, 500),
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// Array is used to prepopulate an array of items
|
||||
// which can be re-used to add to log messages.
|
||||
type Array struct {
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func putArray(a *Array) {
|
||||
// Proper usage of a sync.Pool requires each entry to have approximately
|
||||
// the same memory cost. To obtain this property when the stored type
|
||||
// contains a variably-sized buffer, we add a hard limit on the maximum buffer
|
||||
// to place back in the pool.
|
||||
//
|
||||
// See https://golang.org/issue/23199
|
||||
const maxSize = 1 << 16 // 64KiB
|
||||
if cap(a.buf) > maxSize {
|
||||
return
|
||||
}
|
||||
arrayPool.Put(a)
|
||||
}
|
||||
|
||||
// Arr creates an array to be added to an Event or Context.
|
||||
func Arr() *Array {
|
||||
a := arrayPool.Get().(*Array)
|
||||
a.buf = a.buf[:0]
|
||||
return a
|
||||
}
|
||||
|
||||
// MarshalZerologArray method here is a no-op - since data is
|
||||
// already in the needed format.
|
||||
func (*Array) MarshalZerologArray(*Array) {
|
||||
}
|
||||
|
||||
func (a *Array) write(dst []byte) []byte {
|
||||
dst = enc.AppendArrayStart(dst)
|
||||
if len(a.buf) > 0 {
|
||||
dst = append(append(dst, a.buf...))
|
||||
}
|
||||
dst = enc.AppendArrayEnd(dst)
|
||||
putArray(a)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Object marshals an object that implements the LogObjectMarshaler
|
||||
// interface and appends it to the array.
|
||||
func (a *Array) Object(obj LogObjectMarshaler) *Array {
|
||||
e := Dict()
|
||||
obj.MarshalZerologObject(e)
|
||||
e.buf = enc.AppendEndMarker(e.buf)
|
||||
a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
|
||||
putEvent(e)
|
||||
return a
|
||||
}
|
||||
|
||||
// Str appends the val as a string to the array.
|
||||
func (a *Array) Str(val string) *Array {
|
||||
a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
|
||||
return a
|
||||
}
|
||||
|
||||
// Bytes appends the val as a string to the array.
|
||||
func (a *Array) Bytes(val []byte) *Array {
|
||||
a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
|
||||
return a
|
||||
}
|
||||
|
||||
// Hex appends the val as a hex string to the array.
|
||||
func (a *Array) Hex(val []byte) *Array {
|
||||
a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
|
||||
return a
|
||||
}
|
||||
|
||||
// RawJSON adds already encoded JSON to the array.
|
||||
func (a *Array) RawJSON(val []byte) *Array {
|
||||
a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val)
|
||||
return a
|
||||
}
|
||||
|
||||
// Err serializes and appends the err to the array.
|
||||
func (a *Array) Err(err error) *Array {
|
||||
switch m := ErrorMarshalFunc(err).(type) {
|
||||
case LogObjectMarshaler:
|
||||
e := newEvent(nil, 0)
|
||||
e.buf = e.buf[:0]
|
||||
e.appendObject(m)
|
||||
a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
|
||||
putEvent(e)
|
||||
case error:
|
||||
if m == nil || isNilValue(m) {
|
||||
a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf))
|
||||
} else {
|
||||
a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
|
||||
}
|
||||
case string:
|
||||
a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m)
|
||||
default:
|
||||
a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m)
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// Bool appends the val as a bool to the array.
|
||||
func (a *Array) Bool(b bool) *Array {
|
||||
a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
|
||||
return a
|
||||
}
|
||||
|
||||
// Int appends i as an int to the array.
|
||||
func (a *Array) Int(i int) *Array {
|
||||
a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Int8 appends i as an int8 to the array.
|
||||
func (a *Array) Int8(i int8) *Array {
|
||||
a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Int16 appends i as an int16 to the array.
|
||||
func (a *Array) Int16(i int16) *Array {
|
||||
a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Int32 appends i as an int32 to the array.
|
||||
func (a *Array) Int32(i int32) *Array {
|
||||
a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Int64 appends i as an int64 to the array.
|
||||
func (a *Array) Int64(i int64) *Array {
|
||||
a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Uint appends i as a uint to the array.
|
||||
func (a *Array) Uint(i uint) *Array {
|
||||
a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Uint8 appends i as a uint8 to the array.
|
||||
func (a *Array) Uint8(i uint8) *Array {
|
||||
a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Uint16 appends i as a uint16 to the array.
|
||||
func (a *Array) Uint16(i uint16) *Array {
|
||||
a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Uint32 appends i as a uint32 to the array.
|
||||
func (a *Array) Uint32(i uint32) *Array {
|
||||
a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Uint64 appends i as a uint64 to the array.
|
||||
func (a *Array) Uint64(i uint64) *Array {
|
||||
a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// Float32 appends f as a float32 to the array.
|
||||
func (a *Array) Float32(f float32) *Array {
|
||||
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
|
||||
return a
|
||||
}
|
||||
|
||||
// Float64 appends f as a float64 to the array.
|
||||
func (a *Array) Float64(f float64) *Array {
|
||||
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
|
||||
return a
|
||||
}
|
||||
|
||||
// Time appends t formatted as a string using zerolog.TimeFieldFormat.
|
||||
func (a *Array) Time(t time.Time) *Array {
|
||||
a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
|
||||
return a
|
||||
}
|
||||
|
||||
// Dur appends d to the array.
|
||||
func (a *Array) Dur(d time.Duration) *Array {
|
||||
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
|
||||
return a
|
||||
}
|
||||
|
||||
// Interface appends i marshaled using reflection.
|
||||
func (a *Array) Interface(i interface{}) *Array {
|
||||
if obj, ok := i.(LogObjectMarshaler); ok {
|
||||
return a.Object(obj)
|
||||
}
|
||||
a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i)
|
||||
return a
|
||||
}
|
||||
|
||||
// IPAddr adds IPv4 or IPv6 address to the array
|
||||
func (a *Array) IPAddr(ip net.IP) *Array {
|
||||
a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip)
|
||||
return a
|
||||
}
|
||||
|
||||
// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array
|
||||
func (a *Array) IPPrefix(pfx net.IPNet) *Array {
|
||||
a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx)
|
||||
return a
|
||||
}
|
||||
|
||||
// MACAddr adds a MAC (Ethernet) address to the array
|
||||
func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
|
||||
a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
|
||||
return a
|
||||
}
|
409
vendor/github.com/rs/zerolog/console.go
generated
vendored
@ -1,409 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
colorBlack = iota + 30
|
||||
colorRed
|
||||
colorGreen
|
||||
colorYellow
|
||||
colorBlue
|
||||
colorMagenta
|
||||
colorCyan
|
||||
colorWhite
|
||||
|
||||
colorBold = 1
|
||||
colorDarkGray = 90
|
||||
)
|
||||
|
||||
var (
|
||||
consoleBufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bytes.NewBuffer(make([]byte, 0, 100))
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
consoleDefaultTimeFormat = time.Kitchen
|
||||
)
|
||||
|
||||
// Formatter transforms the input into a formatted string.
|
||||
type Formatter func(interface{}) string
|
||||
|
||||
// ConsoleWriter parses the JSON input and writes it in an
|
||||
// (optionally) colorized, human-friendly format to Out.
|
||||
type ConsoleWriter struct {
|
||||
// Out is the output destination.
|
||||
Out io.Writer
|
||||
|
||||
// NoColor disables the colorized output.
|
||||
NoColor bool
|
||||
|
||||
// TimeFormat specifies the format for timestamp in output.
|
||||
TimeFormat string
|
||||
|
||||
// PartsOrder defines the order of parts in output.
|
||||
PartsOrder []string
|
||||
|
||||
// PartsExclude defines parts to not display in output.
|
||||
PartsExclude []string
|
||||
|
||||
FormatTimestamp Formatter
|
||||
FormatLevel Formatter
|
||||
FormatCaller Formatter
|
||||
FormatMessage Formatter
|
||||
FormatFieldName Formatter
|
||||
FormatFieldValue Formatter
|
||||
FormatErrFieldName Formatter
|
||||
FormatErrFieldValue Formatter
|
||||
}
|
||||
|
||||
// NewConsoleWriter creates and initializes a new ConsoleWriter.
|
||||
func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
|
||||
w := ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
TimeFormat: consoleDefaultTimeFormat,
|
||||
PartsOrder: consoleDefaultPartsOrder(),
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
opt(&w)
|
||||
}
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
// Write transforms the JSON input with formatters and appends to w.Out.
|
||||
func (w ConsoleWriter) Write(p []byte) (n int, err error) {
|
||||
if w.PartsOrder == nil {
|
||||
w.PartsOrder = consoleDefaultPartsOrder()
|
||||
}
|
||||
|
||||
var buf = consoleBufPool.Get().(*bytes.Buffer)
|
||||
defer func() {
|
||||
buf.Reset()
|
||||
consoleBufPool.Put(buf)
|
||||
}()
|
||||
|
||||
var evt map[string]interface{}
|
||||
p = decodeIfBinaryToBytes(p)
|
||||
d := json.NewDecoder(bytes.NewReader(p))
|
||||
d.UseNumber()
|
||||
err = d.Decode(&evt)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("cannot decode event: %s", err)
|
||||
}
|
||||
|
||||
for _, p := range w.PartsOrder {
|
||||
w.writePart(buf, evt, p)
|
||||
}
|
||||
|
||||
w.writeFields(evt, buf)
|
||||
|
||||
err = buf.WriteByte('\n')
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
_, err = buf.WriteTo(w.Out)
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// writeFields appends formatted key-value pairs to buf.
|
||||
func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
|
||||
var fields = make([]string, 0, len(evt))
|
||||
for field := range evt {
|
||||
switch field {
|
||||
case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
|
||||
continue
|
||||
}
|
||||
fields = append(fields, field)
|
||||
}
|
||||
sort.Strings(fields)
|
||||
|
||||
if len(fields) > 0 {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
|
||||
// Move the "error" field to the front
|
||||
ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName })
|
||||
if ei < len(fields) && fields[ei] == ErrorFieldName {
|
||||
fields[ei] = ""
|
||||
fields = append([]string{ErrorFieldName}, fields...)
|
||||
var xfields = make([]string, 0, len(fields))
|
||||
for _, field := range fields {
|
||||
if field == "" { // Skip empty fields
|
||||
continue
|
||||
}
|
||||
xfields = append(xfields, field)
|
||||
}
|
||||
fields = xfields
|
||||
}
|
||||
|
||||
for i, field := range fields {
|
||||
var fn Formatter
|
||||
var fv Formatter
|
||||
|
||||
if field == ErrorFieldName {
|
||||
if w.FormatErrFieldName == nil {
|
||||
fn = consoleDefaultFormatErrFieldName(w.NoColor)
|
||||
} else {
|
||||
fn = w.FormatErrFieldName
|
||||
}
|
||||
|
||||
if w.FormatErrFieldValue == nil {
|
||||
fv = consoleDefaultFormatErrFieldValue(w.NoColor)
|
||||
} else {
|
||||
fv = w.FormatErrFieldValue
|
||||
}
|
||||
} else {
|
||||
if w.FormatFieldName == nil {
|
||||
fn = consoleDefaultFormatFieldName(w.NoColor)
|
||||
} else {
|
||||
fn = w.FormatFieldName
|
||||
}
|
||||
|
||||
if w.FormatFieldValue == nil {
|
||||
fv = consoleDefaultFormatFieldValue
|
||||
} else {
|
||||
fv = w.FormatFieldValue
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(fn(field))
|
||||
|
||||
switch fValue := evt[field].(type) {
|
||||
case string:
|
||||
if needsQuote(fValue) {
|
||||
buf.WriteString(fv(strconv.Quote(fValue)))
|
||||
} else {
|
||||
buf.WriteString(fv(fValue))
|
||||
}
|
||||
case json.Number:
|
||||
buf.WriteString(fv(fValue))
|
||||
default:
|
||||
b, err := json.Marshal(fValue)
|
||||
if err != nil {
|
||||
fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
|
||||
} else {
|
||||
fmt.Fprint(buf, fv(b))
|
||||
}
|
||||
}
|
||||
|
||||
if i < len(fields)-1 { // Skip space for last field
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// writePart appends a formatted part to buf.
|
||||
func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) {
|
||||
var f Formatter
|
||||
|
||||
if w.PartsExclude != nil && len(w.PartsExclude) > 0 {
|
||||
for _, exclude := range w.PartsExclude {
|
||||
if exclude == p {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch p {
|
||||
case LevelFieldName:
|
||||
if w.FormatLevel == nil {
|
||||
f = consoleDefaultFormatLevel(w.NoColor)
|
||||
} else {
|
||||
f = w.FormatLevel
|
||||
}
|
||||
case TimestampFieldName:
|
||||
if w.FormatTimestamp == nil {
|
||||
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
|
||||
} else {
|
||||
f = w.FormatTimestamp
|
||||
}
|
||||
case MessageFieldName:
|
||||
if w.FormatMessage == nil {
|
||||
f = consoleDefaultFormatMessage
|
||||
} else {
|
||||
f = w.FormatMessage
|
||||
}
|
||||
case CallerFieldName:
|
||||
if w.FormatCaller == nil {
|
||||
f = consoleDefaultFormatCaller(w.NoColor)
|
||||
} else {
|
||||
f = w.FormatCaller
|
||||
}
|
||||
default:
|
||||
if w.FormatFieldValue == nil {
|
||||
f = consoleDefaultFormatFieldValue
|
||||
} else {
|
||||
f = w.FormatFieldValue
|
||||
}
|
||||
}
|
||||
|
||||
var s = f(evt[p])
|
||||
|
||||
if len(s) > 0 {
|
||||
buf.WriteString(s)
|
||||
if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// needsQuote returns true when the string s should be quoted in output.
|
||||
func needsQuote(s string) bool {
|
||||
for i := range s {
|
||||
if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
|
||||
func colorize(s interface{}, c int, disabled bool) string {
|
||||
if disabled {
|
||||
return fmt.Sprintf("%s", s)
|
||||
}
|
||||
return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
|
||||
}
|
||||
|
||||
// ----- DEFAULT FORMATTERS ---------------------------------------------------
|
||||
|
||||
func consoleDefaultPartsOrder() []string {
|
||||
return []string{
|
||||
TimestampFieldName,
|
||||
LevelFieldName,
|
||||
CallerFieldName,
|
||||
MessageFieldName,
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
|
||||
if timeFormat == "" {
|
||||
timeFormat = consoleDefaultTimeFormat
|
||||
}
|
||||
return func(i interface{}) string {
|
||||
t := "<nil>"
|
||||
switch tt := i.(type) {
|
||||
case string:
|
||||
ts, err := time.Parse(TimeFieldFormat, tt)
|
||||
if err != nil {
|
||||
t = tt
|
||||
} else {
|
||||
t = ts.Format(timeFormat)
|
||||
}
|
||||
case json.Number:
|
||||
i, err := tt.Int64()
|
||||
if err != nil {
|
||||
t = tt.String()
|
||||
} else {
|
||||
var sec, nsec int64 = i, 0
|
||||
switch TimeFieldFormat {
|
||||
case TimeFormatUnixMs:
|
||||
nsec = int64(time.Duration(i) * time.Millisecond)
|
||||
sec = 0
|
||||
case TimeFormatUnixMicro:
|
||||
nsec = int64(time.Duration(i) * time.Microsecond)
|
||||
sec = 0
|
||||
}
|
||||
ts := time.Unix(sec, nsec).UTC()
|
||||
t = ts.Format(timeFormat)
|
||||
}
|
||||
}
|
||||
return colorize(t, colorDarkGray, noColor)
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatLevel(noColor bool) Formatter {
|
||||
return func(i interface{}) string {
|
||||
var l string
|
||||
if ll, ok := i.(string); ok {
|
||||
switch ll {
|
||||
case LevelTraceValue:
|
||||
l = colorize("TRC", colorMagenta, noColor)
|
||||
case LevelDebugValue:
|
||||
l = colorize("DBG", colorYellow, noColor)
|
||||
case LevelInfoValue:
|
||||
l = colorize("INF", colorGreen, noColor)
|
||||
case LevelWarnValue:
|
||||
l = colorize("WRN", colorRed, noColor)
|
||||
case LevelErrorValue:
|
||||
l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
|
||||
case LevelFatalValue:
|
||||
l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
|
||||
case LevelPanicValue:
|
||||
l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
|
||||
default:
|
||||
l = colorize("???", colorBold, noColor)
|
||||
}
|
||||
} else {
|
||||
if i == nil {
|
||||
l = colorize("???", colorBold, noColor)
|
||||
} else {
|
||||
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatCaller(noColor bool) Formatter {
|
||||
return func(i interface{}) string {
|
||||
var c string
|
||||
if cc, ok := i.(string); ok {
|
||||
c = cc
|
||||
}
|
||||
if len(c) > 0 {
|
||||
if cwd, err := os.Getwd(); err == nil {
|
||||
if rel, err := filepath.Rel(cwd, c); err == nil {
|
||||
c = rel
|
||||
}
|
||||
}
|
||||
c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor)
|
||||
}
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatMessage(i interface{}) string {
|
||||
if i == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s", i)
|
||||
}
|
||||
|
||||
func consoleDefaultFormatFieldName(noColor bool) Formatter {
|
||||
return func(i interface{}) string {
|
||||
return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatFieldValue(i interface{}) string {
|
||||
return fmt.Sprintf("%s", i)
|
||||
}
|
||||
|
||||
func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
|
||||
return func(i interface{}) string {
|
||||
return colorize(fmt.Sprintf("%s=", i), colorRed, noColor)
|
||||
}
|
||||
}
|
||||
|
||||
func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
|
||||
return func(i interface{}) string {
|
||||
return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
|
||||
}
|
||||
}
|
433
vendor/github.com/rs/zerolog/context.go
generated
vendored
@ -1,433 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Context configures a new sub-logger with contextual fields.
|
||||
type Context struct {
|
||||
l Logger
|
||||
}
|
||||
|
||||
// Logger returns the logger with the context previously set.
|
||||
func (c Context) Logger() Logger {
|
||||
return c.l
|
||||
}
|
||||
|
||||
// Fields is a helper function to use a map or slice to set fields using type assertion.
|
||||
// Only map[string]interface{} and []interface{} are accepted. []interface{} must
|
||||
// alternate string keys and arbitrary values, and extraneous ones are ignored.
|
||||
func (c Context) Fields(fields interface{}) Context {
|
||||
c.l.context = appendFields(c.l.context, fields)
|
||||
return c
|
||||
}
|
||||
|
||||
// Dict adds the field key with the dict to the logger context.
|
||||
func (c Context) Dict(key string, dict *Event) Context {
|
||||
dict.buf = enc.AppendEndMarker(dict.buf)
|
||||
c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
|
||||
putEvent(dict)
|
||||
return c
|
||||
}
|
||||
|
||||
// Array adds the field key with an array to the event context.
|
||||
// Use zerolog.Arr() to create the array or pass a type that
|
||||
// implements the LogArrayMarshaler interface.
|
||||
func (c Context) Array(key string, arr LogArrayMarshaler) Context {
|
||||
c.l.context = enc.AppendKey(c.l.context, key)
|
||||
if arr, ok := arr.(*Array); ok {
|
||||
c.l.context = arr.write(c.l.context)
|
||||
return c
|
||||
}
|
||||
var a *Array
|
||||
if aa, ok := arr.(*Array); ok {
|
||||
a = aa
|
||||
} else {
|
||||
a = Arr()
|
||||
arr.MarshalZerologArray(a)
|
||||
}
|
||||
c.l.context = a.write(c.l.context)
|
||||
return c
|
||||
}
|
||||
|
||||
// Object marshals an object that implements the LogObjectMarshaler interface.
|
||||
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
|
||||
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
|
||||
e.Object(key, obj)
|
||||
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
|
||||
putEvent(e)
|
||||
return c
|
||||
}
|
||||
|
||||
// EmbedObject marshals and embeds an object that implements the LogObjectMarshaler interface.
|
||||
func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
|
||||
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
|
||||
e.EmbedObject(obj)
|
||||
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
|
||||
putEvent(e)
|
||||
return c
|
||||
}
|
||||
|
||||
// Str adds the field key with val as a string to the logger context.
|
||||
func (c Context) Str(key, val string) Context {
|
||||
c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val)
|
||||
return c
|
||||
}
|
||||
|
||||
// Strs adds the field key with vals as a []string to the logger context.
|
||||
func (c Context) Strs(key string, vals []string) Context {
|
||||
c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals)
|
||||
return c
|
||||
}
|
||||
|
||||
// Stringer adds the field key with val.String() (or null if val is nil) to the logger context.
|
||||
func (c Context) Stringer(key string, val fmt.Stringer) Context {
|
||||
if val != nil {
|
||||
c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String())
|
||||
return c
|
||||
}
|
||||
|
||||
c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil)
|
||||
return c
|
||||
}
|
||||
|
||||
// Bytes adds the field key with val as a []byte to the logger context.
|
||||
func (c Context) Bytes(key string, val []byte) Context {
|
||||
c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val)
|
||||
return c
|
||||
}
|
||||
|
||||
// Hex adds the field key with val as a hex string to the logger context.
|
||||
func (c Context) Hex(key string, val []byte) Context {
|
||||
c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val)
|
||||
return c
|
||||
}
|
||||
|
||||
// RawJSON adds already encoded JSON to context.
|
||||
//
|
||||
// No sanity check is performed on b; it must not contain carriage returns and
|
||||
// be valid JSON.
|
||||
func (c Context) RawJSON(key string, b []byte) Context {
|
||||
c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b)
|
||||
return c
|
||||
}
|
||||
|
||||
// AnErr adds the field key with serialized err to the logger context.
|
||||
func (c Context) AnErr(key string, err error) Context {
|
||||
switch m := ErrorMarshalFunc(err).(type) {
|
||||
case nil:
|
||||
return c
|
||||
case LogObjectMarshaler:
|
||||
return c.Object(key, m)
|
||||
case error:
|
||||
if m == nil || isNilValue(m) {
|
||||
return c
|
||||
} else {
|
||||
return c.Str(key, m.Error())
|
||||
}
|
||||
case string:
|
||||
return c.Str(key, m)
|
||||
default:
|
||||
return c.Interface(key, m)
|
||||
}
|
||||
}
|
||||
|
||||
// Errs adds the field key with errs as an array of serialized errors to the
|
||||
// logger context.
|
||||
func (c Context) Errs(key string, errs []error) Context {
|
||||
arr := Arr()
|
||||
for _, err := range errs {
|
||||
switch m := ErrorMarshalFunc(err).(type) {
|
||||
case LogObjectMarshaler:
|
||||
arr = arr.Object(m)
|
||||
case error:
|
||||
if m == nil || isNilValue(m) {
|
||||
arr = arr.Interface(nil)
|
||||
} else {
|
||||
arr = arr.Str(m.Error())
|
||||
}
|
||||
case string:
|
||||
arr = arr.Str(m)
|
||||
default:
|
||||
arr = arr.Interface(m)
|
||||
}
|
||||
}
|
||||
|
||||
return c.Array(key, arr)
|
||||
}
|
||||
|
||||
// Err adds the field "error" with serialized err to the logger context.
|
||||
func (c Context) Err(err error) Context {
|
||||
return c.AnErr(ErrorFieldName, err)
|
||||
}
|
||||
|
||||
// Bool adds the field key with val as a bool to the logger context.
|
||||
func (c Context) Bool(key string, b bool) Context {
|
||||
c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
|
||||
return c
|
||||
}
|
||||
|
||||
// Bools adds the field key with val as a []bool to the logger context.
|
||||
func (c Context) Bools(key string, b []bool) Context {
|
||||
c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b)
|
||||
return c
|
||||
}
|
||||
|
||||
// Int adds the field key with i as a int to the logger context.
|
||||
func (c Context) Int(key string, i int) Context {
|
||||
c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Ints adds the field key with i as a []int to the logger context.
|
||||
func (c Context) Ints(key string, i []int) Context {
|
||||
c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Int8 adds the field key with i as a int8 to the logger context.
|
||||
func (c Context) Int8(key string, i int8) Context {
|
||||
c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Ints8 adds the field key with i as a []int8 to the logger context.
|
||||
func (c Context) Ints8(key string, i []int8) Context {
|
||||
c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Int16 adds the field key with i as a int16 to the logger context.
|
||||
func (c Context) Int16(key string, i int16) Context {
|
||||
c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Ints16 adds the field key with i as a []int16 to the logger context.
|
||||
func (c Context) Ints16(key string, i []int16) Context {
|
||||
c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Int32 adds the field key with i as a int32 to the logger context.
|
||||
func (c Context) Int32(key string, i int32) Context {
|
||||
c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Ints32 adds the field key with i as a []int32 to the logger context.
|
||||
func (c Context) Ints32(key string, i []int32) Context {
|
||||
c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Int64 adds the field key with i as a int64 to the logger context.
|
||||
func (c Context) Int64(key string, i int64) Context {
|
||||
c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Ints64 adds the field key with i as a []int64 to the logger context.
|
||||
func (c Context) Ints64(key string, i []int64) Context {
|
||||
c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uint adds the field key with i as a uint to the logger context.
|
||||
func (c Context) Uint(key string, i uint) Context {
|
||||
c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uints adds the field key with i as a []uint to the logger context.
|
||||
func (c Context) Uints(key string, i []uint) Context {
|
||||
c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uint8 adds the field key with i as a uint8 to the logger context.
|
||||
func (c Context) Uint8(key string, i uint8) Context {
|
||||
c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uints8 adds the field key with i as a []uint8 to the logger context.
|
||||
func (c Context) Uints8(key string, i []uint8) Context {
|
||||
c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uint16 adds the field key with i as a uint16 to the logger context.
|
||||
func (c Context) Uint16(key string, i uint16) Context {
|
||||
c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uints16 adds the field key with i as a []uint16 to the logger context.
|
||||
func (c Context) Uints16(key string, i []uint16) Context {
|
||||
c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uint32 adds the field key with i as a uint32 to the logger context.
|
||||
func (c Context) Uint32(key string, i uint32) Context {
|
||||
c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uints32 adds the field key with i as a []uint32 to the logger context.
|
||||
func (c Context) Uints32(key string, i []uint32) Context {
|
||||
c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uint64 adds the field key with i as a uint64 to the logger context.
|
||||
func (c Context) Uint64(key string, i uint64) Context {
|
||||
c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Uints64 adds the field key with i as a []uint64 to the logger context.
|
||||
func (c Context) Uints64(key string, i []uint64) Context {
|
||||
c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
// Float32 adds the field key with f as a float32 to the logger context.
|
||||
func (c Context) Float32(key string, f float32) Context {
|
||||
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
|
||||
return c
|
||||
}
|
||||
|
||||
// Floats32 adds the field key with f as a []float32 to the logger context.
|
||||
func (c Context) Floats32(key string, f []float32) Context {
|
||||
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
|
||||
return c
|
||||
}
|
||||
|
||||
// Float64 adds the field key with f as a float64 to the logger context.
|
||||
func (c Context) Float64(key string, f float64) Context {
|
||||
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
|
||||
return c
|
||||
}
|
||||
|
||||
// Floats64 adds the field key with f as a []float64 to the logger context.
|
||||
func (c Context) Floats64(key string, f []float64) Context {
|
||||
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
|
||||
return c
|
||||
}
|
||||
|
||||
type timestampHook struct{}
|
||||
|
||||
func (ts timestampHook) Run(e *Event, level Level, msg string) {
|
||||
e.Timestamp()
|
||||
}
|
||||
|
||||
var th = timestampHook{}
|
||||
|
||||
// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
|
||||
// To customize the key name, change zerolog.TimestampFieldName.
|
||||
//
|
||||
// NOTE: It won't dedupe the "time" key if the *Context has one already.
|
||||
func (c Context) Timestamp() Context {
|
||||
c.l = c.l.Hook(th)
|
||||
return c
|
||||
}
|
||||
|
||||
// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
|
||||
func (c Context) Time(key string, t time.Time) Context {
|
||||
c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
|
||||
return c
|
||||
}
|
||||
|
||||
// Times adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
|
||||
func (c Context) Times(key string, t []time.Time) Context {
|
||||
c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
|
||||
return c
|
||||
}
|
||||
|
||||
// Dur adds the fields key with d divided by unit and stored as a float.
|
||||
func (c Context) Dur(key string, d time.Duration) Context {
|
||||
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
|
||||
return c
|
||||
}
|
||||
|
||||
// Durs adds the fields key with d divided by unit and stored as a float.
|
||||
func (c Context) Durs(key string, d []time.Duration) Context {
|
||||
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
|
||||
return c
|
||||
}
|
||||
|
||||
// Interface adds the field key with obj marshaled using reflection.
|
||||
func (c Context) Interface(key string, i interface{}) Context {
|
||||
c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
|
||||
return c
|
||||
}
|
||||
|
||||
type callerHook struct {
|
||||
callerSkipFrameCount int
|
||||
}
|
||||
|
||||
func newCallerHook(skipFrameCount int) callerHook {
|
||||
return callerHook{callerSkipFrameCount: skipFrameCount}
|
||||
}
|
||||
|
||||
func (ch callerHook) Run(e *Event, level Level, msg string) {
|
||||
switch ch.callerSkipFrameCount {
|
||||
case useGlobalSkipFrameCount:
|
||||
// Extra frames to skip (added by hook infra).
|
||||
e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount)
|
||||
default:
|
||||
// Extra frames to skip (added by hook infra).
|
||||
e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount)
|
||||
}
|
||||
}
|
||||
|
||||
// useGlobalSkipFrameCount acts as a flag to inform callerHook.Run
|
||||
// to use the global CallerSkipFrameCount.
|
||||
const useGlobalSkipFrameCount = math.MinInt32
|
||||
|
||||
// ch is the default caller hook using the global CallerSkipFrameCount.
|
||||
var ch = newCallerHook(useGlobalSkipFrameCount)
|
||||
|
||||
// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
|
||||
func (c Context) Caller() Context {
|
||||
c.l = c.l.Hook(ch)
|
||||
return c
|
||||
}
|
||||
|
||||
// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key.
|
||||
// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger.
|
||||
// If set to -1 the global CallerSkipFrameCount will be used.
|
||||
func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context {
|
||||
c.l = c.l.Hook(newCallerHook(skipFrameCount))
|
||||
return c
|
||||
}
|
||||
|
||||
// Stack enables stack trace printing for the error passed to Err().
|
||||
func (c Context) Stack() Context {
|
||||
c.l.stack = true
|
||||
return c
|
||||
}
|
||||
|
||||
// IPAddr adds IPv4 or IPv6 Address to the context
|
||||
func (c Context) IPAddr(key string, ip net.IP) Context {
|
||||
c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip)
|
||||
return c
|
||||
}
|
||||
|
||||
// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context
|
||||
func (c Context) IPPrefix(key string, pfx net.IPNet) Context {
|
||||
c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx)
|
||||
return c
|
||||
}
|
||||
|
||||
// MACAddr adds MAC address to the context
|
||||
func (c Context) MACAddr(key string, ha net.HardwareAddr) Context {
|
||||
c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha)
|
||||
return c
|
||||
}
|
51
vendor/github.com/rs/zerolog/ctx.go
generated
vendored
@ -1,51 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
var disabledLogger *Logger
|
||||
|
||||
func init() {
|
||||
SetGlobalLevel(TraceLevel)
|
||||
l := Nop()
|
||||
disabledLogger = &l
|
||||
}
|
||||
|
||||
type ctxKey struct{}
|
||||
|
||||
// WithContext returns a copy of ctx with l associated. If an instance of Logger
|
||||
// is already in the context, the context is not updated.
|
||||
//
|
||||
// For instance, to add a field to an existing logger in the context, use this
|
||||
// notation:
|
||||
//
|
||||
// ctx := r.Context()
|
||||
// l := zerolog.Ctx(ctx)
|
||||
// l.UpdateContext(func(c Context) Context {
|
||||
// return c.Str("bar", "baz")
|
||||
// })
|
||||
func (l *Logger) WithContext(ctx context.Context) context.Context {
|
||||
if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
|
||||
if lp == l {
|
||||
// Do not store same logger.
|
||||
return ctx
|
||||
}
|
||||
} else if l.level == Disabled {
|
||||
// Do not store disabled logger.
|
||||
return ctx
|
||||
}
|
||||
return context.WithValue(ctx, ctxKey{}, l)
|
||||
}
|
||||
|
||||
// Ctx returns the Logger associated with the ctx. If no logger
|
||||
// is associated, DefaultContextLogger is returned, unless DefaultContextLogger
|
||||
// is nil, in which case a disabled logger is returned.
|
||||
func Ctx(ctx context.Context) *Logger {
|
||||
if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
|
||||
return l
|
||||
} else if l = DefaultContextLogger; l != nil {
|
||||
return l
|
||||
}
|
||||
return disabledLogger
|
||||
}
|
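A minimal round trip through the WithContext/Ctx pair documented above, assuming the upstream zerolog API; the handle function is illustrative.

package main

import (
    "context"
    "os"

    "github.com/rs/zerolog"
)

func handle(ctx context.Context) {
    // Ctx returns the logger stored by WithContext, or a disabled logger
    // (or DefaultContextLogger) if none was stored.
    zerolog.Ctx(ctx).Info().Str("step", "handle").Msg("processing")
}

func main() {
    logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
    ctx := logger.WithContext(context.Background())
    handle(ctx)
}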
vendor/github.com/rs/zerolog/encoder.go (56 lines, generated, vendored)
@@ -1,56 +0,0 @@
[vendored file removed: the internal encoder interface listing the Append* primitives (begin/end markers, bools, ints, uints, floats, strings, bytes, times, durations, IP/MAC addresses, interfaces) shared by the JSON and CBOR back ends; full listing omitted]
vendor/github.com/rs/zerolog/encoder_cbor.go (42 lines, generated, vendored)
@@ -1,42 +0,0 @@
[vendored file removed: CBOR bindings selected by the binary_log build tag; wires cbor.JSONMarshalFunc to InterfaceMarshalFunc through a closure and provides the binary-to-JSON decode helpers]
vendor/github.com/rs/zerolog/encoder_json.go (39 lines, generated, vendored)
@@ -1,39 +0,0 @@
[vendored file removed: JSON bindings selected when binary_log is not set; wires json.JSONMarshalFunc to InterfaceMarshalFunc through a closure and passes already-encoded log bytes through unchanged]
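Both encoder variants wire their package-level JSONMarshalFunc to zerolog.InterfaceMarshalFunc through a closure, so replacing the marshaller takes effect at runtime. A hedged sketch of such an override, using only the standard library:

package main

import (
    "bytes"
    "encoding/json"
    "os"

    "github.com/rs/zerolog"
)

func main() {
    // Swap the marshaller used for Interface()/Fields() values; here the
    // only change is disabling HTML escaping so &, < and > stay readable.
    zerolog.InterfaceMarshalFunc = func(v interface{}) ([]byte, error) {
        var buf bytes.Buffer
        enc := json.NewEncoder(&buf)
        enc.SetEscapeHTML(false)
        if err := enc.Encode(v); err != nil {
            return nil, err
        }
        // json.Encoder appends a newline; trim it before embedding.
        return bytes.TrimRight(buf.Bytes(), "\n"), nil
    }

    logger := zerolog.New(os.Stdout)
    logger.Info().Interface("query", "a=1&b=<2>").Msg("custom marshaller")
}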
vendor/github.com/rs/zerolog/event.go (767 lines, generated, vendored)
@@ -1,767 +0,0 @@
[vendored file removed: the Event type with its sync.Pool-based reuse, the Msg/Msgf/Send finalizers, hook dispatch, and the typed field methods (Str, Bool, Int*, Uint*, Float*, Time, Dur, Err, Fields, Caller, IPAddr, IPPrefix, MACAddr, ...); full contents omitted]
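The removed event.go recycles events through a sync.Pool but refuses to pool buffers that grew past 64 KiB, so pool entries stay roughly uniform in cost (see golang.org/issue/23199). A generic sketch of that pattern, independent of zerolog:

package bufpool

import "sync"

type entry struct {
    buf []byte
}

// maxPooledSize caps the buffers we are willing to keep alive in the pool.
const maxPooledSize = 1 << 16 // 64 KiB

var pool = sync.Pool{
    New: func() interface{} {
        return &entry{buf: make([]byte, 0, 512)}
    },
}

// get hands out an entry with an empty, pre-allocated buffer.
func get() *entry {
    e := pool.Get().(*entry)
    e.buf = e.buf[:0]
    return e
}

// put recycles e unless its buffer has grown unusually large; pooling
// arbitrarily large buffers would make entries wildly uneven in cost.
func put(e *entry) {
    if cap(e.buf) > maxPooledSize {
        return
    }
    pool.Put(e)
}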
vendor/github.com/rs/zerolog/fields.go (277 lines, generated, vendored)
@@ -1,277 +0,0 @@
[vendored file removed: appendFields/appendFieldList, the type switch that serializes the map[string]interface{} and alternating key/value []interface{} forms accepted by Fields() without reflection for common types; full contents omitted]
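The removed fields.go backs Event.Fields, which accepts either a map[string]interface{} (keys are sorted) or a flat []interface{} of alternating key/value pairs. A short usage sketch against the upstream API:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    logger := zerolog.New(os.Stdout)

    // Map form: keys are sorted before being appended.
    logger.Info().Fields(map[string]interface{}{
        "status": 200,
        "path":   "/healthz",
    }).Msg("request")

    // Slice form: alternating key/value pairs; a trailing odd value is dropped.
    logger.Info().Fields([]interface{}{
        "status", 500,
        "path", "/broken",
    }).Msg("request")
}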
vendor/github.com/rs/zerolog/globals.go (138 lines, generated, vendored)
@@ -1,138 +0,0 @@
[vendored file removed: package-level configuration (field names, level values, CallerSkipFrameCount, TimeFieldFormat, DurationFieldUnit, ErrorHandler, InterfaceMarshalFunc, DefaultContextLogger, ...) plus SetGlobalLevel/GlobalLevel and the sampling switch; full contents omitted]
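The removed globals.go holds zerolog's package-level knobs. A brief configuration sketch using identifiers from that file:

package main

import (
    "os"
    "time"

    "github.com/rs/zerolog"
)

func main() {
    // Raise the global floor: every logger drops events below Info.
    zerolog.SetGlobalLevel(zerolog.InfoLevel)

    // Unix-integer timestamps and integer millisecond durations.
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
    zerolog.DurationFieldUnit = time.Millisecond
    zerolog.DurationFieldInteger = true

    logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
    logger.Debug().Msg("suppressed by the global level")
    logger.Info().Dur("elapsed", 1500*time.Millisecond).Msg("kept")
}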
vendor/github.com/rs/zerolog/go112.go (7 lines, generated, vendored)
@@ -1,7 +0,0 @@
// +build go1.12

package zerolog

// Since go 1.12, some auto generated init functions are hidden from
// runtime.Caller.
const contextCallerSkipFrameCount = 2
vendor/github.com/rs/zerolog/hook.go (64 lines, generated, vendored)
@@ -1,64 +0,0 @@
[vendored file removed: the Hook interface, the HookFunc adapter, and LevelHook, which dispatches an event to a per-level hook; full contents omitted]
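The removed hook.go defines the Hook interface and the HookFunc adapter used by the caller hooks earlier in this diff. A small sketch of a custom hook, assuming the upstream API; the component field is illustrative:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

// tagWarnings is an ordinary function turned into a hook via zerolog.HookFunc.
func tagWarnings(e *zerolog.Event, level zerolog.Level, msg string) {
    if level >= zerolog.WarnLevel { // warn or more severe
        e.Str("component", "workgroups")
    }
}

func main() {
    logger := zerolog.New(os.Stdout).Hook(zerolog.HookFunc(tagWarnings))
    logger.Warn().Msg("queue nearly full") // carries the component field
    logger.Info().Msg("routine message")   // does not
}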
vendor/github.com/rs/zerolog/internal/cbor/README.md (56 lines, generated, vendored)
@@ -1,56 +0,0 @@
## Reference:
CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049)

## Comparison of JSON vs CBOR

Two main areas of reduction are:

1. CPU usage to write a log msg
2. Size (in bytes) of log messages.

CPU Usage savings are below:
```
name                                 JSON time/op  CBOR time/op  delta
Info-32                              15.3ns ± 1%   11.7ns ± 3%   -23.78% (p=0.000 n=9+10)
ContextFields-32                     16.2ns ± 2%   12.3ns ± 3%   -23.97% (p=0.000 n=9+9)
ContextAppend-32                     6.70ns ± 0%   6.20ns ± 0%   -7.44%  (p=0.000 n=9+9)
LogFields-32                         66.4ns ± 0%   24.6ns ± 2%   -62.89% (p=0.000 n=10+9)
LogArrayObject-32                    911ns ±11%    768ns ± 6%    -15.64% (p=0.000 n=10+10)
LogFieldType/Floats-32               70.3ns ± 2%   29.5ns ± 1%   -57.98% (p=0.000 n=10+10)
LogFieldType/Err-32                  14.0ns ± 3%   12.1ns ± 8%   -13.20% (p=0.000 n=8+10)
LogFieldType/Dur-32                  17.2ns ± 2%   13.1ns ± 1%   -24.27% (p=0.000 n=10+9)
LogFieldType/Object-32               54.3ns ±11%   52.3ns ± 7%   ~       (p=0.239 n=10+10)
LogFieldType/Ints-32                 20.3ns ± 2%   15.1ns ± 2%   -25.50% (p=0.000 n=9+10)
LogFieldType/Interfaces-32           642ns ±11%    621ns ± 9%    ~       (p=0.118 n=10+10)
LogFieldType/Interface(Objects)-32   635ns ±13%    632ns ± 9%    ~       (p=0.592 n=10+10)
LogFieldType/Times-32                294ns ± 0%    27ns ± 1%     -90.71% (p=0.000 n=10+9)
LogFieldType/Durs-32                 121ns ± 0%    33ns ± 2%     -72.44% (p=0.000 n=9+9)
LogFieldType/Interface(Object)-32    56.6ns ± 8%   52.3ns ± 8%   -7.54%  (p=0.007 n=10+10)
LogFieldType/Errs-32                 17.8ns ± 3%   16.1ns ± 2%   -9.71%  (p=0.000 n=10+9)
LogFieldType/Time-32                 40.5ns ± 1%   12.7ns ± 6%   -68.66% (p=0.000 n=8+9)
LogFieldType/Bool-32                 12.0ns ± 5%   10.2ns ± 2%   -15.18% (p=0.000 n=10+8)
LogFieldType/Bools-32                17.2ns ± 2%   12.6ns ± 4%   -26.63% (p=0.000 n=10+10)
LogFieldType/Int-32                  12.3ns ± 2%   11.2ns ± 4%   -9.27%  (p=0.000 n=9+10)
LogFieldType/Float-32                16.7ns ± 1%   12.6ns ± 2%   -24.42% (p=0.000 n=7+9)
LogFieldType/Str-32                  12.7ns ± 7%   11.3ns ± 7%   -10.88% (p=0.000 n=10+9)
LogFieldType/Strs-32                 20.3ns ± 3%   18.2ns ± 3%   -10.25% (p=0.000 n=9+10)
LogFieldType/Interface-32            183ns ±12%    175ns ± 9%    ~       (p=0.078 n=10+10)
```

Log message size savings is greatly dependent on the number and type of fields in the log message.
Assuming this log message (with an Integer, timestamp and string, in addition to level).

`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`

Two measurements were done for the log file sizes - one without any compression, second
using [compress/zlib](https://golang.org/pkg/compress/zlib/).

Results for 10,000 log messages:

| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) |
| :--- | :---: | :---: |
| JSON | 920 | 28 |
| CBOR | 550 | 28 |

The example used to calculate the above data is available in [Examples](examples).
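The CBOR encoder benchmarked above is selected at build time: encoder_cbor.go carries the binary_log build tag and encoder_json.go its negation, so the logging code itself does not change. A hedged sketch of the assumed workflow:

// Build with the CBOR (binary) encoder instead of the default JSON one:
//
//	go build -tags binary_log ./...
//
// The call sites stay identical; only the wire format differs.
package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    zerolog.New(os.Stdout).Info().Int("Fault", 41650).Msg("Some Message")
}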
vendor/github.com/rs/zerolog/internal/cbor/base.go (19 lines, generated, vendored)
@@ -1,19 +0,0 @@
package cbor

// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice.
// Making it package level instead of embedded in Encoder brings
// some extra efforts at importing, but avoids value copy when the functions
// of Encoder being invoked.
// DO REMEMBER to set this variable at importing, or
// you might get a nil pointer dereference panic at runtime.
var JSONMarshalFunc func(v interface{}) ([]byte, error)

type Encoder struct{}

// AppendKey adds a key (string) to the binary encoded log message
func (e Encoder) AppendKey(dst []byte, key string) []byte {
    if len(dst) < 1 {
        dst = e.AppendBeginMarker(dst)
    }
    return e.AppendString(dst, key)
}
vendor/github.com/rs/zerolog/internal/cbor/cbor.go (100 lines, generated, vendored)
@@ -1,100 +0,0 @@
[vendored file removed: CBOR (RFC 7049) major/additional type constants, the NaN and infinity byte sequences for float16/32/64, and appendCborTypePrefix; full contents omitted]
vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go (614 lines, generated, vendored)
@@ -1,614 +0,0 @@
|
||||
package cbor
|
||||
|
||||
// This file contains code to decode a stream of CBOR Data into JSON.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var decodeTimeZone *time.Location
|
||||
|
||||
const hexTable = "0123456789abcdef"
|
||||
|
||||
const isFloat32 = 4
|
||||
const isFloat64 = 8
|
||||
|
||||
func readNBytes(src *bufio.Reader, n int) []byte {
|
||||
ret := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
ch, e := src.ReadByte()
|
||||
if e != nil {
|
||||
panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n))
|
||||
}
|
||||
ret[i] = ch
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func readByte(src *bufio.Reader) byte {
|
||||
b, e := src.ReadByte()
|
||||
if e != nil {
|
||||
panic(fmt.Errorf("Tried to Read 1 Byte.. But hit end of file"))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
|
||||
val := int64(0)
|
||||
if minor <= 23 {
|
||||
val = int64(minor)
|
||||
} else {
|
||||
bytesToRead := 0
|
||||
switch minor {
|
||||
case additionalTypeIntUint8:
|
||||
bytesToRead = 1
|
||||
case additionalTypeIntUint16:
|
||||
bytesToRead = 2
|
||||
case additionalTypeIntUint32:
|
||||
bytesToRead = 4
|
||||
case additionalTypeIntUint64:
|
||||
bytesToRead = 8
|
||||
default:
|
||||
panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor))
|
||||
}
|
||||
pb := readNBytes(src, bytesToRead)
|
||||
for i := 0; i < bytesToRead; i++ {
|
||||
val = val * 256
|
||||
val += int64(pb[i])
|
||||
}
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func decodeInteger(src *bufio.Reader) int64 {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
|
||||
panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major))
|
||||
}
|
||||
val := decodeIntAdditonalType(src, minor)
|
||||
if major == 0 {
|
||||
return val
|
||||
}
|
||||
return (-1 - val)
|
||||
}
|
||||
|
||||
func decodeFloat(src *bufio.Reader) (float64, int) {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeSimpleAndFloat {
|
||||
panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major))
|
||||
}
|
||||
|
||||
switch minor {
|
||||
case additionalTypeFloat16:
|
||||
panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
|
||||
|
||||
case additionalTypeFloat32:
|
||||
pb := readNBytes(src, 4)
|
||||
switch string(pb) {
|
||||
case float32Nan:
|
||||
return math.NaN(), isFloat32
|
||||
case float32PosInfinity:
|
||||
return math.Inf(0), isFloat32
|
||||
case float32NegInfinity:
|
||||
return math.Inf(-1), isFloat32
|
||||
}
|
||||
n := uint32(0)
|
||||
for i := 0; i < 4; i++ {
|
||||
n = n * 256
|
||||
n += uint32(pb[i])
|
||||
}
|
||||
val := math.Float32frombits(n)
|
||||
return float64(val), isFloat32
|
||||
case additionalTypeFloat64:
|
||||
pb := readNBytes(src, 8)
|
||||
switch string(pb) {
|
||||
case float64Nan:
|
||||
return math.NaN(), isFloat64
|
||||
case float64PosInfinity:
|
||||
return math.Inf(0), isFloat64
|
||||
case float64NegInfinity:
|
||||
return math.Inf(-1), isFloat64
|
||||
}
|
||||
n := uint64(0)
|
||||
for i := 0; i < 8; i++ {
|
||||
n = n * 256
|
||||
n += uint64(pb[i])
|
||||
}
|
||||
val := math.Float64frombits(n)
|
||||
return val, isFloat64
|
||||
}
|
||||
panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor))
|
||||
}
|
||||
|
||||
func decodeStringComplex(dst []byte, s string, pos uint) []byte {
|
||||
i := int(pos)
|
||||
start := 0
|
||||
|
||||
for i < len(s) {
|
||||
b := s[i]
|
||||
if b >= utf8.RuneSelf {
|
||||
r, size := utf8.DecodeRuneInString(s[i:])
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
// In case of error, first append previous simple characters to
|
||||
// the byte slice if any and append a replacement character code
|
||||
// in place of the invalid sequence.
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
dst = append(dst, `\ufffd`...)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
continue
|
||||
}
|
||||
if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
// We encountered a character that needs to be encoded.
|
||||
// Let's append the previous simple characters to the byte slice
|
||||
// and switch our operation to read and encode the remainder
|
||||
// characters byte-by-byte.
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
switch b {
|
||||
case '"', '\\':
|
||||
dst = append(dst, '\\', b)
|
||||
case '\b':
|
||||
dst = append(dst, '\\', 'b')
|
||||
case '\f':
|
||||
dst = append(dst, '\\', 'f')
|
||||
case '\n':
|
||||
dst = append(dst, '\\', 'n')
|
||||
case '\r':
|
||||
dst = append(dst, '\\', 'r')
|
||||
case '\t':
|
||||
dst = append(dst, '\\', 't')
|
||||
default:
|
||||
dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
}
|
||||
if start < len(s) {
|
||||
dst = append(dst, s[start:]...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func decodeString(src *bufio.Reader, noQuotes bool) []byte {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeByteString {
|
||||
panic(fmt.Errorf("Major type is: %d in decodeString", major))
|
||||
}
|
||||
result := []byte{}
|
||||
if !noQuotes {
|
||||
result = append(result, '"')
|
||||
}
|
||||
length := decodeIntAdditonalType(src, minor)
|
||||
len := int(length)
|
||||
pbs := readNBytes(src, len)
|
||||
result = append(result, pbs...)
|
||||
if noQuotes {
|
||||
return result
|
||||
}
|
||||
return append(result, '"')
|
||||
}
|
||||
|
||||
func decodeUTF8String(src *bufio.Reader) []byte {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeUtf8String {
|
||||
panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
|
||||
}
|
||||
result := []byte{'"'}
|
||||
length := decodeIntAdditonalType(src, minor)
|
||||
len := int(length)
|
||||
pbs := readNBytes(src, len)
|
||||
|
||||
for i := 0; i < len; i++ {
|
||||
// Check if the character needs encoding. Control characters, slashes,
|
||||
// and the double quote need json encoding. Bytes above the ascii
|
||||
// boundary needs utf8 encoding.
|
||||
if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' {
|
||||
// We encountered a character that needs to be encoded. Switch
|
||||
// to complex version of the algorithm.
|
||||
dst := []byte{'"'}
|
||||
dst = decodeStringComplex(dst, string(pbs), uint(i))
|
||||
return append(dst, '"')
|
||||
}
|
||||
}
|
||||
// The string has no need for encoding an therefore is directly
|
||||
// appended to the byte slice.
|
||||
result = append(result, pbs...)
|
||||
return append(result, '"')
|
||||
}
|
||||
|
||||
func array2Json(src *bufio.Reader, dst io.Writer) {
|
||||
dst.Write([]byte{'['})
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeArray {
|
||||
panic(fmt.Errorf("Major type is: %d in array2Json", major))
|
||||
}
|
||||
len := 0
|
||||
unSpecifiedCount := false
|
||||
if minor == additionalTypeInfiniteCount {
|
||||
unSpecifiedCount = true
|
||||
} else {
|
||||
length := decodeIntAdditonalType(src, minor)
|
||||
len = int(length)
|
||||
}
|
||||
for i := 0; unSpecifiedCount || i < len; i++ {
|
||||
if unSpecifiedCount {
|
||||
pb, e := src.Peek(1)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
|
||||
readByte(src)
|
||||
break
|
||||
}
|
||||
}
|
||||
cbor2JsonOneObject(src, dst)
|
||||
if unSpecifiedCount {
|
||||
pb, e := src.Peek(1)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
|
||||
readByte(src)
|
||||
break
|
||||
}
|
||||
dst.Write([]byte{','})
|
||||
} else if i+1 < len {
|
||||
dst.Write([]byte{','})
|
||||
}
|
||||
}
|
||||
dst.Write([]byte{']'})
|
||||
}
|
||||
|
||||
func map2Json(src *bufio.Reader, dst io.Writer) {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeMap {
|
||||
panic(fmt.Errorf("Major type is: %d in map2Json", major))
|
||||
}
|
||||
len := 0
|
||||
unSpecifiedCount := false
|
||||
if minor == additionalTypeInfiniteCount {
|
||||
unSpecifiedCount = true
|
||||
} else {
|
||||
length := decodeIntAdditonalType(src, minor)
|
||||
len = int(length)
|
||||
}
|
||||
dst.Write([]byte{'{'})
|
||||
for i := 0; unSpecifiedCount || i < len; i++ {
|
||||
if unSpecifiedCount {
|
||||
pb, e := src.Peek(1)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
|
||||
readByte(src)
|
||||
break
|
||||
}
|
||||
}
|
||||
cbor2JsonOneObject(src, dst)
|
||||
if i%2 == 0 {
|
||||
// Even position values are keys.
|
||||
dst.Write([]byte{':'})
|
||||
} else {
|
||||
if unSpecifiedCount {
|
||||
pb, e := src.Peek(1)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
|
||||
readByte(src)
|
||||
break
|
||||
}
|
||||
dst.Write([]byte{','})
|
||||
} else if i+1 < len {
|
||||
dst.Write([]byte{','})
|
||||
}
|
||||
}
|
||||
}
|
||||
dst.Write([]byte{'}'})
|
||||
}
|
||||
|
||||
func decodeTagData(src *bufio.Reader) []byte {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeTags {
|
||||
panic(fmt.Errorf("Major type is: %d in decodeTagData", major))
|
||||
}
|
||||
switch minor {
|
||||
case additionalTypeTimestamp:
|
||||
return decodeTimeStamp(src)
|
||||
|
||||
// Tag value is larger than 256 (so uint16).
|
||||
case additionalTypeIntUint16:
|
||||
val := decodeIntAdditonalType(src, minor)
|
||||
|
||||
switch uint16(val) {
|
||||
case additionalTypeEmbeddedJSON:
|
||||
pb := readByte(src)
|
||||
dataMajor := pb & maskOutAdditionalType
|
||||
if dataMajor != majorTypeByteString {
|
||||
panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor))
|
||||
}
|
||||
src.UnreadByte()
|
||||
return decodeString(src, true)
|
||||
|
||||
case additionalTypeTagNetworkAddr:
|
||||
octets := decodeString(src, true)
|
||||
ss := []byte{'"'}
|
||||
switch len(octets) {
|
||||
case 6: // MAC address.
|
||||
ha := net.HardwareAddr(octets)
|
||||
ss = append(append(ss, ha.String()...), '"')
|
||||
case 4: // IPv4 address.
|
||||
fallthrough
|
||||
case 16: // IPv6 address.
|
||||
ip := net.IP(octets)
|
||||
ss = append(append(ss, ip.String()...), '"')
|
||||
default:
|
||||
panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets)))
|
||||
}
|
||||
return ss
|
||||
|
||||
case additionalTypeTagNetworkPrefix:
|
||||
pb := readByte(src)
|
||||
if pb != byte(majorTypeMap|0x1) {
|
||||
panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected"))
|
||||
}
|
||||
octets := decodeString(src, true)
|
||||
val := decodeInteger(src)
|
||||
ip := net.IP(octets)
|
||||
var mask net.IPMask
|
||||
pfxLen := int(val)
|
||||
if len(octets) == 4 {
|
||||
mask = net.CIDRMask(pfxLen, 32)
|
||||
} else {
|
||||
mask = net.CIDRMask(pfxLen, 128)
|
||||
}
|
||||
ipPfx := net.IPNet{IP: ip, Mask: mask}
|
||||
ss := []byte{'"'}
|
||||
ss = append(append(ss, ipPfx.String()...), '"')
|
||||
return ss
|
||||
|
||||
case additionalTypeTagHexString:
|
||||
octets := decodeString(src, true)
|
||||
ss := []byte{'"'}
|
||||
for _, v := range octets {
|
||||
ss = append(ss, hexTable[v>>4], hexTable[v&0x0f])
|
||||
}
|
||||
return append(ss, '"')
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
|
||||
}
|
||||
}
|
||||
panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor))
|
||||
}
|
||||
|
||||
func decodeTimeStamp(src *bufio.Reader) []byte {
|
||||
pb := readByte(src)
|
||||
src.UnreadByte()
|
||||
tsMajor := pb & maskOutAdditionalType
|
||||
if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt {
|
||||
n := decodeInteger(src)
|
||||
t := time.Unix(n, 0)
|
||||
if decodeTimeZone != nil {
|
||||
t = t.In(decodeTimeZone)
|
||||
} else {
|
||||
t = t.In(time.UTC)
|
||||
}
|
||||
tsb := []byte{}
|
||||
tsb = append(tsb, '"')
|
||||
tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat)
|
||||
tsb = append(tsb, '"')
|
||||
return tsb
|
||||
} else if tsMajor == majorTypeSimpleAndFloat {
|
||||
n, _ := decodeFloat(src)
|
||||
secs := int64(n)
|
||||
n -= float64(secs)
|
||||
n *= float64(1e9)
|
||||
t := time.Unix(secs, int64(n))
|
||||
if decodeTimeZone != nil {
|
||||
t = t.In(decodeTimeZone)
|
||||
} else {
|
||||
t = t.In(time.UTC)
|
||||
}
|
||||
tsb := []byte{}
|
||||
tsb = append(tsb, '"')
|
||||
tsb = t.AppendFormat(tsb, NanoTimeFieldFormat)
|
||||
tsb = append(tsb, '"')
|
||||
return tsb
|
||||
}
|
||||
panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor))
|
||||
}
|
||||
|
||||
func decodeSimpleFloat(src *bufio.Reader) []byte {
|
||||
pb := readByte(src)
|
||||
major := pb & maskOutAdditionalType
|
||||
minor := pb & maskOutMajorType
|
||||
if major != majorTypeSimpleAndFloat {
|
||||
panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major))
|
||||
}
|
||||
switch minor {
|
||||
case additionalTypeBoolTrue:
|
||||
return []byte("true")
|
||||
case additionalTypeBoolFalse:
|
||||
return []byte("false")
|
||||
case additionalTypeNull:
|
||||
return []byte("null")
|
||||
case additionalTypeFloat16:
|
||||
fallthrough
|
||||
case additionalTypeFloat32:
|
||||
fallthrough
|
||||
case additionalTypeFloat64:
|
||||
src.UnreadByte()
|
||||
v, bc := decodeFloat(src)
|
||||
ba := []byte{}
|
||||
switch {
|
||||
case math.IsNaN(v):
|
||||
return []byte("\"NaN\"")
|
||||
case math.IsInf(v, 1):
|
||||
return []byte("\"+Inf\"")
|
||||
case math.IsInf(v, -1):
|
||||
return []byte("\"-Inf\"")
|
||||
}
|
||||
if bc == isFloat32 {
|
||||
ba = strconv.AppendFloat(ba, v, 'f', -1, 32)
|
||||
} else if bc == isFloat64 {
|
||||
ba = strconv.AppendFloat(ba, v, 'f', -1, 64)
|
||||
} else {
|
||||
panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc))
|
||||
}
|
||||
return ba
|
||||
default:
|
||||
panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor))
|
||||
}
|
||||
}
|
||||
|
||||
func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) {
|
||||
pb, e := src.Peek(1)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
major := (pb[0] & maskOutAdditionalType)
|
||||
|
||||
switch major {
|
||||
case majorTypeUnsignedInt:
|
||||
fallthrough
|
||||
case majorTypeNegativeInt:
|
||||
n := decodeInteger(src)
|
||||
dst.Write([]byte(strconv.Itoa(int(n))))
|
||||
|
||||
case majorTypeByteString:
|
||||
s := decodeString(src, false)
|
||||
dst.Write(s)
|
||||
|
||||
case majorTypeUtf8String:
|
||||
s := decodeUTF8String(src)
|
||||
dst.Write(s)
|
||||
|
||||
case majorTypeArray:
|
||||
array2Json(src, dst)
|
||||
|
||||
case majorTypeMap:
|
||||
map2Json(src, dst)
|
||||
|
||||
case majorTypeTags:
|
||||
s := decodeTagData(src)
|
||||
dst.Write(s)
|
||||
|
||||
case majorTypeSimpleAndFloat:
|
||||
s := decodeSimpleFloat(src)
|
||||
dst.Write(s)
|
||||
}
|
||||
}
|
||||
|
||||
func moreBytesToRead(src *bufio.Reader) bool {
|
||||
_, e := src.ReadByte()
|
||||
if e == nil {
|
||||
src.UnreadByte()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Cbor2JsonManyObjects decodes all the CBOR Objects read from src
|
||||
// reader. It keeps on decoding until reader returns EOF (error when reading).
|
||||
// Decoded string is written to the dst. At the end of every CBOR Object
|
||||
// newline is written to the output stream.
|
||||
//
|
||||
// Returns error (if any) that was encountered during decode.
|
||||
// The child functions will generate a panic when error is encountered and
|
||||
// this function will recover non-runtime Errors and return the reason as error.
|
||||
func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if _, ok := r.(runtime.Error); ok {
|
||||
panic(r)
|
||||
}
|
||||
err = r.(error)
|
||||
}
|
||||
}()
|
||||
bufRdr := bufio.NewReader(src)
|
||||
for moreBytesToRead(bufRdr) {
|
||||
cbor2JsonOneObject(bufRdr, dst)
|
||||
dst.Write([]byte("\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Detect if the bytes to be printed is Binary or not.
|
||||
func binaryFmt(p []byte) bool {
|
||||
if len(p) > 0 && p[0] > 0x7F {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getReader(str string) *bufio.Reader {
|
||||
return bufio.NewReader(strings.NewReader(str))
|
||||
}
|
||||
|
||||
// DecodeIfBinaryToString converts a binary formatted log msg to a
|
||||
// JSON formatted String Log message - suitable for printing to Console/Syslog.
|
||||
func DecodeIfBinaryToString(in []byte) string {
|
||||
if binaryFmt(in) {
|
||||
var b bytes.Buffer
|
||||
Cbor2JsonManyObjects(strings.NewReader(string(in)), &b)
|
||||
return b.String()
|
||||
}
|
||||
return string(in)
|
||||
}
|
||||
|
||||
// DecodeObjectToStr checks if the input is a binary format, if so,
|
||||
// it will decode a single Object and return the decoded string.
|
||||
func DecodeObjectToStr(in []byte) string {
|
||||
if binaryFmt(in) {
|
||||
var b bytes.Buffer
|
||||
cbor2JsonOneObject(getReader(string(in)), &b)
|
||||
return b.String()
|
||||
}
|
||||
return string(in)
|
||||
}
|
||||
|
||||
// DecodeIfBinaryToBytes checks if the input is a binary format, if so,
|
||||
// it will decode all Objects and return the decoded string as byte array.
|
||||
func DecodeIfBinaryToBytes(in []byte) []byte {
|
||||
if binaryFmt(in) {
|
||||
var b bytes.Buffer
|
||||
Cbor2JsonManyObjects(bytes.NewReader(in), &b)
|
||||
return b.Bytes()
|
||||
}
|
||||
return in
|
||||
}
|
68
vendor/github.com/rs/zerolog/internal/cbor/string.go
generated
vendored
68
vendor/github.com/rs/zerolog/internal/cbor/string.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
package cbor
|
||||
|
||||
// AppendStrings encodes and adds an array of strings to the dst byte array.
|
||||
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendString(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendString encodes and adds a string to the dst byte array.
|
||||
func (Encoder) AppendString(dst []byte, s string) []byte {
|
||||
major := majorTypeUtf8String
|
||||
|
||||
l := len(s)
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l))
|
||||
}
|
||||
return append(dst, s...)
|
||||
}
|
||||
|
||||
// AppendBytes encodes and adds an array of bytes to the dst byte array.
|
||||
func (Encoder) AppendBytes(dst, s []byte) []byte {
|
||||
major := majorTypeByteString
|
||||
|
||||
l := len(s)
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
return append(dst, s...)
|
||||
}
|
||||
|
||||
// AppendEmbeddedJSON adds a tag and embeds input JSON as such.
|
||||
func AppendEmbeddedJSON(dst, s []byte) []byte {
|
||||
major := majorTypeTags
|
||||
minor := additionalTypeEmbeddedJSON
|
||||
|
||||
// Append the TAG to indicate this is Embedded JSON.
|
||||
dst = append(dst, byte(major|additionalTypeIntUint16))
|
||||
dst = append(dst, byte(minor>>8))
|
||||
dst = append(dst, byte(minor&0xff))
|
||||
|
||||
// Append the JSON Object as Byte String.
|
||||
major = majorTypeByteString
|
||||
|
||||
l := len(s)
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
return append(dst, s...)
|
||||
}
|
93
vendor/github.com/rs/zerolog/internal/cbor/time.go
generated
vendored
93
vendor/github.com/rs/zerolog/internal/cbor/time.go
generated
vendored
@ -1,93 +0,0 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
|
||||
major := majorTypeTags
|
||||
minor := additionalTypeTimestamp
|
||||
dst = append(dst, byte(major|minor))
|
||||
secs := t.Unix()
|
||||
var val uint64
|
||||
if secs < 0 {
|
||||
major = majorTypeNegativeInt
|
||||
val = uint64(-secs - 1)
|
||||
} else {
|
||||
major = majorTypeUnsignedInt
|
||||
val = uint64(secs)
|
||||
}
|
||||
dst = appendCborTypePrefix(dst, major, uint64(val))
|
||||
return dst
|
||||
}
|
||||
|
||||
func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
|
||||
major := majorTypeTags
|
||||
minor := additionalTypeTimestamp
|
||||
dst = append(dst, byte(major|minor))
|
||||
secs := t.Unix()
|
||||
nanos := t.Nanosecond()
|
||||
var val float64
|
||||
val = float64(secs)*1.0 + float64(nanos)*1E-9
|
||||
return e.AppendFloat64(dst, val)
|
||||
}
|
||||
|
||||
// AppendTime encodes and adds a timestamp to the dst byte array.
|
||||
func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte {
|
||||
utc := t.UTC()
|
||||
if utc.Nanosecond() == 0 {
|
||||
return appendIntegerTimestamp(dst, utc)
|
||||
}
|
||||
return e.appendFloatTimestamp(dst, utc)
|
||||
}
|
||||
|
||||
// AppendTimes encodes and adds an array of timestamps to the dst byte array.
|
||||
func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
|
||||
for _, t := range vals {
|
||||
dst = e.AppendTime(dst, t, unused)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendDuration encodes and adds a duration to the dst byte array.
|
||||
// useInt field indicates whether to store the duration as seconds (integer) or
|
||||
// as seconds+nanoseconds (float).
|
||||
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
|
||||
if useInt {
|
||||
return e.AppendInt64(dst, int64(d/unit))
|
||||
}
|
||||
return e.AppendFloat64(dst, float64(d)/float64(unit))
|
||||
}
|
||||
|
||||
// AppendDurations encodes and adds an array of durations to the dst byte array.
|
||||
// useInt field indicates whether to store the duration as seconds (integer) or
|
||||
// as seconds+nanoseconds (float).
|
||||
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, d := range vals {
|
||||
dst = e.AppendDuration(dst, d, unit, useInt)
|
||||
}
|
||||
return dst
|
||||
}
|
477
vendor/github.com/rs/zerolog/internal/cbor/types.go
generated
vendored
477
vendor/github.com/rs/zerolog/internal/cbor/types.go
generated
vendored
@ -1,477 +0,0 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
)
|
||||
|
||||
// AppendNil inserts a 'Nil' object into the dst byte array.
|
||||
func (Encoder) AppendNil(dst []byte) []byte {
|
||||
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
|
||||
}
|
||||
|
||||
// AppendBeginMarker inserts a map start into the dst byte array.
|
||||
func (Encoder) AppendBeginMarker(dst []byte) []byte {
|
||||
return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
|
||||
}
|
||||
|
||||
// AppendEndMarker inserts a map end into the dst byte array.
|
||||
func (Encoder) AppendEndMarker(dst []byte) []byte {
|
||||
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
|
||||
}
|
||||
|
||||
// AppendObjectData takes an object in form of a byte array and appends to dst.
|
||||
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
|
||||
// BeginMarker is present in the dst, which
|
||||
// should not be copied when appending to existing data.
|
||||
return append(dst, o[1:]...)
|
||||
}
|
||||
|
||||
// AppendArrayStart adds markers to indicate the start of an array.
|
||||
func (Encoder) AppendArrayStart(dst []byte) []byte {
|
||||
return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
|
||||
}
|
||||
|
||||
// AppendArrayEnd adds markers to indicate the end of an array.
|
||||
func (Encoder) AppendArrayEnd(dst []byte) []byte {
|
||||
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
|
||||
}
|
||||
|
||||
// AppendArrayDelim adds markers to indicate end of a particular array element.
|
||||
func (Encoder) AppendArrayDelim(dst []byte) []byte {
|
||||
//No delimiters needed in cbor
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendLineBreak is a noop that keep API compat with json encoder.
|
||||
func (Encoder) AppendLineBreak(dst []byte) []byte {
|
||||
// No line breaks needed in binary format.
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendBool encodes and inserts a boolean value into the dst byte array.
|
||||
func (Encoder) AppendBool(dst []byte, val bool) []byte {
|
||||
b := additionalTypeBoolFalse
|
||||
if val {
|
||||
b = additionalTypeBoolTrue
|
||||
}
|
||||
return append(dst, byte(majorTypeSimpleAndFloat|b))
|
||||
}
|
||||
|
||||
// AppendBools encodes and inserts an array of boolean values into the dst byte array.
|
||||
func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendBool(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt encodes and inserts an integer value into the dst byte array.
|
||||
func (Encoder) AppendInt(dst []byte, val int) []byte {
|
||||
major := majorTypeUnsignedInt
|
||||
contentVal := val
|
||||
if val < 0 {
|
||||
major = majorTypeNegativeInt
|
||||
contentVal = -val - 1
|
||||
}
|
||||
if contentVal <= additionalMax {
|
||||
lb := byte(contentVal)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInts encodes and inserts an array of integer values into the dst byte array.
|
||||
func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendInt(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt8 encodes and inserts an int8 value into the dst byte array.
|
||||
func (e Encoder) AppendInt8(dst []byte, val int8) []byte {
|
||||
return e.AppendInt(dst, int(val))
|
||||
}
|
||||
|
||||
// AppendInts8 encodes and inserts an array of integer values into the dst byte array.
|
||||
func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendInt(dst, int(v))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt16 encodes and inserts a int16 value into the dst byte array.
|
||||
func (e Encoder) AppendInt16(dst []byte, val int16) []byte {
|
||||
return e.AppendInt(dst, int(val))
|
||||
}
|
||||
|
||||
// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
|
||||
func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendInt(dst, int(v))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt32 encodes and inserts a int32 value into the dst byte array.
|
||||
func (e Encoder) AppendInt32(dst []byte, val int32) []byte {
|
||||
return e.AppendInt(dst, int(val))
|
||||
}
|
||||
|
||||
// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
|
||||
func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendInt(dst, int(v))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt64 encodes and inserts a int64 value into the dst byte array.
|
||||
func (Encoder) AppendInt64(dst []byte, val int64) []byte {
|
||||
major := majorTypeUnsignedInt
|
||||
contentVal := val
|
||||
if val < 0 {
|
||||
major = majorTypeNegativeInt
|
||||
contentVal = -val - 1
|
||||
}
|
||||
if contentVal <= additionalMax {
|
||||
lb := byte(contentVal)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
|
||||
func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendInt64(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
|
||||
func (e Encoder) AppendUint(dst []byte, val uint) []byte {
|
||||
return e.AppendInt64(dst, int64(val))
|
||||
}
|
||||
|
||||
// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
|
||||
func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendUint(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array.
|
||||
func (e Encoder) AppendUint8(dst []byte, val uint8) []byte {
|
||||
return e.AppendUint(dst, uint(val))
|
||||
}
|
||||
|
||||
// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
|
||||
func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendUint8(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
|
||||
func (e Encoder) AppendUint16(dst []byte, val uint16) []byte {
|
||||
return e.AppendUint(dst, uint(val))
|
||||
}
|
||||
|
||||
// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
|
||||
func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendUint16(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
|
||||
func (e Encoder) AppendUint32(dst []byte, val uint32) []byte {
|
||||
return e.AppendUint(dst, uint(val))
|
||||
}
|
||||
|
||||
// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
|
||||
func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendUint32(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
|
||||
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
|
||||
major := majorTypeUnsignedInt
|
||||
contentVal := val
|
||||
if contentVal <= additionalMax {
|
||||
lb := byte(contentVal)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
|
||||
func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendUint64(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
|
||||
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
|
||||
switch {
|
||||
case math.IsNaN(float64(val)):
|
||||
return append(dst, "\xfa\x7f\xc0\x00\x00"...)
|
||||
case math.IsInf(float64(val), 1):
|
||||
return append(dst, "\xfa\x7f\x80\x00\x00"...)
|
||||
case math.IsInf(float64(val), -1):
|
||||
return append(dst, "\xfa\xff\x80\x00\x00"...)
|
||||
}
|
||||
major := majorTypeSimpleAndFloat
|
||||
subType := additionalTypeFloat32
|
||||
n := math.Float32bits(val)
|
||||
var buf [4]byte
|
||||
for i := uint(0); i < 4; i++ {
|
||||
buf[i] = byte(n >> ((3 - i) * 8))
|
||||
}
|
||||
return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
|
||||
}
|
||||
|
||||
// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
|
||||
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendFloat32(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
|
||||
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
|
||||
switch {
|
||||
case math.IsNaN(val):
|
||||
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
|
||||
case math.IsInf(val, 1):
|
||||
return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...)
|
||||
case math.IsInf(val, -1):
|
||||
return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...)
|
||||
}
|
||||
major := majorTypeSimpleAndFloat
|
||||
subType := additionalTypeFloat64
|
||||
n := math.Float64bits(val)
|
||||
dst = append(dst, byte(major|subType))
|
||||
for i := uint(1); i <= 8; i++ {
|
||||
b := byte(n >> ((8 - i) * 8))
|
||||
dst = append(dst, b)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
|
||||
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
|
||||
major := majorTypeArray
|
||||
l := len(vals)
|
||||
if l == 0 {
|
||||
return e.AppendArrayEnd(e.AppendArrayStart(dst))
|
||||
}
|
||||
if l <= additionalMax {
|
||||
lb := byte(l)
|
||||
dst = append(dst, byte(major|lb))
|
||||
} else {
|
||||
dst = appendCborTypePrefix(dst, major, uint64(l))
|
||||
}
|
||||
for _, v := range vals {
|
||||
dst = e.AppendFloat64(dst, v)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst.
|
||||
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
|
||||
marshaled, err := JSONMarshalFunc(i)
|
||||
if err != nil {
|
||||
return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
|
||||
}
|
||||
return AppendEmbeddedJSON(dst, marshaled)
|
||||
}
|
||||
|
||||
// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
|
||||
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
|
||||
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
|
||||
return e.AppendBytes(dst, ip)
|
||||
}
|
||||
|
||||
// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
|
||||
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
|
||||
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))
|
||||
|
||||
// Prefix is a tuple (aka MAP of 1 pair of elements) -
|
||||
// first element is prefix, second is mask length.
|
||||
dst = append(dst, byte(majorTypeMap|0x1))
|
||||
dst = e.AppendBytes(dst, pfx.IP)
|
||||
maskLen, _ := pfx.Mask.Size()
|
||||
return e.AppendUint8(dst, uint8(maskLen))
|
||||
}
|
||||
|
||||
// AppendMACAddr encodes and inserts an Hardware (MAC) address.
|
||||
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
|
||||
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
|
||||
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
|
||||
return e.AppendBytes(dst, ha)
|
||||
}
|
||||
|
||||
// AppendHex adds a TAG and inserts a hex bytes as a string.
|
||||
func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
|
||||
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
|
||||
dst = append(dst, byte(additionalTypeTagHexString>>8))
|
||||
dst = append(dst, byte(additionalTypeTagHexString&0xff))
|
||||
return e.AppendBytes(dst, val)
|
||||
}
|
19
vendor/github.com/rs/zerolog/internal/json/base.go
generated
vendored
19
vendor/github.com/rs/zerolog/internal/json/base.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package json
|
||||
|
||||
// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice.
|
||||
// Making it package level instead of embedded in Encoder brings
|
||||
// some extra efforts at importing, but avoids value copy when the functions
|
||||
// of Encoder being invoked.
|
||||
// DO REMEMBER to set this variable at importing, or
|
||||
// you might get a nil pointer dereference panic at runtime.
|
||||
var JSONMarshalFunc func(v interface{}) ([]byte, error)
|
||||
|
||||
type Encoder struct{}
|
||||
|
||||
// AppendKey appends a new key to the output JSON.
|
||||
func (e Encoder) AppendKey(dst []byte, key string) []byte {
|
||||
if dst[len(dst)-1] != '{' {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
return append(e.AppendString(dst, key), ':')
|
||||
}
|
85
vendor/github.com/rs/zerolog/internal/json/bytes.go
generated
vendored
85
vendor/github.com/rs/zerolog/internal/json/bytes.go
generated
vendored
@ -1,85 +0,0 @@
|
||||
package json
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// AppendBytes is a mirror of appendString with []byte arg
|
||||
func (Encoder) AppendBytes(dst, s []byte) []byte {
|
||||
dst = append(dst, '"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
if !noEscapeTable[s[i]] {
|
||||
dst = appendBytesComplex(dst, s, i)
|
||||
return append(dst, '"')
|
||||
}
|
||||
}
|
||||
dst = append(dst, s...)
|
||||
return append(dst, '"')
|
||||
}
|
||||
|
||||
// AppendHex encodes the input bytes to a hex string and appends
|
||||
// the encoded string to the input byte slice.
|
||||
//
|
||||
// The operation loops though each byte and encodes it as hex using
|
||||
// the hex lookup table.
|
||||
func (Encoder) AppendHex(dst, s []byte) []byte {
|
||||
dst = append(dst, '"')
|
||||
for _, v := range s {
|
||||
dst = append(dst, hex[v>>4], hex[v&0x0f])
|
||||
}
|
||||
return append(dst, '"')
|
||||
}
|
||||
|
||||
// appendBytesComplex is a mirror of the appendStringComplex
|
||||
// with []byte arg
|
||||
func appendBytesComplex(dst, s []byte, i int) []byte {
|
||||
start := 0
|
||||
for i < len(s) {
|
||||
b := s[i]
|
||||
if b >= utf8.RuneSelf {
|
||||
r, size := utf8.DecodeRune(s[i:])
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
dst = append(dst, `\ufffd`...)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
continue
|
||||
}
|
||||
if noEscapeTable[b] {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
// We encountered a character that needs to be encoded.
|
||||
// Let's append the previous simple characters to the byte slice
|
||||
// and switch our operation to read and encode the remainder
|
||||
// characters byte-by-byte.
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
switch b {
|
||||
case '"', '\\':
|
||||
dst = append(dst, '\\', b)
|
||||
case '\b':
|
||||
dst = append(dst, '\\', 'b')
|
||||
case '\f':
|
||||
dst = append(dst, '\\', 'f')
|
||||
case '\n':
|
||||
dst = append(dst, '\\', 'n')
|
||||
case '\r':
|
||||
dst = append(dst, '\\', 'r')
|
||||
case '\t':
|
||||
dst = append(dst, '\\', 't')
|
||||
default:
|
||||
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
}
|
||||
if start < len(s) {
|
||||
dst = append(dst, s[start:]...)
|
||||
}
|
||||
return dst
|
||||
}
|
121
vendor/github.com/rs/zerolog/internal/json/string.go
generated
vendored
121
vendor/github.com/rs/zerolog/internal/json/string.go
generated
vendored
@ -1,121 +0,0 @@
|
||||
package json
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
const hex = "0123456789abcdef"
|
||||
|
||||
var noEscapeTable = [256]bool{}
|
||||
|
||||
func init() {
|
||||
for i := 0; i <= 0x7e; i++ {
|
||||
noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"'
|
||||
}
|
||||
}
|
||||
|
||||
// AppendStrings encodes the input strings to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = e.AppendString(dst, vals[0])
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = e.AppendString(append(dst, ','), val)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendString encodes the input string to json and appends
|
||||
// the encoded string to the input byte slice.
|
||||
//
|
||||
// The operation loops though each byte in the string looking
|
||||
// for characters that need json or utf8 encoding. If the string
|
||||
// does not need encoding, then the string is appended in it's
|
||||
// entirety to the byte slice.
|
||||
// If we encounter a byte that does need encoding, switch up
|
||||
// the operation and perform a byte-by-byte read-encode-append.
|
||||
func (Encoder) AppendString(dst []byte, s string) []byte {
|
||||
// Start with a double quote.
|
||||
dst = append(dst, '"')
|
||||
// Loop through each character in the string.
|
||||
for i := 0; i < len(s); i++ {
|
||||
// Check if the character needs encoding. Control characters, slashes,
|
||||
// and the double quote need json encoding. Bytes above the ascii
|
||||
// boundary needs utf8 encoding.
|
||||
if !noEscapeTable[s[i]] {
|
||||
// We encountered a character that needs to be encoded. Switch
|
||||
// to complex version of the algorithm.
|
||||
dst = appendStringComplex(dst, s, i)
|
||||
return append(dst, '"')
|
||||
}
|
||||
}
|
||||
// The string has no need for encoding an therefore is directly
|
||||
// appended to the byte slice.
|
||||
dst = append(dst, s...)
|
||||
// End with a double quote
|
||||
return append(dst, '"')
|
||||
}
|
||||
|
||||
// appendStringComplex is used by appendString to take over an in
|
||||
// progress JSON string encoding that encountered a character that needs
|
||||
// to be encoded.
|
||||
func appendStringComplex(dst []byte, s string, i int) []byte {
|
||||
start := 0
|
||||
for i < len(s) {
|
||||
b := s[i]
|
||||
if b >= utf8.RuneSelf {
|
||||
r, size := utf8.DecodeRuneInString(s[i:])
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
// In case of error, first append previous simple characters to
|
||||
// the byte slice if any and append a remplacement character code
|
||||
// in place of the invalid sequence.
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
dst = append(dst, `\ufffd`...)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
continue
|
||||
}
|
||||
if noEscapeTable[b] {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
// We encountered a character that needs to be encoded.
|
||||
// Let's append the previous simple characters to the byte slice
|
||||
// and switch our operation to read and encode the remainder
|
||||
// characters byte-by-byte.
|
||||
if start < i {
|
||||
dst = append(dst, s[start:i]...)
|
||||
}
|
||||
switch b {
|
||||
case '"', '\\':
|
||||
dst = append(dst, '\\', b)
|
||||
case '\b':
|
||||
dst = append(dst, '\\', 'b')
|
||||
case '\f':
|
||||
dst = append(dst, '\\', 'f')
|
||||
case '\n':
|
||||
dst = append(dst, '\\', 'n')
|
||||
case '\r':
|
||||
dst = append(dst, '\\', 'r')
|
||||
case '\t':
|
||||
dst = append(dst, '\\', 't')
|
||||
default:
|
||||
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
}
|
||||
if start < len(s) {
|
||||
dst = append(dst, s[start:]...)
|
||||
}
|
||||
return dst
|
||||
}
|
106
vendor/github.com/rs/zerolog/internal/json/time.go
generated
vendored
106
vendor/github.com/rs/zerolog/internal/json/time.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
package json
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Import from zerolog/global.go
|
||||
timeFormatUnix = ""
|
||||
timeFormatUnixMs = "UNIXMS"
|
||||
timeFormatUnixMicro = "UNIXMICRO"
|
||||
)
|
||||
|
||||
// AppendTime formats the input time with the given format
|
||||
// and appends the encoded string to the input byte slice.
|
||||
func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
|
||||
switch format {
|
||||
case timeFormatUnix:
|
||||
return e.AppendInt64(dst, t.Unix())
|
||||
case timeFormatUnixMs:
|
||||
return e.AppendInt64(dst, t.UnixNano()/1000000)
|
||||
case timeFormatUnixMicro:
|
||||
return e.AppendInt64(dst, t.UnixNano()/1000)
|
||||
}
|
||||
return append(t.AppendFormat(append(dst, '"'), format), '"')
|
||||
}
|
||||
|
||||
// AppendTimes converts the input times with the given format
|
||||
// and appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
|
||||
switch format {
|
||||
case timeFormatUnix:
|
||||
return appendUnixTimes(dst, vals)
|
||||
case timeFormatUnixMs:
|
||||
return appendUnixMsTimes(dst, vals)
|
||||
}
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"')
|
||||
if len(vals) > 1 {
|
||||
for _, t := range vals[1:] {
|
||||
dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"')
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
func appendUnixTimes(dst []byte, vals []time.Time) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, vals[0].Unix(), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, t := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10)
|
||||
if len(vals) > 1 {
|
||||
for _, t := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendDuration formats the input duration with the given unit & format
|
||||
// and appends the encoded string to the input byte slice.
|
||||
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
|
||||
if useInt {
|
||||
return strconv.AppendInt(dst, int64(d/unit), 10)
|
||||
}
|
||||
return e.AppendFloat64(dst, float64(d)/float64(unit))
|
||||
}
|
||||
|
||||
// AppendDurations formats the input durations with the given unit & format
|
||||
// and appends the encoded string list to the input byte slice.
|
||||
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = e.AppendDuration(dst, vals[0], unit, useInt)
|
||||
if len(vals) > 1 {
|
||||
for _, d := range vals[1:] {
|
||||
dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
405
vendor/github.com/rs/zerolog/internal/json/types.go
generated
vendored
405
vendor/github.com/rs/zerolog/internal/json/types.go
generated
vendored
@ -1,405 +0,0 @@
|
||||
package json
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// AppendNil inserts a 'Nil' object into the dst byte array.
|
||||
func (Encoder) AppendNil(dst []byte) []byte {
|
||||
return append(dst, "null"...)
|
||||
}
|
||||
|
||||
// AppendBeginMarker inserts a map start into the dst byte array.
|
||||
func (Encoder) AppendBeginMarker(dst []byte) []byte {
|
||||
return append(dst, '{')
|
||||
}
|
||||
|
||||
// AppendEndMarker inserts a map end into the dst byte array.
|
||||
func (Encoder) AppendEndMarker(dst []byte) []byte {
|
||||
return append(dst, '}')
|
||||
}
|
||||
|
||||
// AppendLineBreak appends a line break.
|
||||
func (Encoder) AppendLineBreak(dst []byte) []byte {
|
||||
return append(dst, '\n')
|
||||
}
|
||||
|
||||
// AppendArrayStart adds markers to indicate the start of an array.
|
||||
func (Encoder) AppendArrayStart(dst []byte) []byte {
|
||||
return append(dst, '[')
|
||||
}
|
||||
|
||||
// AppendArrayEnd adds markers to indicate the end of an array.
|
||||
func (Encoder) AppendArrayEnd(dst []byte) []byte {
|
||||
return append(dst, ']')
|
||||
}
|
||||
|
||||
// AppendArrayDelim adds markers to indicate end of a particular array element.
|
||||
func (Encoder) AppendArrayDelim(dst []byte) []byte {
|
||||
if len(dst) > 0 {
|
||||
return append(dst, ',')
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendBool converts the input bool to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendBool(dst []byte, val bool) []byte {
|
||||
return strconv.AppendBool(dst, val)
|
||||
}
|
||||
|
||||
// AppendBools encodes the input bools to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendBools(dst []byte, vals []bool) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendBool(dst, vals[0])
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendBool(append(dst, ','), val)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt converts the input int to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendInt(dst []byte, val int) []byte {
|
||||
return strconv.AppendInt(dst, int64(val), 10)
|
||||
}
|
||||
|
||||
// AppendInts encodes the input ints to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendInts(dst []byte, vals []int) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt8 converts the input []int8 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendInt8(dst []byte, val int8) []byte {
|
||||
return strconv.AppendInt(dst, int64(val), 10)
|
||||
}
|
||||
|
||||
// AppendInts8 encodes the input int8s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendInts8(dst []byte, vals []int8) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt16 converts the input int16 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendInt16(dst []byte, val int16) []byte {
|
||||
return strconv.AppendInt(dst, int64(val), 10)
|
||||
}
|
||||
|
||||
// AppendInts16 encodes the input int16s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendInts16(dst []byte, vals []int16) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt32 converts the input int32 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendInt32(dst []byte, val int32) []byte {
|
||||
return strconv.AppendInt(dst, int64(val), 10)
|
||||
}
|
||||
|
||||
// AppendInts32 encodes the input int32s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendInts32(dst []byte, vals []int32) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInt64 converts the input int64 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendInt64(dst []byte, val int64) []byte {
|
||||
return strconv.AppendInt(dst, val, 10)
|
||||
}
|
||||
|
||||
// AppendInts64 encodes the input int64s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendInts64(dst []byte, vals []int64) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendInt(dst, vals[0], 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendInt(append(dst, ','), val, 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint converts the input uint to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendUint(dst []byte, val uint) []byte {
|
||||
return strconv.AppendUint(dst, uint64(val), 10)
|
||||
}
|
||||
|
||||
// AppendUints encodes the input uints to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendUints(dst []byte, vals []uint) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint8 converts the input uint8 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendUint8(dst []byte, val uint8) []byte {
|
||||
return strconv.AppendUint(dst, uint64(val), 10)
|
||||
}
|
||||
|
||||
// AppendUints8 encodes the input uint8s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint16 converts the input uint16 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendUint16(dst []byte, val uint16) []byte {
|
||||
return strconv.AppendUint(dst, uint64(val), 10)
|
||||
}
|
||||
|
||||
// AppendUints16 encodes the input uint16s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint32 converts the input uint32 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendUint32(dst []byte, val uint32) []byte {
|
||||
return strconv.AppendUint(dst, uint64(val), 10)
|
||||
}
|
||||
|
||||
// AppendUints32 encodes the input uint32s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendUint64 converts the input uint64 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
|
||||
return strconv.AppendUint(dst, uint64(val), 10)
|
||||
}
|
||||
|
||||
// AppendUints64 encodes the input uint64s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = strconv.AppendUint(dst, vals[0], 10)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = strconv.AppendUint(append(dst, ','), val, 10)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
func appendFloat(dst []byte, val float64, bitSize int) []byte {
|
||||
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
|
||||
// with an error, but a logging library wants the data to get thru so we
|
||||
// make a tradeoff and store those types as string.
|
||||
switch {
|
||||
case math.IsNaN(val):
|
||||
return append(dst, `"NaN"`...)
|
||||
case math.IsInf(val, 1):
|
||||
return append(dst, `"+Inf"`...)
|
||||
case math.IsInf(val, -1):
|
||||
return append(dst, `"-Inf"`...)
|
||||
}
|
||||
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
|
||||
}
|
||||
|
||||
// AppendFloat32 converts the input float32 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
|
||||
return appendFloat(dst, float64(val), 32)
|
||||
}
|
||||
|
||||
// AppendFloats32 encodes the input float32s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = appendFloat(dst, float64(vals[0]), 32)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = appendFloat(append(dst, ','), float64(val), 32)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendFloat64 converts the input float64 to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
|
||||
return appendFloat(dst, val, 64)
|
||||
}
|
||||
|
||||
// AppendFloats64 encodes the input float64s to json and
|
||||
// appends the encoded string list to the input byte slice.
|
||||
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
|
||||
if len(vals) == 0 {
|
||||
return append(dst, '[', ']')
|
||||
}
|
||||
dst = append(dst, '[')
|
||||
dst = appendFloat(dst, vals[0], 64)
|
||||
if len(vals) > 1 {
|
||||
for _, val := range vals[1:] {
|
||||
dst = appendFloat(append(dst, ','), val, 64)
|
||||
}
|
||||
}
|
||||
dst = append(dst, ']')
|
||||
return dst
|
||||
}
|
||||
|
||||
// AppendInterface marshals the input interface to a string and
|
||||
// appends the encoded string to the input byte slice.
|
||||
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
|
||||
marshaled, err := JSONMarshalFunc(i)
|
||||
if err != nil {
|
||||
return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
|
||||
}
|
||||
return append(dst, marshaled...)
|
||||
}
|
||||
|
||||
// AppendObjectData takes in an object that is already in a byte array
|
||||
// and adds it to the dst.
|
||||
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
|
||||
// Three conditions apply here:
|
||||
// 1. new content starts with '{' - which should be dropped OR
|
||||
// 2. new content starts with '{' - which should be replaced with ','
|
||||
// to separate with existing content OR
|
||||
// 3. existing content has already other fields
|
||||
if o[0] == '{' {
|
||||
if len(dst) > 1 {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
o = o[1:]
|
||||
} else if len(dst) > 1 {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
return append(dst, o...)
|
||||
}
|
||||
|
||||
// AppendIPAddr adds IPv4 or IPv6 address to dst.
|
||||
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
|
||||
return e.AppendString(dst, ip.String())
|
||||
}
|
||||
|
||||
// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst.
|
||||
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
|
||||
return e.AppendString(dst, pfx.String())
|
||||
|
||||
}
|
||||
|
||||
// AppendMACAddr adds MAC address to dst.
|
||||
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
|
||||
return e.AppendString(dst, ha.String())
|
||||
}
|
457
vendor/github.com/rs/zerolog/log.go
generated
vendored
@ -1,457 +0,0 @@
|
||||
// Package zerolog provides a lightweight logging library dedicated to JSON logging.
|
||||
//
|
||||
// A global Logger can be use for simple logging:
|
||||
//
|
||||
// import "github.com/rs/zerolog/log"
|
||||
//
|
||||
// log.Info().Msg("hello world")
|
||||
// // Output: {"time":1494567715,"level":"info","message":"hello world"}
|
||||
//
|
||||
// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log".
|
||||
//
|
||||
// Fields can be added to log messages:
|
||||
//
|
||||
// log.Info().Str("foo", "bar").Msg("hello world")
|
||||
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
|
||||
//
|
||||
// Create logger instance to manage different outputs:
|
||||
//
|
||||
// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
|
||||
// logger.Info().
|
||||
// Str("foo", "bar").
|
||||
// Msg("hello world")
|
||||
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
|
||||
//
|
||||
// Sub-loggers let you chain loggers with additional context:
|
||||
//
|
||||
// sublogger := log.With().Str("component": "foo").Logger()
|
||||
// sublogger.Info().Msg("hello world")
|
||||
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
|
||||
//
|
||||
// Level logging
|
||||
//
|
||||
// zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
//
|
||||
// log.Debug().Msg("filtered out message")
|
||||
// log.Info().Msg("routed message")
|
||||
//
|
||||
// if e := log.Debug(); e.Enabled() {
|
||||
// // Compute log output only if enabled.
|
||||
// value := compute()
|
||||
// e.Str("foo": value).Msg("some debug message")
|
||||
// }
|
||||
// // Output: {"level":"info","time":1494567715,"routed message"}
|
||||
//
|
||||
// Customize automatic field names:
|
||||
//
|
||||
// log.TimestampFieldName = "t"
|
||||
// log.LevelFieldName = "p"
|
||||
// log.MessageFieldName = "m"
|
||||
//
|
||||
// log.Info().Msg("hello world")
|
||||
// // Output: {"t":1494567715,"p":"info","m":"hello world"}
|
||||
//
|
||||
// Log with no level and message:
|
||||
//
|
||||
// log.Log().Str("foo","bar").Msg("")
|
||||
// // Output: {"time":1494567715,"foo":"bar"}
|
||||
//
|
||||
// Add contextual fields to global Logger:
|
||||
//
|
||||
// log.Logger = log.With().Str("foo", "bar").Logger()
|
||||
//
|
||||
// Sample logs:
|
||||
//
|
||||
// sampled := log.Sample(&zerolog.BasicSampler{N: 10})
|
||||
// sampled.Info().Msg("will be logged every 10 messages")
|
||||
//
|
||||
// Log with contextual hooks:
|
||||
//
|
||||
// // Create the hook:
|
||||
// type SeverityHook struct{}
|
||||
//
|
||||
// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
|
||||
// if level != zerolog.NoLevel {
|
||||
// e.Str("severity", level.String())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // And use it:
|
||||
// var h SeverityHook
|
||||
// log := zerolog.New(os.Stdout).Hook(h)
|
||||
// log.Warn().Msg("")
|
||||
// // Output: {"level":"warn","severity":"warn"}
|
||||
//
|
||||
//
|
||||
// Caveats
|
||||
//
|
||||
// There is no fields deduplication out-of-the-box.
|
||||
// Using the same key multiple times creates new key in final JSON each time.
|
||||
//
|
||||
// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
|
||||
// logger.Info().
|
||||
// Timestamp().
|
||||
// Msg("dup")
|
||||
// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
|
||||
//
|
||||
// In this case, many consumers will take the last value,
|
||||
// but this is not guaranteed; check yours if in doubt.
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Level defines log levels.
|
||||
type Level int8
|
||||
|
||||
const (
|
||||
// DebugLevel defines debug log level.
|
||||
DebugLevel Level = iota
|
||||
// InfoLevel defines info log level.
|
||||
InfoLevel
|
||||
// WarnLevel defines warn log level.
|
||||
WarnLevel
|
||||
// ErrorLevel defines error log level.
|
||||
ErrorLevel
|
||||
// FatalLevel defines fatal log level.
|
||||
FatalLevel
|
||||
// PanicLevel defines panic log level.
|
||||
PanicLevel
|
||||
// NoLevel defines an absent log level.
|
||||
NoLevel
|
||||
// Disabled disables the logger.
|
||||
Disabled
|
||||
|
||||
// TraceLevel defines trace log level.
|
||||
TraceLevel Level = -1
|
||||
// Values less than TraceLevel are handled as numbers.
|
||||
)
|
||||
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case TraceLevel:
|
||||
return LevelTraceValue
|
||||
case DebugLevel:
|
||||
return LevelDebugValue
|
||||
case InfoLevel:
|
||||
return LevelInfoValue
|
||||
case WarnLevel:
|
||||
return LevelWarnValue
|
||||
case ErrorLevel:
|
||||
return LevelErrorValue
|
||||
case FatalLevel:
|
||||
return LevelFatalValue
|
||||
case PanicLevel:
|
||||
return LevelPanicValue
|
||||
case Disabled:
|
||||
return "disabled"
|
||||
case NoLevel:
|
||||
return ""
|
||||
}
|
||||
return strconv.Itoa(int(l))
|
||||
}
|
||||
|
||||
// ParseLevel converts a level string into a zerolog Level value.
|
||||
// returns an error if the input string does not match known values.
|
||||
func ParseLevel(levelStr string) (Level, error) {
|
||||
switch levelStr {
|
||||
case LevelFieldMarshalFunc(TraceLevel):
|
||||
return TraceLevel, nil
|
||||
case LevelFieldMarshalFunc(DebugLevel):
|
||||
return DebugLevel, nil
|
||||
case LevelFieldMarshalFunc(InfoLevel):
|
||||
return InfoLevel, nil
|
||||
case LevelFieldMarshalFunc(WarnLevel):
|
||||
return WarnLevel, nil
|
||||
case LevelFieldMarshalFunc(ErrorLevel):
|
||||
return ErrorLevel, nil
|
||||
case LevelFieldMarshalFunc(FatalLevel):
|
||||
return FatalLevel, nil
|
||||
case LevelFieldMarshalFunc(PanicLevel):
|
||||
return PanicLevel, nil
|
||||
case LevelFieldMarshalFunc(Disabled):
|
||||
return Disabled, nil
|
||||
case LevelFieldMarshalFunc(NoLevel):
|
||||
return NoLevel, nil
|
||||
}
|
||||
i, err := strconv.Atoi(levelStr)
|
||||
if err != nil {
|
||||
return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr)
|
||||
}
|
||||
if i > 127 || i < -128 {
|
||||
return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i)
|
||||
}
|
||||
return Level(i), nil
|
||||
}
|
||||
|
||||
// A Logger represents an active logging object that generates lines
|
||||
// of JSON output to an io.Writer. Each logging operation makes a single
|
||||
// call to the Writer's Write method. There is no guarantee on access
|
||||
// serialization to the Writer. If your Writer is not thread safe,
|
||||
// you may consider a sync wrapper.
|
||||
type Logger struct {
|
||||
w LevelWriter
|
||||
level Level
|
||||
sampler Sampler
|
||||
context []byte
|
||||
hooks []Hook
|
||||
stack bool
|
||||
}
|
||||
|
||||
// New creates a root logger with given output writer. If the output writer implements
|
||||
// the LevelWriter interface, the WriteLevel method will be called instead of the Write
|
||||
// one.
|
||||
//
|
||||
// Each logging operation makes a single call to the Writer's Write method. There is no
|
||||
// guarantee on access serialization to the Writer. If your Writer is not thread safe,
|
||||
// you may consider using sync wrapper.
|
||||
func New(w io.Writer) Logger {
|
||||
if w == nil {
|
||||
w = ioutil.Discard
|
||||
}
|
||||
lw, ok := w.(LevelWriter)
|
||||
if !ok {
|
||||
lw = levelWriterAdapter{w}
|
||||
}
|
||||
return Logger{w: lw, level: TraceLevel}
|
||||
}
|
||||
|
||||
// Nop returns a disabled logger for which all operation are no-op.
|
||||
func Nop() Logger {
|
||||
return New(nil).Level(Disabled)
|
||||
}
|
||||
|
||||
// Output duplicates the current logger and sets w as its output.
|
||||
func (l Logger) Output(w io.Writer) Logger {
|
||||
l2 := New(w)
|
||||
l2.level = l.level
|
||||
l2.sampler = l.sampler
|
||||
l2.stack = l.stack
|
||||
if len(l.hooks) > 0 {
|
||||
l2.hooks = append(l2.hooks, l.hooks...)
|
||||
}
|
||||
if l.context != nil {
|
||||
l2.context = make([]byte, len(l.context), cap(l.context))
|
||||
copy(l2.context, l.context)
|
||||
}
|
||||
return l2
|
||||
}
|
||||
|
||||
// With creates a child logger with the field added to its context.
|
||||
func (l Logger) With() Context {
|
||||
context := l.context
|
||||
l.context = make([]byte, 0, 500)
|
||||
if context != nil {
|
||||
l.context = append(l.context, context...)
|
||||
} else {
|
||||
// This is needed for AppendKey to not check len of input
|
||||
// thus making it inlinable
|
||||
l.context = enc.AppendBeginMarker(l.context)
|
||||
}
|
||||
return Context{l}
|
||||
}
|
||||
|
||||
// UpdateContext updates the internal logger's context.
|
||||
//
|
||||
// Use this method with caution. If unsure, prefer the With method.
|
||||
func (l *Logger) UpdateContext(update func(c Context) Context) {
|
||||
if l == disabledLogger {
|
||||
return
|
||||
}
|
||||
if cap(l.context) == 0 {
|
||||
l.context = make([]byte, 0, 500)
|
||||
}
|
||||
if len(l.context) == 0 {
|
||||
l.context = enc.AppendBeginMarker(l.context)
|
||||
}
|
||||
c := update(Context{*l})
|
||||
l.context = c.l.context
|
||||
}
|
||||
|
||||
// Level creates a child logger with the minimum accepted level set to level.
|
||||
func (l Logger) Level(lvl Level) Logger {
|
||||
l.level = lvl
|
||||
return l
|
||||
}
|
||||
|
||||
// GetLevel returns the current Level of l.
|
||||
func (l Logger) GetLevel() Level {
|
||||
return l.level
|
||||
}
|
||||
|
||||
// Sample returns a logger with the s sampler.
|
||||
func (l Logger) Sample(s Sampler) Logger {
|
||||
l.sampler = s
|
||||
return l
|
||||
}
|
||||
|
||||
// Hook returns a logger with the h Hook.
|
||||
func (l Logger) Hook(h Hook) Logger {
|
||||
l.hooks = append(l.hooks, h)
|
||||
return l
|
||||
}
|
||||
|
||||
// Trace starts a new message with trace level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Trace() *Event {
|
||||
return l.newEvent(TraceLevel, nil)
|
||||
}
|
||||
|
||||
// Debug starts a new message with debug level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Debug() *Event {
|
||||
return l.newEvent(DebugLevel, nil)
|
||||
}
|
||||
|
||||
// Info starts a new message with info level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Info() *Event {
|
||||
return l.newEvent(InfoLevel, nil)
|
||||
}
|
||||
|
||||
// Warn starts a new message with warn level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Warn() *Event {
|
||||
return l.newEvent(WarnLevel, nil)
|
||||
}
|
||||
|
||||
// Error starts a new message with error level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Error() *Event {
|
||||
return l.newEvent(ErrorLevel, nil)
|
||||
}
|
||||
|
||||
// Err starts a new message with error level with err as a field if not nil or
|
||||
// with info level if err is nil.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Err(err error) *Event {
|
||||
if err != nil {
|
||||
return l.Error().Err(err)
|
||||
}
|
||||
|
||||
return l.Info()
|
||||
}
|
||||
|
||||
// Fatal starts a new message with fatal level. The os.Exit(1) function
|
||||
// is called by the Msg method, which terminates the program immediately.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Fatal() *Event {
|
||||
return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
|
||||
}
|
||||
|
||||
// Panic starts a new message with panic level. The panic() function
|
||||
// is called by the Msg method, which stops the ordinary flow of a goroutine.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Panic() *Event {
|
||||
return l.newEvent(PanicLevel, func(msg string) { panic(msg) })
|
||||
}
|
||||
|
||||
// WithLevel starts a new message with level. Unlike Fatal and Panic
|
||||
// methods, WithLevel does not terminate the program or stop the ordinary
|
||||
// flow of a gourotine when used with their respective levels.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) WithLevel(level Level) *Event {
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
return l.Trace()
|
||||
case DebugLevel:
|
||||
return l.Debug()
|
||||
case InfoLevel:
|
||||
return l.Info()
|
||||
case WarnLevel:
|
||||
return l.Warn()
|
||||
case ErrorLevel:
|
||||
return l.Error()
|
||||
case FatalLevel:
|
||||
return l.newEvent(FatalLevel, nil)
|
||||
case PanicLevel:
|
||||
return l.newEvent(PanicLevel, nil)
|
||||
case NoLevel:
|
||||
return l.Log()
|
||||
case Disabled:
|
||||
return nil
|
||||
default:
|
||||
return l.newEvent(level, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Log starts a new message with no level. Setting GlobalLevel to Disabled
|
||||
// will still disable events produced by this method.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func (l *Logger) Log() *Event {
|
||||
return l.newEvent(NoLevel, nil)
|
||||
}
|
||||
|
||||
// Print sends a log event using debug level and no extra field.
|
||||
// Arguments are handled in the manner of fmt.Print.
|
||||
func (l *Logger) Print(v ...interface{}) {
|
||||
if e := l.Debug(); e.Enabled() {
|
||||
e.CallerSkipFrame(1).Msg(fmt.Sprint(v...))
|
||||
}
|
||||
}
|
||||
|
||||
// Printf sends a log event using debug level and no extra field.
|
||||
// Arguments are handled in the manner of fmt.Printf.
|
||||
func (l *Logger) Printf(format string, v ...interface{}) {
|
||||
if e := l.Debug(); e.Enabled() {
|
||||
e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements the io.Writer interface. This is useful to set as a writer
|
||||
// for the standard library log.
|
||||
func (l Logger) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
if n > 0 && p[n-1] == '\n' {
|
||||
// Trim CR added by stdlog.
|
||||
p = p[0 : n-1]
|
||||
}
|
||||
l.Log().CallerSkipFrame(1).Msg(string(p))
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Logger) newEvent(level Level, done func(string)) *Event {
|
||||
enabled := l.should(level)
|
||||
if !enabled {
|
||||
return nil
|
||||
}
|
||||
e := newEvent(l.w, level)
|
||||
e.done = done
|
||||
e.ch = l.hooks
|
||||
if level != NoLevel && LevelFieldName != "" {
|
||||
e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
|
||||
}
|
||||
if l.context != nil && len(l.context) > 1 {
|
||||
e.buf = enc.AppendObjectData(e.buf, l.context)
|
||||
}
|
||||
if l.stack {
|
||||
e.Stack()
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// should returns true if the log event should be logged.
|
||||
func (l *Logger) should(lvl Level) bool {
|
||||
if lvl < l.level || lvl < GlobalLevel() {
|
||||
return false
|
||||
}
|
||||
if l.sampler != nil && !samplingDisabled() {
|
||||
return l.sampler.Sample(lvl)
|
||||
}
|
||||
return true
|
||||
}
|
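A hedged side note on the Write method shown above: because Logger implements io.Writer, it can be set as the output of the standard library logger. The snippet below is an illustrative sketch, not part of this commit; the message text is arbitrary.

```go
package main

import (
	stdlog "log"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// A zerolog.Logger value satisfies io.Writer via its Write method.
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Every stdlib log line now becomes a no-level zerolog JSON event.
	stdlog.SetOutput(logger)
	stdlog.Print("hello from the standard library logger")
}
```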
131
vendor/github.com/rs/zerolog/log/log.go
generated
vendored
@ -1,131 +0,0 @@
|
||||
// Package log provides a global logger for zerolog.
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Logger is the global logger.
|
||||
var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger()
|
||||
|
||||
// Output duplicates the global logger and sets w as its output.
|
||||
func Output(w io.Writer) zerolog.Logger {
|
||||
return Logger.Output(w)
|
||||
}
|
||||
|
||||
// With creates a child logger with the field added to its context.
|
||||
func With() zerolog.Context {
|
||||
return Logger.With()
|
||||
}
|
||||
|
||||
// Level creates a child logger with the minimum accepted level set to level.
|
||||
func Level(level zerolog.Level) zerolog.Logger {
|
||||
return Logger.Level(level)
|
||||
}
|
||||
|
||||
// Sample returns a logger with the s sampler.
|
||||
func Sample(s zerolog.Sampler) zerolog.Logger {
|
||||
return Logger.Sample(s)
|
||||
}
|
||||
|
||||
// Hook returns a logger with the h Hook.
|
||||
func Hook(h zerolog.Hook) zerolog.Logger {
|
||||
return Logger.Hook(h)
|
||||
}
|
||||
|
||||
// Err starts a new message with error level with err as a field if not nil or
|
||||
// with info level if err is nil.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Err(err error) *zerolog.Event {
|
||||
return Logger.Err(err)
|
||||
}
|
||||
|
||||
// Trace starts a new message with trace level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Trace() *zerolog.Event {
|
||||
return Logger.Trace()
|
||||
}
|
||||
|
||||
// Debug starts a new message with debug level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Debug() *zerolog.Event {
|
||||
return Logger.Debug()
|
||||
}
|
||||
|
||||
// Info starts a new message with info level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Info() *zerolog.Event {
|
||||
return Logger.Info()
|
||||
}
|
||||
|
||||
// Warn starts a new message with warn level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Warn() *zerolog.Event {
|
||||
return Logger.Warn()
|
||||
}
|
||||
|
||||
// Error starts a new message with error level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Error() *zerolog.Event {
|
||||
return Logger.Error()
|
||||
}
|
||||
|
||||
// Fatal starts a new message with fatal level. The os.Exit(1) function
|
||||
// is called by the Msg method.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Fatal() *zerolog.Event {
|
||||
return Logger.Fatal()
|
||||
}
|
||||
|
||||
// Panic starts a new message with panic level. The message is also sent
|
||||
// to the panic function.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Panic() *zerolog.Event {
|
||||
return Logger.Panic()
|
||||
}
|
||||
|
||||
// WithLevel starts a new message with level.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func WithLevel(level zerolog.Level) *zerolog.Event {
|
||||
return Logger.WithLevel(level)
|
||||
}
|
||||
|
||||
// Log starts a new message with no level. Setting zerolog.GlobalLevel to
|
||||
// zerolog.Disabled will still disable events produced by this method.
|
||||
//
|
||||
// You must call Msg on the returned event in order to send the event.
|
||||
func Log() *zerolog.Event {
|
||||
return Logger.Log()
|
||||
}
|
||||
|
||||
// Print sends a log event using debug level and no extra field.
|
||||
// Arguments are handled in the manner of fmt.Print.
|
||||
func Print(v ...interface{}) {
|
||||
Logger.Debug().CallerSkipFrame(1).Msg(fmt.Sprint(v...))
|
||||
}
|
||||
|
||||
// Printf sends a log event using debug level and no extra field.
|
||||
// Arguments are handled in the manner of fmt.Printf.
|
||||
func Printf(format string, v ...interface{}) {
|
||||
Logger.Debug().CallerSkipFrame(1).Msgf(format, v...)
|
||||
}
|
||||
|
||||
// Ctx returns the Logger associated with the ctx. If no logger
|
||||
// is associated, a disabled logger is returned.
|
||||
func Ctx(ctx context.Context) *zerolog.Logger {
|
||||
return zerolog.Ctx(ctx)
|
||||
}
|
5
vendor/github.com/rs/zerolog/not_go112.go
generated
vendored
@ -1,5 +0,0 @@
|
||||
// +build !go1.12
|
||||
|
||||
package zerolog
|
||||
|
||||
const contextCallerSkipFrameCount = 3
|
BIN
vendor/github.com/rs/zerolog/pretty.png
generated
vendored
Binary file not shown. (Before: 82 KiB)
134
vendor/github.com/rs/zerolog/sampler.go
generated
vendored
@ -1,134 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Often samples log every ~ 10 events.
|
||||
Often = RandomSampler(10)
|
||||
// Sometimes samples log every ~ 100 events.
|
||||
Sometimes = RandomSampler(100)
|
||||
// Rarely samples log every ~ 1000 events.
|
||||
Rarely = RandomSampler(1000)
|
||||
)
|
||||
|
||||
// Sampler defines an interface to a log sampler.
|
||||
type Sampler interface {
|
||||
// Sample returns true if the event should be part of the sample, false if
|
||||
// the event should be dropped.
|
||||
Sample(lvl Level) bool
|
||||
}
|
||||
|
||||
// RandomSampler use a PRNG to randomly sample an event out of N events,
|
||||
// regardless of their level.
|
||||
type RandomSampler uint32
|
||||
|
||||
// Sample implements the Sampler interface.
|
||||
func (s RandomSampler) Sample(lvl Level) bool {
|
||||
if s <= 0 {
|
||||
return false
|
||||
}
|
||||
if rand.Intn(int(s)) != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// BasicSampler is a sampler that will send every Nth events, regardless of
|
||||
// their level.
|
||||
type BasicSampler struct {
|
||||
N uint32
|
||||
counter uint32
|
||||
}
|
||||
|
||||
// Sample implements the Sampler interface.
|
||||
func (s *BasicSampler) Sample(lvl Level) bool {
|
||||
n := s.N
|
||||
if n == 1 {
|
||||
return true
|
||||
}
|
||||
c := atomic.AddUint32(&s.counter, 1)
|
||||
return c%n == 1
|
||||
}
|
||||
|
||||
// BurstSampler lets Burst events pass per Period then pass the decision to
|
||||
// NextSampler. If Sampler is not set, all subsequent events are rejected.
|
||||
type BurstSampler struct {
|
||||
// Burst is the maximum number of event per period allowed before calling
|
||||
// NextSampler.
|
||||
Burst uint32
|
||||
// Period defines the burst period. If 0, NextSampler is always called.
|
||||
Period time.Duration
|
||||
// NextSampler is the sampler used after the burst is reached. If nil,
|
||||
// events are always rejected after the burst.
|
||||
NextSampler Sampler
|
||||
|
||||
counter uint32
|
||||
resetAt int64
|
||||
}
|
||||
|
||||
// Sample implements the Sampler interface.
|
||||
func (s *BurstSampler) Sample(lvl Level) bool {
|
||||
if s.Burst > 0 && s.Period > 0 {
|
||||
if s.inc() <= s.Burst {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if s.NextSampler == nil {
|
||||
return false
|
||||
}
|
||||
return s.NextSampler.Sample(lvl)
|
||||
}
|
||||
|
||||
func (s *BurstSampler) inc() uint32 {
|
||||
now := time.Now().UnixNano()
|
||||
resetAt := atomic.LoadInt64(&s.resetAt)
|
||||
var c uint32
|
||||
if now > resetAt {
|
||||
c = 1
|
||||
atomic.StoreUint32(&s.counter, c)
|
||||
newResetAt := now + s.Period.Nanoseconds()
|
||||
reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt)
|
||||
if !reset {
|
||||
// Lost the race with another goroutine trying to reset.
|
||||
c = atomic.AddUint32(&s.counter, 1)
|
||||
}
|
||||
} else {
|
||||
c = atomic.AddUint32(&s.counter, 1)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// LevelSampler applies a different sampler for each level.
|
||||
type LevelSampler struct {
|
||||
TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler
|
||||
}
|
||||
|
||||
func (s LevelSampler) Sample(lvl Level) bool {
|
||||
switch lvl {
|
||||
case TraceLevel:
|
||||
if s.TraceSampler != nil {
|
||||
return s.TraceSampler.Sample(lvl)
|
||||
}
|
||||
case DebugLevel:
|
||||
if s.DebugSampler != nil {
|
||||
return s.DebugSampler.Sample(lvl)
|
||||
}
|
||||
case InfoLevel:
|
||||
if s.InfoSampler != nil {
|
||||
return s.InfoSampler.Sample(lvl)
|
||||
}
|
||||
case WarnLevel:
|
||||
if s.WarnSampler != nil {
|
||||
return s.WarnSampler.Sample(lvl)
|
||||
}
|
||||
case ErrorLevel:
|
||||
if s.ErrorSampler != nil {
|
||||
return s.ErrorSampler.Sample(lvl)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
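For reference, a minimal sketch (not taken from this repository) of how the samplers defined above are wired into a logger: at most five events per second pass, then roughly every hundredth one. The numbers are arbitrary.

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// BurstSampler lets the first Burst events per Period through, then
	// delegates the decision to NextSampler.
	sampled := zerolog.New(os.Stderr).Sample(&zerolog.BurstSampler{
		Burst:       5,
		Period:      time.Second,
		NextSampler: &zerolog.BasicSampler{N: 100},
	})

	for i := 0; i < 10; i++ {
		sampled.Info().Int("i", i).Msg("burst-limited message")
	}
}
```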
80
vendor/github.com/rs/zerolog/syslog.go
generated
vendored
@ -1,80 +0,0 @@
|
||||
// +build !windows
|
||||
// +build !binary_log
|
||||
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog
|
||||
// or https://www.rsyslog.com/json-elasticsearch/
|
||||
const ceePrefix = "@cee:"
|
||||
|
||||
// SyslogWriter is an interface matching a syslog.Writer struct.
|
||||
type SyslogWriter interface {
|
||||
io.Writer
|
||||
Debug(m string) error
|
||||
Info(m string) error
|
||||
Warning(m string) error
|
||||
Err(m string) error
|
||||
Emerg(m string) error
|
||||
Crit(m string) error
|
||||
}
|
||||
|
||||
type syslogWriter struct {
|
||||
w SyslogWriter
|
||||
prefix string
|
||||
}
|
||||
|
||||
// SyslogLevelWriter wraps a SyslogWriter and call the right syslog level
|
||||
// method matching the zerolog level.
|
||||
func SyslogLevelWriter(w SyslogWriter) LevelWriter {
|
||||
return syslogWriter{w, ""}
|
||||
}
|
||||
|
||||
// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a
|
||||
// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog
|
||||
// and syslog-ng JSON logging support.
|
||||
// See https://www.rsyslog.com/json-elasticsearch/
|
||||
func SyslogCEEWriter(w SyslogWriter) LevelWriter {
|
||||
return syslogWriter{w, ceePrefix}
|
||||
}
|
||||
|
||||
func (sw syslogWriter) Write(p []byte) (n int, err error) {
|
||||
var pn int
|
||||
if sw.prefix != "" {
|
||||
pn, err = sw.w.Write([]byte(sw.prefix))
|
||||
if err != nil {
|
||||
return pn, err
|
||||
}
|
||||
}
|
||||
n, err = sw.w.Write(p)
|
||||
return pn + n, err
|
||||
}
|
||||
|
||||
// WriteLevel implements LevelWriter interface.
|
||||
func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
case DebugLevel:
|
||||
err = sw.w.Debug(sw.prefix + string(p))
|
||||
case InfoLevel:
|
||||
err = sw.w.Info(sw.prefix + string(p))
|
||||
case WarnLevel:
|
||||
err = sw.w.Warning(sw.prefix + string(p))
|
||||
case ErrorLevel:
|
||||
err = sw.w.Err(sw.prefix + string(p))
|
||||
case FatalLevel:
|
||||
err = sw.w.Emerg(sw.prefix + string(p))
|
||||
case PanicLevel:
|
||||
err = sw.w.Crit(sw.prefix + string(p))
|
||||
case NoLevel:
|
||||
err = sw.w.Info(sw.prefix + string(p))
|
||||
default:
|
||||
panic("invalid level")
|
||||
}
|
||||
// Any CEE prefix is not part of the message, so we don't include its length
|
||||
n = len(p)
|
||||
return
|
||||
}
|
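An illustrative sketch of the syslog adapter above, assuming a Unix system where log/syslog is available; the "myapp" tag and the priority are placeholders, not taken from this commit.

```go
package main

import (
	"log/syslog"

	"github.com/rs/zerolog"
)

func main() {
	// *syslog.Writer satisfies the SyslogWriter interface declared above
	// (Write, Debug, Info, Warning, Err, Emerg, Crit).
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "myapp")
	if err != nil {
		panic(err)
	}

	// SyslogLevelWriter routes each zerolog level to the matching syslog call.
	logger := zerolog.New(zerolog.SyslogLevelWriter(w))
	logger.Warn().Msg("routed to the syslog warning level")
}
```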
98
vendor/github.com/rs/zerolog/writer.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
package zerolog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LevelWriter defines as interface a writer may implement in order
|
||||
// to receive level information with payload.
|
||||
type LevelWriter interface {
|
||||
io.Writer
|
||||
WriteLevel(level Level, p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
type levelWriterAdapter struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
|
||||
return lw.Write(p)
|
||||
}
|
||||
|
||||
type syncWriter struct {
|
||||
mu sync.Mutex
|
||||
lw LevelWriter
|
||||
}
|
||||
|
||||
// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
|
||||
// This syncer can be used to wrap the call to writer's Write method if it is
|
||||
// not thread safe. Note that you do not need this wrapper for os.File Write
|
||||
// operations on POSIX and Windows systems as they are already thread-safe.
|
||||
func SyncWriter(w io.Writer) io.Writer {
|
||||
if lw, ok := w.(LevelWriter); ok {
|
||||
return &syncWriter{lw: lw}
|
||||
}
|
||||
return &syncWriter{lw: levelWriterAdapter{w}}
|
||||
}
|
||||
|
||||
// Write implements the io.Writer interface.
|
||||
func (s *syncWriter) Write(p []byte) (n int, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.lw.Write(p)
|
||||
}
|
||||
|
||||
// WriteLevel implements the LevelWriter interface.
|
||||
func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.lw.WriteLevel(l, p)
|
||||
}
|
||||
|
||||
type multiLevelWriter struct {
|
||||
writers []LevelWriter
|
||||
}
|
||||
|
||||
func (t multiLevelWriter) Write(p []byte) (n int, err error) {
|
||||
for _, w := range t.writers {
|
||||
if _n, _err := w.Write(p); err == nil {
|
||||
n = _n
|
||||
if _err != nil {
|
||||
err = _err
|
||||
} else if _n != len(p) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
|
||||
for _, w := range t.writers {
|
||||
if _n, _err := w.WriteLevel(l, p); err == nil {
|
||||
n = _n
|
||||
if _err != nil {
|
||||
err = _err
|
||||
} else if _n != len(p) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// MultiLevelWriter creates a writer that duplicates its writes to all the
|
||||
// provided writers, similar to the Unix tee(1) command. If some writers
|
||||
// implement LevelWriter, their WriteLevel method will be used instead of Write.
|
||||
func MultiLevelWriter(writers ...io.Writer) LevelWriter {
|
||||
lwriters := make([]LevelWriter, 0, len(writers))
|
||||
for _, w := range writers {
|
||||
if lw, ok := w.(LevelWriter); ok {
|
||||
lwriters = append(lwriters, lw)
|
||||
} else {
|
||||
lwriters = append(lwriters, levelWriterAdapter{w})
|
||||
}
|
||||
}
|
||||
return multiLevelWriter{lwriters}
|
||||
}
|
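A short, hypothetical usage sketch of MultiLevelWriter from the file above; the bytes.Buffer target is chosen purely for illustration.

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	var buf bytes.Buffer

	// MultiLevelWriter duplicates every write to all given writers, like tee(1).
	w := zerolog.MultiLevelWriter(os.Stderr, &buf)
	logger := zerolog.New(w).With().Timestamp().Logger()

	logger.Info().Str("sink", "both").Msg("written twice")
	fmt.Println("buffered copy:", buf.String())
}
```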
201
vendor/github.com/tonglil/buflogr/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
36
vendor/github.com/tonglil/buflogr/README.md
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
# buflogr
|
||||
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/tonglil/buflogr.svg)](https://pkg.go.dev/github.com/tonglil/buflogr)
|
||||
<!-- ![test](https://github.com/tonglil/buflogr/workflows/test/badge.svg) -->
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/tonglil/buflogr)](https://goreportcard.com/report/github.com/tonglil/buflogr)
|
||||
|
||||
A [logr](https://github.com/go-logr/logr) LogSink implementation using [bytes.Buffer](https://pkg.go.dev/bytes).
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/tonglil/buflogr"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var buf bytes.Buffer
|
||||
var log logr.Logger = buflogr.NewWithBuffer(&buf)
|
||||
|
||||
log = log.WithName("my app")
|
||||
log := log.WithValues("format", "none")
|
||||
|
||||
log.Info("Logr in action!", "the answer", 42)
|
||||
|
||||
fmt.Print(buf.String())
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
This is a simple log adapter to log messages into a buffer.
|
||||
Useful for testing.
|
178
vendor/github.com/tonglil/buflogr/buflogr.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
package buflogr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
const (
|
||||
LevelError = "ERROR"
|
||||
LevelInfo = "INFO"
|
||||
LevelV = "V[%d]"
|
||||
)
|
||||
|
||||
var (
|
||||
// NameSeparator separates names for logr.WithName.
|
||||
NameSeparator = "/"
|
||||
// KVFormatter is a function that renders a slice of key/value pairs into a
|
||||
// string with the signature: `func(kv ...interface{}) string`.
|
||||
KVFormatter = defaultKVFormatter
|
||||
)
|
||||
|
||||
var (
|
||||
_ logr.LogSink = &bufLogger{}
|
||||
_ Underlier = &bufLogger{}
|
||||
)
|
||||
|
||||
// New returns a logr.Logger with logr.LogSink implemented by bufLogger using a
|
||||
// new bytes.Buffer.
|
||||
func New() logr.Logger {
|
||||
return NewWithBuffer(nil)
|
||||
}
|
||||
|
||||
// New returns a logr.Logger with logr.LogSink implemented by bufLogger with an
|
||||
// existing bytes.Buffer.
|
||||
func NewWithBuffer(b *bytes.Buffer) logr.Logger {
|
||||
if b == nil {
|
||||
b = &bytes.Buffer{}
|
||||
}
|
||||
bl := &bufLogger{
|
||||
verbosity: 9,
|
||||
buf: b,
|
||||
}
|
||||
return logr.New(bl)
|
||||
}
|
||||
|
||||
// bufLogger implements the LogSink interface.
|
||||
type bufLogger struct {
|
||||
name string
|
||||
values []interface{}
|
||||
verbosity int
|
||||
buf *bytes.Buffer // pointer so logged text are persisted across all invocations
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Init is not implemented and does not use any runtime info.
|
||||
func (l *bufLogger) Init(info logr.RuntimeInfo) {
|
||||
// not implemented
|
||||
}
|
||||
|
||||
// Enabled implements logr.Logger.Enabled by checking if the current
|
||||
// verbosity level is less or equal than the logger's maximum verbosity.
|
||||
func (l *bufLogger) Enabled(level int) bool {
|
||||
return level <= l.verbosity
|
||||
}
|
||||
|
||||
// Info implements logr.Logger.Info by writing the line to the internal buffer.
|
||||
func (l *bufLogger) Info(level int, msg string, kv ...interface{}) {
|
||||
if l.Enabled(level) {
|
||||
l.writeLine(l.levelString(level), msg, kv...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements logr.Logger.Error by prefixing the line with "ERROR" and
|
||||
// write it to the internal buffer.
|
||||
func (l *bufLogger) Error(err error, msg string, kv ...interface{}) {
|
||||
l.writeLine(fmt.Sprintf("%s %v", LevelError, err), msg, kv...)
|
||||
}
|
||||
|
||||
// WithValues returns a new LogSink with additional key/value pairs.
|
||||
// The new LogSink has a new sync.Mutex.
|
||||
func (l *bufLogger) WithValues(kv ...interface{}) logr.LogSink {
|
||||
return &bufLogger{
|
||||
name: l.name,
|
||||
values: append(l.values, kv...),
|
||||
verbosity: l.verbosity,
|
||||
buf: l.buf,
|
||||
}
|
||||
}
|
||||
|
||||
// WithName returns a new LogSink with the specified name, appended with
|
||||
// NameSeparator if there is already a name.
|
||||
// The new LogSink has a new sync.Mutex.
|
||||
func (l *bufLogger) WithName(name string) logr.LogSink {
|
||||
if l.name != "" {
|
||||
name = l.name + NameSeparator + name
|
||||
}
|
||||
return &bufLogger{
|
||||
name: name,
|
||||
values: l.values,
|
||||
verbosity: l.verbosity,
|
||||
buf: l.buf,
|
||||
}
|
||||
}
|
||||
|
||||
func defaultKVFormatter(kv ...interface{}) string {
|
||||
s := strings.Join(strings.Fields(fmt.Sprint(kv...)), " ")
|
||||
s = strings.TrimPrefix(s, "[")
|
||||
s = strings.TrimSuffix(s, "]")
|
||||
return s
|
||||
}
|
||||
|
||||
func (l *bufLogger) writeLine(level, msg string, kv ...interface{}) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
var line []string
|
||||
|
||||
fields := []string{level, l.name, msg, KVFormatter(l.values), KVFormatter(kv)}
|
||||
for _, f := range fields {
|
||||
if f != "" {
|
||||
line = append(line, f)
|
||||
}
|
||||
}
|
||||
|
||||
l.buf.WriteString(strings.Join(line, " "))
|
||||
if !strings.HasSuffix(line[len(line)-1], "\n") {
|
||||
l.buf.WriteRune('\n')
|
||||
}
|
||||
}
|
||||
|
||||
func (l *bufLogger) levelString(level int) string {
|
||||
if level > 0 {
|
||||
return fmt.Sprintf(LevelV, level)
|
||||
}
|
||||
return LevelInfo
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying testing.T instance. Since
|
||||
// callers only have a logr.Logger, they have to know which
|
||||
// implementation is in use, so this interface is less of an
|
||||
// abstraction and more of a way to test type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() *bufLogger
|
||||
}
|
||||
|
||||
// GetUnderlying returns the bufLogger underneath this logSink.
|
||||
func (l *bufLogger) GetUnderlying() *bufLogger {
|
||||
return l
|
||||
}
|
||||
|
||||
// Additional methods
|
||||
|
||||
// Buf returns the internal buffer.
|
||||
// Wrap with Mutex().Lock() and Mutex().Unlock() when doing write calls to
|
||||
// preserve the write order.
|
||||
func (l *bufLogger) Buf() *bytes.Buffer {
|
||||
return l.buf
|
||||
}
|
||||
|
||||
// Mutex returns the sync.Mutex used to preserve the order of writes to the buffer.
|
||||
func (l *bufLogger) Mutex() *sync.Mutex {
|
||||
return &l.mu
|
||||
}
|
||||
|
||||
// Reset clears the buffer, name, and values, as well as resets the verbosity to
|
||||
// maximum (9).
|
||||
func (l *bufLogger) Reset() {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.name = ""
|
||||
l.values = nil
|
||||
l.verbosity = 9
|
||||
l.buf = &bytes.Buffer{}
|
||||
}
|
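buflogr is pulled in as a test dependency. A hedged sketch of the kind of test it enables follows; the package and test names are illustrative and not taken from this commit.

```go
package workgroups_test

import (
	"strings"
	"testing"

	"github.com/tonglil/buflogr"
)

func TestWorkerLogging(t *testing.T) {
	// buflogr.New returns a logr.Logger whose sink writes into a buffer.
	log := buflogr.New()

	// Code under test would receive this logr.Logger; here we log directly.
	log.Info("starting worker", "worker", 0)

	// Reach the underlying buffer through the Underlier interface shown above.
	buf := log.GetSink().(buflogr.Underlier).GetUnderlying().Buf()
	if !strings.Contains(buf.String(), "starting worker") {
		t.Fatalf("expected buffered log line, got %q", buf.String())
	}
}
```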
16
vendor/modules.txt
vendored
@ -418,6 +418,13 @@ github.com/go-git/go-git/v5/utils/merkletrie/filesystem
github.com/go-git/go-git/v5/utils/merkletrie/index
github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
github.com/go-git/go-git/v5/utils/merkletrie/noder
# github.com/go-logr/logr v1.2.3
## explicit; go 1.16
github.com/go-logr/logr
github.com/go-logr/logr/funcr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
# github.com/go-toolsmith/astcast v1.0.0
## explicit
github.com/go-toolsmith/astcast
@ -883,12 +890,6 @@ github.com/quasilyte/go-ruleguard/ruleguard/typematch
# github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95
## explicit; go 1.14
github.com/quasilyte/regex/syntax
# github.com/rs/zerolog v1.25.0
## explicit; go 1.15
github.com/rs/zerolog
github.com/rs/zerolog/internal/cbor
github.com/rs/zerolog/internal/json
github.com/rs/zerolog/log
# github.com/russross/blackfriday/v2 v2.0.1
## explicit
github.com/russross/blackfriday/v2
@ -989,6 +990,9 @@ github.com/tomarrell/wrapcheck/v2/wrapcheck
github.com/tommy-muehle/go-mnd/v2
github.com/tommy-muehle/go-mnd/v2/checks
github.com/tommy-muehle/go-mnd/v2/config
# github.com/tonglil/buflogr v0.0.0-20220413082439-d4c2784244cd
## explicit; go 1.17
github.com/tonglil/buflogr
# github.com/ulikunitz/xz v0.5.10
## explicit; go 1.12
github.com/ulikunitz/xz
@ -22,7 +22,7 @@ import (
	"fmt"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/go-logr/logr"
	"golang.org/x/sync/errgroup"
)

@ -54,36 +54,46 @@ type Dispatcher struct {
	queue      chan Job
	eg         *errgroup.Group
	numWorkers int
	log        logr.Logger
}

// NewDispatcher creates a new Dispatcher.
// It takes a context and adds it to the errgroup creation and returns it again.
func NewDispatcher(ctx context.Context, numWorkers, workLength int) (*Dispatcher, context.Context) {
func NewDispatcher(ctx context.Context, log logr.Logger, numWorkers, workLength int) (*Dispatcher, context.Context) {
	eg, ctx := errgroup.WithContext(ctx)

	return &Dispatcher{
		queue:      make(chan Job, workLength),
		eg:         eg,
		numWorkers: numWorkers,
		log:        log,
	}, ctx
}
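Because of the breaking change, every NewDispatcher call site now has to pass a logr.Logger as the second argument. A small migration sketch under that assumption; logr.Discard() is used here only to show the cheapest possible drop-in, while the examples and tests in this commit wire up an stdr-backed logger instead:

package main

import (
	"context"
	"runtime"

	"github.com/go-logr/logr"
	"go.xsfx.dev/workgroups"
)

func main() {
	// Before this commit:
	//   d, ctx := workgroups.NewDispatcher(context.Background(), runtime.GOMAXPROCS(0), 10)
	// Now the logger comes second; logr.Discard() silences the debug output.
	d, ctx := workgroups.NewDispatcher(
		context.Background(),
		logr.Discard(),
		runtime.GOMAXPROCS(0), // number of workers
		10,                    // queue capacity
	)

	_, _ = d, ctx
}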

// Start starts the configured number of workers and waits for jobs.
func (d *Dispatcher) Start() {
	for i := 0; i < d.numWorkers; i++ {
		logger := log.With().Int("worker", i).Logger()
		logger.Debug().Msg("starting worker")
		d.log.V(1).Info("starting worker", "worker", i)
		i := i

		d.eg.Go(func() error {
			for j := range d.queue {
				errChan := make(chan error)

				go func() {
					errChan <- j.work(j.ctx)
					err := j.work(j.ctx)
					select {
					case <-j.ctx.Done():
						d.log.V(1).Info("received job return after canceled context", "worker", i, "return", err)
					default:
						errChan <- err
					}
				}()

				select {
				case <-j.ctx.Done():
					close(errChan)

					return fmt.Errorf("got error from context: %w", j.ctx.Err())
				case err := <-errChan:
					if err != nil {
@ -92,7 +102,7 @@ func (d *Dispatcher) Start() {
				}
			}

			logger.Debug().Msg("no work. returning...")
			d.log.V(1).Info("no work. returning...", "worker", i)

			return nil
		})
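This hunk is the actual fix: the worker goroutine now runs the work function first and only sends the result into errChan while the job's context is still alive, otherwise it just logs it; the receiving select closes errChan and returns the context error. Below is a stripped-down sketch of that send-or-drop idea. It is not the package's code: runJob is a made-up helper, and it uses a buffered channel so its goroutine can never block, whereas the dispatcher above pairs an unbuffered channel with close().

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runJob is a hypothetical helper: it runs work in a goroutine and only
// delivers the result while the context is still alive.
func runJob(ctx context.Context, work func(context.Context) error) error {
	errChan := make(chan error, 1) // buffered, so the send below can never block

	go func() {
		err := work(ctx)
		select {
		case <-ctx.Done():
			// Nobody is waiting for this result anymore; drop (or log) it.
			fmt.Println("received job return after canceled context:", err)
		default:
			errChan <- err
		}
	}()

	select {
	case <-ctx.Done():
		return fmt.Errorf("got error from context: %w", ctx.Err())
	case err := <-errChan:
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	err := runJob(ctx, func(context.Context) error {
		time.Sleep(time.Second) // deliberately outlives the context

		return nil
	})

	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true

	time.Sleep(2 * time.Second) // give the goroutine time to hit its ctx.Done branch
}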
@ -101,19 +111,19 @@ func (d *Dispatcher) Start() {

// Append adds a job to the work queue.
func (d *Dispatcher) Append(job Job) {
	log.Debug().Msg("adds job")
	d.log.V(1).Info("adds job")
	d.queue <- job
}

// Close closes the queue channel.
func (d *Dispatcher) Close() {
	log.Debug().Msg("closing queue")
	d.log.V(1).Info("closing queue")
	close(d.queue)
}

// Wait for all jobs to finish.
func (d *Dispatcher) Wait() error {
	log.Debug().Msg("waiting for jobs to finish")
	d.log.V(1).Info("waiting for jobs to finish")

	if err := d.eg.Wait(); err != nil {
		return fmt.Errorf("error on waiting: %w", err)
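One consequence of wrapping with %w on both levels (the worker wraps ctx.Err(), Wait wraps the errgroup result) is that callers can still match the root cause with errors.Is, which is exactly what the new test at the bottom does via require.ErrorIs. A hedged caller-side fragment, assuming a dispatcher d whose context was canceled and a surrounding function that returns an error:

	if err := d.Wait(); err != nil {
		if errors.Is(err, context.Canceled) {
			// Stopped because the surrounding context was canceled; the message
			// reads roughly "error on waiting: got error from context: context canceled".
			return nil
		}

		return err // a job itself failed, or a deadline expired
	}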
@ -2,16 +2,20 @@
package workgroups_test

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/go-logr/stdr"
	"github.com/stretchr/testify/require"
	"github.com/tonglil/buflogr"
	"go.xsfx.dev/workgroups"
)

@ -32,7 +36,12 @@ func TestDispatcher(t *testing.T) {
		return nil
	}

	d, ctx := workgroups.NewDispatcher(context.Background(), runtime.GOMAXPROCS(0), 20)
	d, ctx := workgroups.NewDispatcher(
		context.Background(),
		stdr.New(log.New(os.Stderr, "", log.Lshortfile)),
		runtime.GOMAXPROCS(0),
		20,
	)
	d.Start()

	for i := 0; i < 10; i++ {
@ -57,7 +66,12 @@ func TestDispatcherError(t *testing.T) {
		return nil
	}

	d, ctx := workgroups.NewDispatcher(context.Background(), runtime.GOMAXPROCS(0), 2)
	d, ctx := workgroups.NewDispatcher(
		context.Background(),
		stdr.New(log.New(os.Stderr, "", log.Lshortfile)),
		runtime.GOMAXPROCS(0),
		2,
	)
	d.Start()
	d.Append(workgroups.NewJob(ctx, errWork))
	d.Append(workgroups.NewJob(ctx, okWork))
@ -77,7 +91,7 @@ func TestDispatcherErrorOneWorker(t *testing.T) {
		return nil
	}

	d, ctx := workgroups.NewDispatcher(context.Background(), 1, 1)
	d, ctx := workgroups.NewDispatcher(context.Background(), stdr.New(log.New(os.Stderr, "", log.Lshortfile)), 1, 1)
	d.Start()
	d.Append(workgroups.NewJob(ctx, errWork))
	d.Append(workgroups.NewJob(ctx, okWork))
@ -99,7 +113,7 @@ func TestDispatcherTimeout(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()

	d, ctx := workgroups.NewDispatcher(ctx, runtime.GOMAXPROCS(0), 1)
	d, ctx := workgroups.NewDispatcher(ctx, stdr.New(log.New(os.Stderr, "", log.Lshortfile)), runtime.GOMAXPROCS(0), 1)
	d.Start()
	d.Append(workgroups.NewJob(ctx, work))
	d.Close()
@ -158,6 +172,7 @@ func TestRetry(t *testing.T) {
	c := counter{}
	d, ctx := workgroups.NewDispatcher(
		ctx,
		stdr.New(log.New(os.Stderr, "", log.Lshortfile)),
		1,
		1,
	)
@ -180,3 +195,44 @@ func TestRetry(t *testing.T) {
		})
	}
}

func TestErrChanNotUsed(t *testing.T) {
	var buf bytes.Buffer
	log := buflogr.NewWithBuffer(&buf)
	require := require.New(t)
	work := func(ctx context.Context) error {
		time.Sleep(5 * time.Second)

		return nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	d, ctx := workgroups.NewDispatcher(ctx, log, runtime.GOMAXPROCS(0), 1)
	d.Start()
	d.Append(workgroups.NewJob(ctx, work))
	d.Close()

	go func() {
		time.Sleep(time.Second / 2)
		cancel()
	}()

	err := d.Wait()
	require.ErrorIs(err, context.Canceled)

	time.Sleep(10 * time.Second)

	// Breaking glass!
	s := log.GetSink()

	underlier, ok := s.(buflogr.Underlier)
	if !ok {
		t.FailNow()
	}

	bl := underlier.GetUnderlying()

	bl.Mutex().Lock()
	require.Contains(buf.String(), "received job return after canceled context")
	bl.Mutex().Unlock()
}