Merge pull request #1040 from jimmidyson/influxdb-0.9

Upgrade InfluxDB storage to InfluxDB 0.9.

Commit: 040bdd3cb1
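This commit bumps cAdvisor's vendored InfluxDB client from the 0.8-era package to the 0.9 series (see the Godeps.json change below). For orientation only, and not the storage driver's actual code, the sketch below shows what a single write looks like against the 0.9-series `github.com/influxdb/influxdb/client` package; the database, measurement, tag and field names are illustrative assumptions.

```go
package main

import (
	"log"
	"net/url"
	"time"

	influxdb "github.com/influxdb/influxdb/client"
)

func main() {
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}

	// Config/NewClient come from the 0.9-series client package vendored in this commit.
	c, err := influxdb.NewClient(influxdb.Config{URL: *u})
	if err != nil {
		log.Fatal(err)
	}

	point := influxdb.Point{
		Measurement: "cpu_usage_total", // illustrative series name
		Tags:        map[string]string{"container_name": "redis"},
		Fields:      map[string]interface{}{"value": 1234567.0},
		Time:        time.Now(),
	}

	// In 0.9, points are grouped into a BatchPoints value and written per database.
	_, err = c.Write(influxdb.BatchPoints{
		Points:   []influxdb.Point{point},
		Database: "cadvisor",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```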
Godeps/Godeps.json (generated, 30 lines changed)

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/google/cadvisor",
-	"GoVersion": "go1.5.1",
+	"GoVersion": "go1.5",
 	"Packages": [
 		"./..."
 	],
@@ -37,6 +37,11 @@
 			"Comment": "v4",
 			"Rev": "b4a58d95188dd092ae20072bac14cece0e67c388"
 		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/longpath",
+			"Comment": "v1.9.1",
+			"Rev": "a34a1d598c6096ed8b5ce5219e77d68e5cd85462"
+		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/mount",
 			"Comment": "v1.9.1",
@@ -59,6 +64,7 @@
 		},
 		{
 			"ImportPath": "github.com/fsouza/go-dockerclient",
+			"Comment": "0.2.1-764-g412c004",
 			"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
 		},
 		{
@@ -78,18 +84,24 @@
 			"ImportPath": "github.com/golang/glog",
 			"Rev": "fca8c8854093a154ff1eb580aae10276ad6b1b5f"
 		},
-		{
-			"ImportPath": "github.com/golang/mock/gomock",
-			"Rev": "15f8b22550555c0d3edf5afa97d74001bda2208b"
-		},
 		{
 			"ImportPath": "github.com/golang/protobuf/proto",
 			"Rev": "f7137ae6b19afbfd61a94b746fda3b3fe0491874"
 		},
 		{
 			"ImportPath": "github.com/influxdb/influxdb/client",
-			"Comment": "v0.8.0-rc.3-9-g3284662",
-			"Rev": "3284662b350688b651359f9124928856071bd3f5"
+			"Comment": "v0.9.5.1",
+			"Rev": "9eab56311373ee6f788ae5dfc87e2240038f0eb4"
+		},
+		{
+			"ImportPath": "github.com/influxdb/influxdb/models",
+			"Comment": "v0.9.5.1",
+			"Rev": "9eab56311373ee6f788ae5dfc87e2240038f0eb4"
+		},
+		{
+			"ImportPath": "github.com/influxdb/influxdb/pkg/escape",
+			"Comment": "v0.9.5.1",
+			"Rev": "9eab56311373ee6f788ae5dfc87e2240038f0eb4"
 		},
 		{
 			"ImportPath": "github.com/kr/pretty",
@@ -131,6 +143,10 @@
 			"ImportPath": "github.com/prometheus/procfs",
 			"Rev": "6c34ef819e19b4e16f410100ace4aa006f0e3bf8"
 		},
+		{
+			"ImportPath": "github.com/seccomp/libseccomp-golang",
+			"Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1"
+		},
 		{
 			"ImportPath": "github.com/stretchr/objx",
 			"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
Godeps/_workspace/src/github.com/docker/docker/pkg/longpath/longpath.go (new file, generated, vendored, 26 lines)

// longpath introduces some constants and helper functions for handling long paths
// in Windows, which are expected to be prepended with `\\?\` and followed by either
// a drive letter, a UNC server\share, or a volume identifier.

package longpath

import (
	"strings"
)

// Prefix is the longpath prefix for Windows file paths.
const Prefix = `\\?\`

// AddPrefix will add the Windows long path prefix to the path provided if
// it does not already have it.
func AddPrefix(path string) string {
	if !strings.HasPrefix(path, Prefix) {
		if strings.HasPrefix(path, `\\`) {
			// This is a UNC path, so we need to add 'UNC' to the path as well.
			path = Prefix + `UNC` + path[1:]
		} else {
			path = Prefix + path
		}
	}
	return path
}
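For orientation, a small illustrative use of the helper added above (the paths are made up):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	// A drive-letter path just gets the prefix.
	fmt.Println(longpath.AddPrefix(`C:\containers\layer1`)) // \\?\C:\containers\layer1

	// A UNC path additionally gets the "UNC" marker.
	fmt.Println(longpath.AddPrefix(`\\server\share\layer1`)) // \\?\UNC\server\share\layer1
}
```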
Godeps/_workspace/src/github.com/golang/mock/LICENSE (generated, vendored, 202 lines deleted)

Entire file deleted along with the rest of the vendored gomock package (see the Godeps.json change above). The file contained the unmodified Apache License, Version 2.0 text (http://www.apache.org/licenses/LICENSE-2.0), including the standard appendix describing how to apply the license.
Godeps/_workspace/src/github.com/golang/mock/gomock/call.go (generated, vendored, 247 lines deleted)

Entire file deleted with the vendored gomock package. It defined gomock's Call type, an expected call on a mock that tracks the receiver, method name, argument matchers, return values, prerequisite calls and min/max call counts, together with its builder methods (AnyTimes, Times, Do, Return, SetArg, After), the package-level InOrder helper, and the internal matching and exhaustion logic used by the controller.
Godeps/_workspace/src/github.com/golang/mock/gomock/callset.go (generated, vendored, 76 lines deleted)

Entire file deleted with the vendored gomock package. It implemented callSet, a map of expected calls indexed by receiver and method name, with Add, Remove and FindMatch helpers the controller uses to locate a matching, non-exhausted expectation for an incoming call.
Godeps/_workspace/src/github.com/golang/mock/gomock/controller.go (generated, vendored, 167 lines deleted)

Entire file deleted with the vendored gomock package. It carried the GoMock package documentation (define an interface, generate a mock with mockgen, drive it from a test via gomock.NewController and Finish, and order expectations with InOrder and/or Call.After) and implemented the TestReporter interface plus the Controller type with its RecordCall, Call and Finish methods. The standard usage pattern from its package comment is reproduced after this entry.
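Reassembled from the removed file's package comment (the `something.NewMockMyInterface` name is the placeholder used by that documentation for a mockgen-generated mock; it does not exist in this repository):

```go
// (1) Define an interface that you wish to mock.
type MyInterface interface {
	SomeMethod(x int64, y string)
}

// (2) Use mockgen to generate a mock from the interface.

// (3) Use the mock in a test:
func TestMyThing(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockObj := something.NewMockMyInterface(mockCtrl)
	mockObj.EXPECT().SomeMethod(4, "blah")
	// Pass mockObj to a real object and exercise it; Finish reports any
	// expectations that were never satisfied.
}
```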
Godeps/_workspace/src/github.com/golang/mock/gomock/matchers.go (generated, vendored, 97 lines deleted)

Entire file deleted with the vendored gomock package. It defined the Matcher interface (Matches, String) and the built-in matchers behind the Any, Eq, Nil and Not constructors.
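The matcher constructors from the removed file can also be used on their own, without any generated mocks. A runnable sketch, assuming gomock is still available on the GOPATH now that this commit drops it from the vendor tree:

```go
package main

import (
	"fmt"

	"github.com/golang/mock/gomock"
)

func main() {
	fmt.Println(gomock.Any().Matches(42))              // true: matches anything
	fmt.Println(gomock.Eq("blah").Matches("x"))        // false: reflect.DeepEqual mismatch
	fmt.Println(gomock.Nil().Matches(nil))             // true
	fmt.Println(gomock.Not(gomock.Eq(3)).Matches(4))   // true: negated equality
}
```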
Godeps/_workspace/src/github.com/golang/mock/gomock/mock_matcher/mock_matcher.go (generated, vendored, 49 lines deleted)

Entire file deleted with the vendored gomock package. It was the MockGen-generated mock of the Matcher interface (MockMatcher and its unexported recorder).
Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE (generated, vendored, 2 lines changed)

@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2013-2014 Errplane Inc.
+Copyright (c) 2013-2015 Errplane Inc.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in
Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md (new file, generated, vendored, 19 lines)

# List
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
- glyphicons [LICENSE](http://glyphicons.com/license/)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
Godeps/_workspace/src/github.com/influxdb/influxdb/_vendor/raft/LICENSE (generated, vendored, 20 lines deleted)

Entire file deleted: the standard MIT license text (Copyright 2013 go-raft contributors) for the bundled _vendor/raft package.
Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md (generated, vendored, 258 lines changed; rewritten, previously it contained only the heading "influxdb-go")

# InfluxDB Client

[GoDoc](http://godoc.org/github.com/influxdb/influxdb/client/v2)

## Description

**NOTE:** The Go client library now has a "v2" version, with the old version
being deprecated. The new version can be imported at
`import "github.com/influxdb/influxdb/client/v2"`. It is not backwards-compatible.

A Go client library written and maintained by the **InfluxDB** team.
This package provides convenience functions to read and write time series data.
It uses the HTTP protocol to communicate with your **InfluxDB** cluster.

## Getting Started

### Connecting To Your Database

Connecting to an **InfluxDB** database is straightforward. You will need a host
name, a port and the cluster user credentials if applicable. The default port is
8086. You can customize these settings to your specific installation via the
**InfluxDB** configuration file.

Though not necessary for experimentation, you may want to create a new user
and authenticate the connection to your database.

For more information please check out the
[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html).

For the impatient, you can create a new admin user _bubba_ by firing off the
[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).

```shell
influx
> create user bubba with password 'bumblebeetuna'
> grant all privileges to bubba
```

And now for good measure set the credentials in your shell environment.
In the example below we will use $INFLUX_USER and $INFLUX_PWD.

Now with the administrivia out of the way, let's connect to our database.

NOTE: If you've opted out of creating a user, you can omit Username and Password in
the configuration below.

```go
package main

import (
	"net/url"
	"fmt"
	"log"
	"time"

	"github.com/influxdb/influxdb/client/v2"
)

const (
	MyDB     = "square_holes"
	username = "bubba"
	password = "bumblebeetuna"
)

func main() {
	// Make client
	u, _ := url.Parse("http://localhost:8086")
	c := client.NewClient(client.Config{
		URL:      u,
		Username: username,
		Password: password,
	})

	// Create a new point batch
	bp := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  MyDB,
		Precision: "s",
	})

	// Create a point and add to batch
	tags := map[string]string{"cpu": "cpu-total"}
	fields := map[string]interface{}{
		"idle":   10.1,
		"system": 53.3,
		"user":   46.6,
	}
	pt := client.NewPoint("cpu_usage", tags, fields, time.Now())
	bp.AddPoint(pt)

	// Write the batch
	c.Write(bp)
}
```

### Inserting Data

Time series data aka *points* are written to the database using batch inserts.
The mechanism is to create one or more points and then create a batch aka
*batch points* and write these to a given database and series. A series is a
combination of a measurement (time/values) and a set of tags.

In this sample we will create a batch of 1,000 points. Each point has a time and
a single value as well as 2 tags indicating a shape and color. We write these points
to a database called _square_holes_ using a measurement named _shapes_.

NOTE: You can specify a RetentionPolicy as part of the batch points. If not
provided InfluxDB will use the database _default_ retention policy.

```go
func writePoints(clnt client.Client) {
	sampleSize := 1000
	rand.Seed(42)

	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "systemstats",
		Precision: "us",
	})

	for i := 0; i < sampleSize; i++ {
		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
		tags := map[string]string{
			"cpu":    "cpu-total",
			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
			"region": regions[rand.Intn(len(regions))],
		}

		idle := rand.Float64() * 100.0
		fields := map[string]interface{}{
			"idle": idle,
			"busy": 100.0 - idle,
		}

		bp.AddPoint(client.NewPoint(
			"cpu_usage",
			tags,
			fields,
			time.Now(),
		))
	}

	err := clnt.Write(bp)
	if err != nil {
		log.Fatal(err)
	}
}
```

### Querying Data

One nice advantage of using **InfluxDB** is the ability to query your data using familiar
SQL constructs. In this example we can create a convenience function to query the database
as follows:

```go
// queryDB convenience function to query the database
func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
	q := client.Query{
		Command:  cmd,
		Database: MyDB,
	}
	if response, err := clnt.Query(q); err == nil {
		if response.Error() != nil {
			return res, response.Error()
		}
		res = response.Results
	}
	return res, nil
}
```

#### Creating a Database

```go
_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
if err != nil {
	log.Fatal(err)
}
```

#### Count Records

```go
q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
res, err := queryDB(clnt, q)
if err != nil {
	log.Fatal(err)
}
count := res[0].Series[0].Values[0][1]
log.Printf("Found a total of %v records\n", count)
```

#### Find the last 10 _shapes_ records

```go
q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
res, err = queryDB(clnt, q)
if err != nil {
	log.Fatal(err)
}

for i, row := range res[0].Series[0].Values {
	t, err := time.Parse(time.RFC3339, row[0].(string))
	if err != nil {
		log.Fatal(err)
	}
	val := row[1].(string)
	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
}
```

### Using the UDP Client

The **InfluxDB** client also supports writing over UDP.

```go
func WriteUDP() {
	// Make client
	c := client.NewUDPClient("localhost:8089")

	// Create a new point batch
	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Precision: "s",
	})

	// Create a point and add to batch
	tags := map[string]string{"cpu": "cpu-total"}
	fields := map[string]interface{}{
		"idle":   10.1,
		"system": 53.3,
		"user":   46.6,
	}
	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
	if err != nil {
		panic(err.Error())
	}
	bp.AddPoint(pt)

	// Write the batch
	c.Write(bp)
}
```

## Go Docs

Please refer to
[http://godoc.org/github.com/influxdb/influxdb/client/v2](http://godoc.org/github.com/influxdb/influxdb/client/v2)
for documentation.

## See Also

You can also examine how the client library is used by the
[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
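One nit worth flagging for anyone copying the README's first example: it discards the errors returned by `client.NewBatchPoints`, `client.NewPoint` and `c.Write` (the later examples in the same README show that all three can fail). A hedged fragment continuing that example, reusing its `MyDB`, `tags`, `fields` and `c`, with the checks added:

```go
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
	Database:  MyDB,
	Precision: "s",
})
if err != nil {
	log.Fatal(err)
}

pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
	log.Fatal(err)
}
bp.AddPoint(pt)

if err := c.Write(bp); err != nil {
	log.Fatal(err)
}
```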
Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go (generated, vendored, 1180 lines changed)

File diff suppressed because it is too large.
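influxdb.go is the 0.9-series (non-v2) client that cAdvisor imports per the Godeps.json change above; its diff is suppressed on this page. As a rough, hedged orientation only (the exact types live in the suppressed file, and the database name here is an assumption), a small query through that package might look like this:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	influxdb "github.com/influxdb/influxdb/client"
)

func main() {
	u, _ := url.Parse("http://localhost:8086")
	c, err := influxdb.NewClient(influxdb.Config{URL: *u})
	if err != nil {
		log.Fatal(err)
	}

	// SHOW MEASUREMENTS is a cheap way to confirm connectivity and list series.
	resp, err := c.Query(influxdb.Query{Command: "SHOW MEASUREMENTS", Database: "cadvisor"})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error() != nil {
		log.Fatal(resp.Error())
	}
	fmt.Printf("%+v\n", resp.Results)
}
```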
Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go (generated, vendored, 19 lines deleted)

Entire file deleted; its contents were:

package client

type Series struct {
	Name    string          `json:"name"`
	Columns []string        `json:"columns"`
	Points  [][]interface{} `json:"points"`
}

func (self *Series) GetName() string {
	return self.Name
}

func (self *Series) GetColumns() []string {
	return self.Columns
}

func (self *Series) GetPoints() [][]interface{} {
	return self.Points
}
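The deleted type modelled the 0.8-era JSON response shape (name, columns, points), which the 0.9 client no longer uses. A self-contained illustration using a copy of that struct (the sample JSON is made up):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Series is copied verbatim from the deleted file.
type Series struct {
	Name    string          `json:"name"`
	Columns []string        `json:"columns"`
	Points  [][]interface{} `json:"points"`
}

func main() {
	raw := `{"name":"cpu_usage","columns":["time","value"],"points":[[1450000000000,42.5]]}`
	var s Series
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Name, s.Columns, s.Points)
}
```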
Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go (generated, vendored, 15 lines deleted)

Entire file deleted; its contents were:

package client

type ShardSpace struct {
	// required, must be unique within the database
	Name string `json:"name"`
	// required, a database has many shard spaces and a shard space belongs to a database
	Database string `json:"database"`
	// this is optional, if they don't set it, we'll set to /.*/
	Regex string `json:"regex"`
	// this is optional, if they don't set it, it will default to the storage.dir in the config
	RetentionPolicy   string `json:"retentionPolicy"`
	ShardDuration     string `json:"shardDuration"`
	ReplicationFactor uint32 `json:"replicationFactor"`
	Split             uint32 `json:"split"`
}
498 Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go (generated, vendored, new file)
@@ -0,0 +1,498 @@
package client

import (
    "bytes"
    "crypto/tls"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "time"

    "github.com/influxdb/influxdb/models"
)

// UDPPayloadSize is a reasonable default payload size for UDP packets that
// could be travelling over the internet.
const (
    UDPPayloadSize = 512
)

type HTTPConfig struct {
    // Addr should be of the form "http://host:port"
    // or "http://[ipv6-host%zone]:port".
    Addr string

    // Username is the influxdb username, optional
    Username string

    // Password is the influxdb password, optional
    Password string

    // UserAgent is the http User Agent, defaults to "InfluxDBClient"
    UserAgent string

    // Timeout for influxdb writes, defaults to no timeout
    Timeout time.Duration

    // InsecureSkipVerify gets passed to the http client, if true, it will
    // skip https certificate verification. Defaults to false
    InsecureSkipVerify bool
}

type UDPConfig struct {
    // Addr should be of the form "host:port"
    // or "[ipv6-host%zone]:port".
    Addr string

    // PayloadSize is the maximum size of a UDP client message, optional
    // Tune this based on your network. Defaults to UDPBufferSize.
    PayloadSize int
}

type BatchPointsConfig struct {
    // Precision is the write precision of the points, defaults to "ns"
    Precision string

    // Database is the database to write points to
    Database string

    // RetentionPolicy is the retention policy of the points
    RetentionPolicy string

    // Write consistency is the number of servers required to confirm write
    WriteConsistency string
}

// Client is a client interface for writing & querying the database
type Client interface {
    // Write takes a BatchPoints object and writes all Points to InfluxDB.
    Write(bp BatchPoints) error

    // Query makes an InfluxDB Query on the database. This will fail if using
    // the UDP client.
    Query(q Query) (*Response, error)

    // Close releases any resources a Client may be using.
    Close() error
}

// NewClient creates a client interface from the given config.
func NewHTTPClient(conf HTTPConfig) (Client, error) {
    if conf.UserAgent == "" {
        conf.UserAgent = "InfluxDBClient"
    }

    u, err := url.Parse(conf.Addr)
    if err != nil {
        return nil, err
    } else if u.Scheme != "http" && u.Scheme != "https" {
        m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
            " must start with http:// or https://", u.Scheme)
        return nil, errors.New(m)
    }

    tr := &http.Transport{
        TLSClientConfig: &tls.Config{
            InsecureSkipVerify: conf.InsecureSkipVerify,
        },
    }
    return &client{
        url:       u,
        username:  conf.Username,
        password:  conf.Password,
        useragent: conf.UserAgent,
        httpClient: &http.Client{
            Timeout:   conf.Timeout,
            Transport: tr,
        },
    }, nil
}

// Close releases the client's resources.
func (c *client) Close() error {
    return nil
}

// NewUDPClient returns a client interface for writing to an InfluxDB UDP
// service from the given config.
func NewUDPClient(conf UDPConfig) (Client, error) {
    var udpAddr *net.UDPAddr
    udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
    if err != nil {
        return nil, err
    }

    conn, err := net.DialUDP("udp", nil, udpAddr)
    if err != nil {
        return nil, err
    }

    payloadSize := conf.PayloadSize
    if payloadSize == 0 {
        payloadSize = UDPPayloadSize
    }

    return &udpclient{
        conn:        conn,
        payloadSize: payloadSize,
    }, nil
}

// Close releases the udpclient's resources.
func (uc *udpclient) Close() error {
    return uc.conn.Close()
}

type client struct {
    url        *url.URL
    username   string
    password   string
    useragent  string
    httpClient *http.Client
}

type udpclient struct {
    conn        *net.UDPConn
    payloadSize int
}

// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
// batch for each goroutine.
type BatchPoints interface {
    // AddPoint adds the given point to the Batch of points
    AddPoint(p *Point)
    // Points lists the points in the Batch
    Points() []*Point

    // Precision returns the currently set precision of this Batch
    Precision() string
    // SetPrecision sets the precision of this batch.
    SetPrecision(s string) error

    // Database returns the currently set database of this Batch
    Database() string
    // SetDatabase sets the database of this Batch
    SetDatabase(s string)

    // WriteConsistency returns the currently set write consistency of this Batch
    WriteConsistency() string
    // SetWriteConsistency sets the write consistency of this Batch
    SetWriteConsistency(s string)

    // RetentionPolicy returns the currently set retention policy of this Batch
    RetentionPolicy() string
    // SetRetentionPolicy sets the retention policy of this Batch
    SetRetentionPolicy(s string)
}

// NewBatchPoints returns a BatchPoints interface based on the given config.
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
    if conf.Precision == "" {
        conf.Precision = "ns"
    }
    if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
        return nil, err
    }
    bp := &batchpoints{
        database:         conf.Database,
        precision:        conf.Precision,
        retentionPolicy:  conf.RetentionPolicy,
        writeConsistency: conf.WriteConsistency,
    }
    return bp, nil
}

type batchpoints struct {
    points           []*Point
    database         string
    precision        string
    retentionPolicy  string
    writeConsistency string
}

func (bp *batchpoints) AddPoint(p *Point) {
    bp.points = append(bp.points, p)
}

func (bp *batchpoints) Points() []*Point {
    return bp.points
}

func (bp *batchpoints) Precision() string {
    return bp.precision
}

func (bp *batchpoints) Database() string {
    return bp.database
}

func (bp *batchpoints) WriteConsistency() string {
    return bp.writeConsistency
}

func (bp *batchpoints) RetentionPolicy() string {
    return bp.retentionPolicy
}

func (bp *batchpoints) SetPrecision(p string) error {
    if _, err := time.ParseDuration("1" + p); err != nil {
        return err
    }
    bp.precision = p
    return nil
}

func (bp *batchpoints) SetDatabase(db string) {
    bp.database = db
}

func (bp *batchpoints) SetWriteConsistency(wc string) {
    bp.writeConsistency = wc
}

func (bp *batchpoints) SetRetentionPolicy(rp string) {
    bp.retentionPolicy = rp
}

type Point struct {
    pt models.Point
}

// NewPoint returns a point with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended
// to send data without a timestamp.
func NewPoint(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t ...time.Time,
) (*Point, error) {
    var T time.Time
    if len(t) > 0 {
        T = t[0]
    }

    pt, err := models.NewPoint(name, tags, fields, T)
    if err != nil {
        return nil, err
    }
    return &Point{
        pt: pt,
    }, nil
}

// String returns a line-protocol string of the Point
func (p *Point) String() string {
    return p.pt.String()
}

// PrecisionString returns a line-protocol string of the Point, at precision
func (p *Point) PrecisionString(precison string) string {
    return p.pt.PrecisionString(precison)
}

// Name returns the measurement name of the point
func (p *Point) Name() string {
    return p.pt.Name()
}

// Name returns the tags associated with the point
func (p *Point) Tags() map[string]string {
    return p.pt.Tags()
}

// Time return the timestamp for the point
func (p *Point) Time() time.Time {
    return p.pt.Time()
}

// UnixNano returns the unix nano time of the point
func (p *Point) UnixNano() int64 {
    return p.pt.UnixNano()
}

// Fields returns the fields for the point
func (p *Point) Fields() map[string]interface{} {
    return p.pt.Fields()
}

func (uc *udpclient) Write(bp BatchPoints) error {
    var b bytes.Buffer
    var d time.Duration
    d, _ = time.ParseDuration("1" + bp.Precision())

    for _, p := range bp.Points() {
        pointstring := p.pt.RoundedString(d) + "\n"

        // Write and reset the buffer if we reach the max size
        if b.Len()+len(pointstring) >= uc.payloadSize {
            if _, err := uc.conn.Write(b.Bytes()); err != nil {
                return err
            }
            b.Reset()
        }

        if _, err := b.WriteString(pointstring); err != nil {
            return err
        }
    }

    _, err := uc.conn.Write(b.Bytes())
    return err
}

func (c *client) Write(bp BatchPoints) error {
    var b bytes.Buffer

    for _, p := range bp.Points() {
        if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
            return err
        }

        if err := b.WriteByte('\n'); err != nil {
            return err
        }
    }

    u := c.url
    u.Path = "write"
    req, err := http.NewRequest("POST", u.String(), &b)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "")
    req.Header.Set("User-Agent", c.useragent)
    if c.username != "" {
        req.SetBasicAuth(c.username, c.password)
    }

    params := req.URL.Query()
    params.Set("db", bp.Database())
    params.Set("rp", bp.RetentionPolicy())
    params.Set("precision", bp.Precision())
    params.Set("consistency", bp.WriteConsistency())
    req.URL.RawQuery = params.Encode()

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return err
    }

    if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
        var err = fmt.Errorf(string(body))
        return err
    }

    return nil
}

// Query defines a query to send to the server
type Query struct {
    Command   string
    Database  string
    Precision string
}

// NewQuery returns a query object
// database and precision strings can be empty strings if they are not needed
// for the query.
func NewQuery(command, database, precision string) Query {
    return Query{
        Command:   command,
        Database:  database,
        Precision: precision,
    }
}

// Response represents a list of statement results.
type Response struct {
    Results []Result
    Err     error
}

// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
    if r.Err != nil {
        return r.Err
    }
    for _, result := range r.Results {
        if result.Err != nil {
            return result.Err
        }
    }
    return nil
}

// Result represents a resultset returned from a single statement.
type Result struct {
    Series []models.Row
    Err    error
}

func (uc *udpclient) Query(q Query) (*Response, error) {
    return nil, fmt.Errorf("Querying via UDP is not supported")
}

// Query sends a command to the server and returns the Response
func (c *client) Query(q Query) (*Response, error) {
    u := c.url
    u.Path = "query"

    req, err := http.NewRequest("GET", u.String(), nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", "")
    req.Header.Set("User-Agent", c.useragent)
    if c.username != "" {
        req.SetBasicAuth(c.username, c.password)
    }

    params := req.URL.Query()
    params.Set("q", q.Command)
    params.Set("db", q.Database)
    if q.Precision != "" {
        params.Set("epoch", q.Precision)
    }
    req.URL.RawQuery = params.Encode()

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    var response Response
    dec := json.NewDecoder(resp.Body)
    dec.UseNumber()
    decErr := dec.Decode(&response)

    // ignore this error if we got an invalid status code
    if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
        decErr = nil
    }
    // If we got a valid decode error, send that back
    if decErr != nil {
        return nil, decErr
    }
    // If we don't have an error in our json response, and didn't get statusOK
    // then send back an error
    if resp.StatusCode != http.StatusOK && response.Error() == nil {
        return &response, fmt.Errorf("received status code %d from server",
            resp.StatusCode)
    }
    return &response, nil
}
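The BatchPoints doc comment above stresses that a batch is not thread-safe and that each goroutine needs its own batch. The sketch below is not part of the vendored change; it is a minimal illustration of that constraint, assuming a hypothetical InfluxDB endpoint at http://localhost:8086, a placeholder database named mydb, and a made-up measurement worker_heartbeat. Batches are assembled concurrently and then handed to a single goroutine for writing.

```go
package main

import (
    "fmt"
    "log"
    "sync"
    "time"

    "github.com/influxdb/influxdb/client/v2"
)

func main() {
    c, err := client.NewHTTPClient(client.HTTPConfig{
        Addr: "http://localhost:8086", // placeholder address
    })
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    batches := make(chan client.BatchPoints)

    var wg sync.WaitGroup
    for w := 0; w < 4; w++ {
        wg.Add(1)
        go func(worker int) {
            defer wg.Done()

            // Each goroutine assembles its own batch; batches are never shared.
            bp, err := client.NewBatchPoints(client.BatchPointsConfig{
                Database:  "mydb", // placeholder database name
                Precision: "s",
            })
            if err != nil {
                log.Println(err)
                return
            }

            tags := map[string]string{"worker": fmt.Sprintf("%d", worker)}
            fields := map[string]interface{}{"value": float64(worker)}
            pt, err := client.NewPoint("worker_heartbeat", tags, fields, time.Now())
            if err != nil {
                log.Println(err)
                return
            }
            bp.AddPoint(pt)
            batches <- bp
        }(w)
    }

    go func() {
        wg.Wait()
        close(batches)
    }()

    // A single goroutine performs the writes.
    for bp := range batches {
        if err := c.Write(bp); err != nil {
            log.Println(err)
        }
    }
}
```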
1385 Godeps/_workspace/src/github.com/influxdb/influxdb/models/points.go (generated, vendored, new file)
File diff suppressed because it is too large

59 Godeps/_workspace/src/github.com/influxdb/influxdb/models/rows.go (generated, vendored, new file)
@@ -0,0 +1,59 @@
package models

import (
    "hash/fnv"
    "sort"
)

// Row represents a single row returned from the execution of a statement.
type Row struct {
    Name    string            `json:"name,omitempty"`
    Tags    map[string]string `json:"tags,omitempty"`
    Columns []string          `json:"columns,omitempty"`
    Values  [][]interface{}   `json:"values,omitempty"`
    Err     error             `json:"err,omitempty"`
}

// SameSeries returns true if r contains values for the same series as o.
func (r *Row) SameSeries(o *Row) bool {
    return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}

// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
    h := fnv.New64a()
    keys := r.tagsKeys()
    for _, k := range keys {
        h.Write([]byte(k))
        h.Write([]byte(r.Tags[k]))
    }
    return h.Sum64()
}

// tagKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
    a := make([]string, 0, len(r.Tags))
    for k := range r.Tags {
        a = append(a, k)
    }
    sort.Strings(a)
    return a
}

type Rows []*Row

func (p Rows) Len() int { return len(p) }

func (p Rows) Less(i, j int) bool {
    // Sort by name first.
    if p[i].Name != p[j].Name {
        return p[i].Name < p[j].Name
    }

    // Sort by tag set hash. Tags don't have a meaningful sort order so we
    // just compute a hash and sort by that instead. This allows the tests
    // to receive rows in a predictable order every time.
    return p[i].tagsHash() < p[j].tagsHash()
}

func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
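Because Rows implements sort.Interface through the Len, Less, and Swap methods above, callers can obtain the predictable name-then-tag-hash ordering the comment describes with a plain sort.Sort. A small illustrative sketch (measurement names and tag values are made up):

```go
package main

import (
    "fmt"
    "sort"

    "github.com/influxdb/influxdb/models"
)

func main() {
    rows := models.Rows{
        {Name: "cpu", Tags: map[string]string{"host": "b"}},
        {Name: "mem", Tags: map[string]string{"host": "a"}},
        {Name: "cpu", Tags: map[string]string{"host": "a"}},
    }

    // Orders by measurement name first, then by tag-set hash,
    // giving a stable ordering across runs.
    sort.Sort(rows)

    for _, r := range rows {
        fmt.Println(r.Name, r.Tags)
    }
}
```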
45 Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/bytes.go (generated, vendored, new file)
@@ -0,0 +1,45 @@
package escape

import "bytes"

func Bytes(in []byte) []byte {
    for b, esc := range Codes {
        in = bytes.Replace(in, []byte{b}, esc, -1)
    }
    return in
}

func Unescape(in []byte) []byte {
    i := 0
    inLen := len(in)
    var out []byte

    for {
        if i >= inLen {
            break
        }
        if in[i] == '\\' && i+1 < inLen {
            switch in[i+1] {
            case ',':
                out = append(out, ',')
                i += 2
                continue
            case '"':
                out = append(out, '"')
                i += 2
                continue
            case ' ':
                out = append(out, ' ')
                i += 2
                continue
            case '=':
                out = append(out, '=')
                i += 2
                continue
            }
        }
        out = append(out, in[i])
        i += 1
    }
    return out
}
34 Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/strings.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
package escape

import "strings"

var (
    Codes = map[byte][]byte{
        ',': []byte(`\,`),
        '"': []byte(`\"`),
        ' ': []byte(`\ `),
        '=': []byte(`\=`),
    }

    codesStr = map[string]string{}
)

func init() {
    for k, v := range Codes {
        codesStr[string(k)] = string(v)
    }
}

func UnescapeString(in string) string {
    for b, esc := range codesStr {
        in = strings.Replace(in, esc, b, -1)
    }
    return in
}

func String(in string) string {
    for b, esc := range codesStr {
        in = strings.Replace(in, b, esc, -1)
    }
    return in
}
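These helpers exist because InfluxDB's line protocol uses commas, spaces, equals signs, and double quotes as delimiters, so raw values have to be escaped on the way in and unescaped on the way out. A minimal round-trip sketch (the input string is only an example):

```go
package main

import (
    "fmt"

    "github.com/influxdb/influxdb/pkg/escape"
)

func main() {
    raw := `cpu load,host=server 01`

    // Escape delimiters for the line protocol...
    escaped := escape.String(raw)
    fmt.Println(escaped) // cpu\ load\,host\=server\ 01

    // ...and reverse it when reading values back.
    fmt.Println(escape.UnescapeString(escaped) == raw) // true
}
```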
22 Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE (generated, vendored, new file)
@@ -0,0 +1,22 @@
Copyright (c) 2015 Matthew Heon <mheon@redhat.com>
Copyright (c) 2015 Paul Moore <pmoore@redhat.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/README (generated, vendored, new file)
@@ -0,0 +1,26 @@
libseccomp-golang: Go Language Bindings for the libseccomp Project
===============================================================================
https://github.com/seccomp/libseccomp-golang
https://github.com/seccomp/libseccomp

The libseccomp library provides an easy to use, platform independent, interface
to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
designed to abstract away the underlying BPF based syscall filter language and
present a more conventional function-call based filtering interface that should
be familiar to, and easily adopted by, application developers.

The libseccomp-golang library provides a Go based interface to the libseccomp
library.

* Online Resources

The library source repository currently lives on GitHub at the following URLs:

 -> https://github.com/seccomp/libseccomp-golang
 -> https://github.com/seccomp/libseccomp

The project mailing list is currently hosted on Google Groups at the URL below,
please note that a Google account is not required to subscribe to the mailing
list.

 -> https://groups.google.com/d/forum/libseccomp
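To make the function-call style filtering described above concrete, here is a minimal sketch assembled only from the API defined in seccomp.go below (NewFilter, GetSyscallFromName, AddRule, Load). It is illustrative rather than practical: it assumes Linux with libseccomp installed, the syscall names and the EPERM return code are arbitrary choices, and a real Go program would have to allow many more syscalls for the runtime to keep working.

```go
// +build linux

package main

import (
    "log"

    seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
    // Deny everything by default, returning EPERM (errno 1) to the caller.
    filter, err := seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(1))
    if err != nil {
        log.Fatal(err)
    }
    defer filter.Release()

    // Explicitly allow a handful of syscalls by name.
    for _, name := range []string{"read", "write", "exit_group"} {
        call, err := seccomp.GetSyscallFromName(name)
        if err != nil {
            log.Fatal(err)
        }
        if err := filter.AddRule(call, seccomp.ActAllow); err != nil {
            log.Fatal(err)
        }
    }

    // Load the filter into the kernel for this process.
    if err := filter.Load(); err != nil {
        log.Fatal(err)
    }
}
```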
827 Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp.go (generated, vendored, new file)
@@ -0,0 +1,827 @@
// +build linux
|
||||||
|
|
||||||
|
// Public API specification for libseccomp Go bindings
|
||||||
|
// Contains public API for the bindings
|
||||||
|
|
||||||
|
// Package seccomp rovides bindings for libseccomp, a library wrapping the Linux
|
||||||
|
// seccomp syscall. Seccomp enables an application to restrict system call use
|
||||||
|
// for itself and its children.
|
||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// C wrapping code
|
||||||
|
|
||||||
|
// #cgo LDFLAGS: -lseccomp
|
||||||
|
// #include <stdlib.h>
|
||||||
|
// #include <seccomp.h>
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
// Exported types
|
||||||
|
|
||||||
|
// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
|
||||||
|
// per-architecture basis.
|
||||||
|
type ScmpArch uint
|
||||||
|
|
||||||
|
// ScmpAction represents an action to be taken on a filter rule match in
|
||||||
|
// libseccomp
|
||||||
|
type ScmpAction uint
|
||||||
|
|
||||||
|
// ScmpCompareOp represents a comparison operator which can be used in a filter
|
||||||
|
// rule
|
||||||
|
type ScmpCompareOp uint
|
||||||
|
|
||||||
|
// ScmpCondition represents a rule in a libseccomp filter context
|
||||||
|
type ScmpCondition struct {
|
||||||
|
Argument uint `json:"argument,omitempty"`
|
||||||
|
Op ScmpCompareOp `json:"operator,omitempty"`
|
||||||
|
Operand1 uint64 `json:"operand_one,omitempty"`
|
||||||
|
Operand2 uint64 `json:"operand_two,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScmpSyscall represents a Linux System Call
|
||||||
|
type ScmpSyscall int32
|
||||||
|
|
||||||
|
// Exported Constants
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Valid architectures recognized by libseccomp
|
||||||
|
// ARM64 and all MIPS architectures are unsupported by versions of the
|
||||||
|
// library before v2.2 and will return errors if used
|
||||||
|
|
||||||
|
// ArchInvalid is a placeholder to ensure uninitialized ScmpArch
|
||||||
|
// variables are invalid
|
||||||
|
ArchInvalid ScmpArch = iota
|
||||||
|
// ArchNative is the native architecture of the kernel
|
||||||
|
ArchNative ScmpArch = iota
|
||||||
|
// ArchX86 represents 32-bit x86 syscalls
|
||||||
|
ArchX86 ScmpArch = iota
|
||||||
|
// ArchAMD64 represents 64-bit x86-64 syscalls
|
||||||
|
ArchAMD64 ScmpArch = iota
|
||||||
|
// ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
|
||||||
|
ArchX32 ScmpArch = iota
|
||||||
|
// ArchARM represents 32-bit ARM syscalls
|
||||||
|
ArchARM ScmpArch = iota
|
||||||
|
// ArchARM64 represents 64-bit ARM syscalls
|
||||||
|
ArchARM64 ScmpArch = iota
|
||||||
|
// ArchMIPS represents 32-bit MIPS syscalls
|
||||||
|
ArchMIPS ScmpArch = iota
|
||||||
|
// ArchMIPS64 represents 64-bit MIPS syscalls
|
||||||
|
ArchMIPS64 ScmpArch = iota
|
||||||
|
// ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
|
||||||
|
ArchMIPS64N32 ScmpArch = iota
|
||||||
|
// ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
|
||||||
|
ArchMIPSEL ScmpArch = iota
|
||||||
|
// ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
|
||||||
|
ArchMIPSEL64 ScmpArch = iota
|
||||||
|
// ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
|
||||||
|
// 32-bit pointers)
|
||||||
|
ArchMIPSEL64N32 ScmpArch = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Supported actions on filter match
|
||||||
|
|
||||||
|
// ActInvalid is a placeholder to ensure uninitialized ScmpAction
|
||||||
|
// variables are invalid
|
||||||
|
ActInvalid ScmpAction = iota
|
||||||
|
// ActKill kills the process
|
||||||
|
ActKill ScmpAction = iota
|
||||||
|
// ActTrap throws SIGSYS
|
||||||
|
ActTrap ScmpAction = iota
|
||||||
|
// ActErrno causes the syscall to return a negative error code. This
|
||||||
|
// code can be set with the SetReturnCode method
|
||||||
|
ActErrno ScmpAction = iota
|
||||||
|
// ActTrace causes the syscall to notify tracing processes with the
|
||||||
|
// given error code. This code can be set with the SetReturnCode method
|
||||||
|
ActTrace ScmpAction = iota
|
||||||
|
// ActAllow permits the syscall to continue execution
|
||||||
|
ActAllow ScmpAction = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// These are comparison operators used in conditional seccomp rules
|
||||||
|
// They are used to compare the value of a single argument of a syscall
|
||||||
|
// against a user-defined constant
|
||||||
|
|
||||||
|
// CompareInvalid is a placeholder to ensure uninitialized ScmpCompareOp
|
||||||
|
// variables are invalid
|
||||||
|
CompareInvalid ScmpCompareOp = iota
|
||||||
|
// CompareNotEqual returns true if the argument is not equal to the
|
||||||
|
// given value
|
||||||
|
CompareNotEqual ScmpCompareOp = iota
|
||||||
|
// CompareLess returns true if the argument is less than the given value
|
||||||
|
CompareLess ScmpCompareOp = iota
|
||||||
|
// CompareLessOrEqual returns true if the argument is less than or equal
|
||||||
|
// to the given value
|
||||||
|
CompareLessOrEqual ScmpCompareOp = iota
|
||||||
|
// CompareEqual returns true if the argument is equal to the given value
|
||||||
|
CompareEqual ScmpCompareOp = iota
|
||||||
|
// CompareGreaterEqual returns true if the argument is greater than or
|
||||||
|
// equal to the given value
|
||||||
|
CompareGreaterEqual ScmpCompareOp = iota
|
||||||
|
// CompareGreater returns true if the argument is greater than the given
|
||||||
|
// value
|
||||||
|
CompareGreater ScmpCompareOp = iota
|
||||||
|
// CompareMaskedEqual returns true if the argument is equal to the given
|
||||||
|
// value, when masked (bitwise &) against the second given value
|
||||||
|
CompareMaskedEqual ScmpCompareOp = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
// Helpers for types
|
||||||
|
|
||||||
|
// GetArchFromString returns an ScmpArch constant from a string representing an
|
||||||
|
// architecture
|
||||||
|
func GetArchFromString(arch string) (ScmpArch, error) {
|
||||||
|
switch strings.ToLower(arch) {
|
||||||
|
case "x86":
|
||||||
|
return ArchX86, nil
|
||||||
|
case "amd64", "x86-64", "x86_64", "x64":
|
||||||
|
return ArchAMD64, nil
|
||||||
|
case "x32":
|
||||||
|
return ArchX32, nil
|
||||||
|
case "arm":
|
||||||
|
return ArchARM, nil
|
||||||
|
case "arm64", "aarch64":
|
||||||
|
return ArchARM64, nil
|
||||||
|
case "mips":
|
||||||
|
return ArchMIPS, nil
|
||||||
|
case "mips64":
|
||||||
|
return ArchMIPS64, nil
|
||||||
|
case "mips64n32":
|
||||||
|
return ArchMIPS64N32, nil
|
||||||
|
case "mipsel":
|
||||||
|
return ArchMIPSEL, nil
|
||||||
|
case "mipsel64":
|
||||||
|
return ArchMIPSEL64, nil
|
||||||
|
case "mipsel64n32":
|
||||||
|
return ArchMIPSEL64N32, nil
|
||||||
|
default:
|
||||||
|
return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of an architecture constant
|
||||||
|
func (a ScmpArch) String() string {
|
||||||
|
switch a {
|
||||||
|
case ArchX86:
|
||||||
|
return "x86"
|
||||||
|
case ArchAMD64:
|
||||||
|
return "amd64"
|
||||||
|
case ArchX32:
|
||||||
|
return "x32"
|
||||||
|
case ArchARM:
|
||||||
|
return "arm"
|
||||||
|
case ArchARM64:
|
||||||
|
return "arm64"
|
||||||
|
case ArchMIPS:
|
||||||
|
return "mips"
|
||||||
|
case ArchMIPS64:
|
||||||
|
return "mips64"
|
||||||
|
case ArchMIPS64N32:
|
||||||
|
return "mips64n32"
|
||||||
|
case ArchMIPSEL:
|
||||||
|
return "mipsel"
|
||||||
|
case ArchMIPSEL64:
|
||||||
|
return "mipsel64"
|
||||||
|
case ArchMIPSEL64N32:
|
||||||
|
return "mipsel64n32"
|
||||||
|
case ArchNative:
|
||||||
|
return "native"
|
||||||
|
case ArchInvalid:
|
||||||
|
return "Invalid architecture"
|
||||||
|
default:
|
||||||
|
return "Unknown architecture"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of a comparison operator constant
|
||||||
|
func (a ScmpCompareOp) String() string {
|
||||||
|
switch a {
|
||||||
|
case CompareNotEqual:
|
||||||
|
return "Not equal"
|
||||||
|
case CompareLess:
|
||||||
|
return "Less than"
|
||||||
|
case CompareLessOrEqual:
|
||||||
|
return "Less than or equal to"
|
||||||
|
case CompareEqual:
|
||||||
|
return "Equal"
|
||||||
|
case CompareGreaterEqual:
|
||||||
|
return "Greater than or equal to"
|
||||||
|
case CompareGreater:
|
||||||
|
return "Greater than"
|
||||||
|
case CompareMaskedEqual:
|
||||||
|
return "Masked equality"
|
||||||
|
case CompareInvalid:
|
||||||
|
return "Invalid comparison operator"
|
||||||
|
default:
|
||||||
|
return "Unrecognized comparison operator"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of a seccomp match action
|
||||||
|
func (a ScmpAction) String() string {
|
||||||
|
switch a & 0xFFFF {
|
||||||
|
case ActKill:
|
||||||
|
return "Action: Kill Process"
|
||||||
|
case ActTrap:
|
||||||
|
return "Action: Send SIGSYS"
|
||||||
|
case ActErrno:
|
||||||
|
return fmt.Sprintf("Action: Return error code %d", (a >> 16))
|
||||||
|
case ActTrace:
|
||||||
|
return fmt.Sprintf("Action: Notify tracing processes with code %d",
|
||||||
|
(a >> 16))
|
||||||
|
case ActAllow:
|
||||||
|
return "Action: Allow system call"
|
||||||
|
default:
|
||||||
|
return "Unrecognized Action"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReturnCode adds a return code to a supporting ScmpAction, clearing any
|
||||||
|
// existing code Only valid on ActErrno and ActTrace. Takes no action otherwise.
|
||||||
|
// Accepts 16-bit return code as argument.
|
||||||
|
// Returns a valid ScmpAction of the original type with the new error code set.
|
||||||
|
func (a ScmpAction) SetReturnCode(code int16) ScmpAction {
|
||||||
|
aTmp := a & 0x0000FFFF
|
||||||
|
if aTmp == ActErrno || aTmp == ActTrace {
|
||||||
|
return (aTmp | (ScmpAction(code)&0xFFFF)<<16)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReturnCode returns the return code of an ScmpAction
|
||||||
|
func (a ScmpAction) GetReturnCode() int16 {
|
||||||
|
return int16(a >> 16)
|
||||||
|
}
|
||||||
|
|
||||||
|
// General utility functions
|
||||||
|
|
||||||
|
// GetLibraryVersion returns the version of the library the bindings are built
|
||||||
|
// against.
|
||||||
|
// The version is formatted as follows: Major.Minor.Micro
|
||||||
|
func GetLibraryVersion() (major, minor, micro int) {
|
||||||
|
return verMajor, verMinor, verMicro
|
||||||
|
}
|
||||||
|
|
||||||
|
// Syscall functions
|
||||||
|
|
||||||
|
// GetName retrieves the name of a syscall from its number.
|
||||||
|
// Acts on any syscall number.
|
||||||
|
// Returns either a string containing the name of the syscall, or an error.
|
||||||
|
func (s ScmpSyscall) GetName() (string, error) {
|
||||||
|
return s.GetNameByArch(ArchNative)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNameByArch retrieves the name of a syscall from its number for a given
|
||||||
|
// architecture.
|
||||||
|
// Acts on any syscall number.
|
||||||
|
// Accepts a valid architecture constant.
|
||||||
|
// Returns either a string containing the name of the syscall, or an error.
|
||||||
|
// if the syscall is unrecognized or an issue occurred.
|
||||||
|
func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
|
||||||
|
if err := sanitizeArch(arch); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
|
||||||
|
if cString == nil {
|
||||||
|
return "", fmt.Errorf("could not resolve syscall name")
|
||||||
|
}
|
||||||
|
defer C.free(unsafe.Pointer(cString))
|
||||||
|
|
||||||
|
finalStr := C.GoString(cString)
|
||||||
|
return finalStr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSyscallFromName returns the number of a syscall by name on the kernel's
|
||||||
|
// native architecture.
|
||||||
|
// Accepts a string containing the name of a syscall.
|
||||||
|
// Returns the number of the syscall, or an error if no syscall with that name
|
||||||
|
// was found.
|
||||||
|
func GetSyscallFromName(name string) (ScmpSyscall, error) {
|
||||||
|
cString := C.CString(name)
|
||||||
|
defer C.free(unsafe.Pointer(cString))
|
||||||
|
|
||||||
|
result := C.seccomp_syscall_resolve_name(cString)
|
||||||
|
if result == scmpError {
|
||||||
|
return 0, fmt.Errorf("could not resolve name to syscall")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ScmpSyscall(result), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSyscallFromNameByArch returns the number of a syscall by name for a given
|
||||||
|
// architecture's ABI.
|
||||||
|
// Accepts the name of a syscall and an architecture constant.
|
||||||
|
// Returns the number of the syscall, or an error if an invalid architecture is
|
||||||
|
// passed or a syscall with that name was not found.
|
||||||
|
func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
|
||||||
|
if err := sanitizeArch(arch); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cString := C.CString(name)
|
||||||
|
defer C.free(unsafe.Pointer(cString))
|
||||||
|
|
||||||
|
result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
|
||||||
|
if result == scmpError {
|
||||||
|
return 0, fmt.Errorf("could not resolve name to syscall")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ScmpSyscall(result), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeCondition creates and returns a new condition to attach to a filter rule.
|
||||||
|
// Associated rules will only match if this condition is true.
|
||||||
|
// Accepts the number the argument we are checking, and a comparison operator
|
||||||
|
// and value to compare to.
|
||||||
|
// The rule will match if argument $arg (zero-indexed) of the syscall is
|
||||||
|
// $COMPARE_OP the provided comparison value.
|
||||||
|
// Some comparison operators accept two values. Masked equals, for example,
|
||||||
|
// will mask $arg of the syscall with the second value provided (via bitwise
|
||||||
|
// AND) and then compare against the first value provided.
|
||||||
|
// For example, in the less than or equal case, if the syscall argument was
|
||||||
|
// 0 and the value provided was 1, the condition would match, as 0 is less
|
||||||
|
// than or equal to 1.
|
||||||
|
// Return either an error on bad argument or a valid ScmpCondition struct.
|
||||||
|
func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCondition, error) {
|
||||||
|
var condStruct ScmpCondition
|
||||||
|
|
||||||
|
if comparison == CompareInvalid {
|
||||||
|
return condStruct, fmt.Errorf("invalid comparison operator")
|
||||||
|
} else if arg > 5 {
|
||||||
|
return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
|
||||||
|
} else if len(values) > 2 {
|
||||||
|
return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
|
||||||
|
} else if len(values) == 0 {
|
||||||
|
return condStruct, fmt.Errorf("must provide at least one value to compare against")
|
||||||
|
}
|
||||||
|
|
||||||
|
condStruct.Argument = arg
|
||||||
|
condStruct.Op = comparison
|
||||||
|
condStruct.Operand1 = values[0]
|
||||||
|
if len(values) == 2 {
|
||||||
|
condStruct.Operand2 = values[1]
|
||||||
|
} else {
|
||||||
|
condStruct.Operand2 = 0 // Unused
|
||||||
|
}
|
||||||
|
|
||||||
|
return condStruct, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Utility Functions
|
||||||
|
|
||||||
|
// GetNativeArch returns architecture token representing the native kernel
|
||||||
|
// architecture
|
||||||
|
func GetNativeArch() (ScmpArch, error) {
|
||||||
|
arch := C.seccomp_arch_native()
|
||||||
|
|
||||||
|
return archFromNative(arch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Public Filter API
|
||||||
|
|
||||||
|
// ScmpFilter represents a filter context in libseccomp.
|
||||||
|
// A filter context is initially empty. Rules can be added to it, and it can
|
||||||
|
// then be loaded into the kernel.
|
||||||
|
type ScmpFilter struct {
|
||||||
|
filterCtx C.scmp_filter_ctx
|
||||||
|
valid bool
|
||||||
|
lock sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFilter creates and returns a new filter context.
|
||||||
|
// Accepts a default action to be taken for syscalls which match no rules in
|
||||||
|
// the filter.
|
||||||
|
// Returns a reference to a valid filter context, or nil and an error if the
|
||||||
|
// filter context could not be created or an invalid default action was given.
|
||||||
|
func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
|
||||||
|
if err := sanitizeAction(defaultAction); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fPtr := C.seccomp_init(defaultAction.toNative())
|
||||||
|
if fPtr == nil {
|
||||||
|
return nil, fmt.Errorf("could not create filter")
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := new(ScmpFilter)
|
||||||
|
filter.filterCtx = fPtr
|
||||||
|
filter.valid = true
|
||||||
|
runtime.SetFinalizer(filter, filterFinalizer)
|
||||||
|
|
||||||
|
return filter, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValid determines whether a filter context is valid to use.
|
||||||
|
// Some operations (Release and Merge) render filter contexts invalid and
|
||||||
|
// consequently prevent further use.
|
||||||
|
func (f *ScmpFilter) IsValid() bool {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
return f.valid
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets a filter context, removing all its existing state.
|
||||||
|
// Accepts a new default action to be taken for syscalls which do not match.
|
||||||
|
// Returns an error if the filter or action provided are invalid.
|
||||||
|
func (f *ScmpFilter) Reset(defaultAction ScmpAction) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if err := sanitizeAction(defaultAction); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !f.valid {
|
||||||
|
return errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative())
|
||||||
|
if retCode != 0 {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release releases a filter context, freeing its memory. Should be called after
|
||||||
|
// loading into the kernel, when the filter is no longer needed.
|
||||||
|
// After calling this function, the given filter is no longer valid and cannot
|
||||||
|
// be used.
|
||||||
|
// Release() will be invoked automatically when a filter context is garbage
|
||||||
|
// collected, but can also be called manually to free memory.
|
||||||
|
func (f *ScmpFilter) Release() {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if !f.valid {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f.valid = false
|
||||||
|
C.seccomp_release(f.filterCtx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge merges two filter contexts.
|
||||||
|
// The source filter src will be released as part of the process, and will no
|
||||||
|
// longer be usable or valid after this call.
|
||||||
|
// To be merged, filters must NOT share any architectures, and all their
|
||||||
|
// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
|
||||||
|
// must match.
|
||||||
|
// The filter src will be merged into the filter this is called on.
|
||||||
|
// The architectures of the src filter not present in the destination, and all
|
||||||
|
// associated rules, will be added to the destination.
|
||||||
|
// Returns an error if merging the filters failed.
|
||||||
|
func (f *ScmpFilter) Merge(src *ScmpFilter) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
src.lock.Lock()
|
||||||
|
defer src.lock.Unlock()
|
||||||
|
|
||||||
|
if !src.valid || !f.valid {
|
||||||
|
return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge the filters
|
||||||
|
retCode := C.seccomp_merge(f.filterCtx, src.filterCtx)
|
||||||
|
if syscall.Errno(-1*retCode) == syscall.EINVAL {
|
||||||
|
return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
|
||||||
|
} else if retCode != 0 {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
src.valid = false
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsArchPresent checks if an architecture is present in a filter.
|
||||||
|
// If a filter contains an architecture, it uses its default action for
|
||||||
|
// syscalls which do not match rules in it, and its rules can match syscalls
|
||||||
|
// for that ABI.
|
||||||
|
// If a filter does not contain an architecture, all syscalls made to that
|
||||||
|
// kernel ABI will fail with the filter's default Bad Architecture Action
|
||||||
|
// (by default, killing the process).
|
||||||
|
// Accepts an architecture constant.
|
||||||
|
// Returns true if the architecture is present in the filter, false otherwise,
|
||||||
|
// and an error on an invalid filter context, architecture constant, or an
|
||||||
|
// issue with the call to libseccomp.
|
||||||
|
func (f *ScmpFilter) IsArchPresent(arch ScmpArch) (bool, error) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if err := sanitizeArch(arch); err != nil {
|
||||||
|
return false, err
|
||||||
|
} else if !f.valid {
|
||||||
|
return false, errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative())
|
||||||
|
if syscall.Errno(-1*retCode) == syscall.EEXIST {
|
||||||
|
// -EEXIST is "arch not present"
|
||||||
|
return false, nil
|
||||||
|
} else if retCode != 0 {
|
||||||
|
return false, syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddArch adds an architecture to the filter.
|
||||||
|
// Accepts an architecture constant.
|
||||||
|
// Returns an error on invalid filter context or architecture token, or an
|
||||||
|
// issue with the call to libseccomp.
|
||||||
|
func (f *ScmpFilter) AddArch(arch ScmpArch) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if err := sanitizeArch(arch); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !f.valid {
|
||||||
|
return errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Libseccomp returns -EEXIST if the specified architecture is already
|
||||||
|
// present. Succeed silently in this case, as it's not fatal, and the
|
||||||
|
// architecture is present already.
|
||||||
|
retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative())
|
||||||
|
if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveArch removes an architecture from the filter.
|
||||||
|
// Accepts an architecture constant.
|
||||||
|
// Returns an error on invalid filter context or architecture token, or an
|
||||||
|
// issue with the call to libseccomp.
|
||||||
|
func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if err := sanitizeArch(arch); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !f.valid {
|
||||||
|
return errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Similar to AddArch, -EEXIST is returned if the arch is not present
|
||||||
|
// Succeed silently in that case, this is not fatal and the architecture
|
||||||
|
// is not present in the filter after RemoveArch
|
||||||
|
retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative())
|
||||||
|
if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load loads a filter context into the kernel.
|
||||||
|
// Returns an error if the filter context is invalid or the syscall failed.
|
||||||
|
func (f *ScmpFilter) Load() error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if !f.valid {
|
||||||
|
return errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
if retCode := C.seccomp_load(f.filterCtx); retCode != 0 {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDefaultAction returns the default action taken on a syscall which does not
|
||||||
|
// match a rule in the filter, or an error if an issue was encountered
|
||||||
|
// retrieving the value.
|
||||||
|
func (f *ScmpFilter) GetDefaultAction() (ScmpAction, error) {
|
||||||
|
action, err := f.getFilterAttr(filterAttrActDefault)
|
||||||
|
if err != nil {
|
||||||
|
return 0x0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return actionFromNative(action)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBadArchAction returns the default action taken on a syscall for an
|
||||||
|
// architecture not in the filter, or an error if an issue was encountered
|
||||||
|
// retrieving the value.
|
||||||
|
func (f *ScmpFilter) GetBadArchAction() (ScmpAction, error) {
|
||||||
|
action, err := f.getFilterAttr(filterAttrActBadArch)
|
||||||
|
if err != nil {
|
||||||
|
return 0x0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return actionFromNative(action)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNoNewPrivsBit returns the current state the No New Privileges bit will be set
|
||||||
|
// to on the filter being loaded, or an error if an issue was encountered
|
||||||
|
// retrieving the value.
|
||||||
|
// The No New Privileges bit tells the kernel that new processes run with exec()
|
||||||
|
// cannot gain more privileges than the process that ran exec().
|
||||||
|
// For example, a process with No New Privileges set would be unable to exec
|
||||||
|
// setuid/setgid executables.
|
||||||
|
func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
|
||||||
|
noNewPrivs, err := f.getFilterAttr(filterAttrNNP)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if noNewPrivs == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTsyncBit returns whether Thread Synchronization will be enabled on the
|
||||||
|
// filter being loaded, or an error if an issue was encountered retrieving the
|
||||||
|
// value.
|
||||||
|
// Thread Sync ensures that all members of the thread group of the calling
|
||||||
|
// process will share the same Seccomp filter set.
|
||||||
|
// Tsync is a fairly recent addition to the Linux kernel and older kernels
|
||||||
|
// lack support. If the running kernel does not support Tsync and it is
|
||||||
|
// requested in a filter, Libseccomp will not enable TSync support and will
|
||||||
|
// proceed as normal.
|
||||||
|
// This function is unavailable before v2.2 of libseccomp and will return an
|
||||||
|
// error.
|
||||||
|
func (f *ScmpFilter) GetTsyncBit() (bool, error) {
|
||||||
|
tSync, err := f.getFilterAttr(filterAttrTsync)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if tSync == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBadArchAction sets the default action taken on a syscall for an
|
||||||
|
// architecture not in the filter, or an error if an issue was encountered
|
||||||
|
// setting the value.
|
||||||
|
func (f *ScmpFilter) SetBadArchAction(action ScmpAction) error {
|
||||||
|
if err := sanitizeAction(action); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.setFilterAttr(filterAttrActBadArch, action.toNative())
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNoNewPrivsBit sets the state of the No New Privileges bit, which will be
|
||||||
|
// applied on filter load, or an error if an issue was encountered setting the
|
||||||
|
// value.
|
||||||
|
// Filters with No New Privileges set to 0 can only be loaded if the process
|
||||||
|
// has the CAP_SYS_ADMIN capability.
|
||||||
|
func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
|
||||||
|
var toSet C.uint32_t = 0x0
|
||||||
|
|
||||||
|
if state {
|
||||||
|
toSet = 0x1
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.setFilterAttr(filterAttrNNP, toSet)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTsync sets whether Thread Synchronization will be enabled on the filter
|
||||||
|
// being loaded. Returns an error if setting Tsync failed, or the filter is
|
||||||
|
// invalid.
|
||||||
|
// Thread Sync ensures that all members of the thread group of the calling
|
||||||
|
// process will share the same Seccomp filter set.
|
||||||
|
// Tsync is a fairly recent addition to the Linux kernel and older kernels
|
||||||
|
// lack support. If the running kernel does not support Tsync and it is
|
||||||
|
// requested in a filter, Libseccomp will not enable TSync support and will
|
||||||
|
// proceed as normal.
|
||||||
|
// This function is unavailable before v2.2 of libseccomp and will return an
|
||||||
|
// error.
|
||||||
|
func (f *ScmpFilter) SetTsync(enable bool) error {
|
||||||
|
var toSet C.uint32_t = 0x0
|
||||||
|
|
||||||
|
if enable {
|
||||||
|
toSet = 0x1
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.setFilterAttr(filterAttrTsync, toSet)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSyscallPriority sets a syscall's priority.
|
||||||
|
// This provides a hint to the filter generator in libseccomp about the
|
||||||
|
// importance of this syscall. High-priority syscalls are placed
|
||||||
|
// first in the filter code, and incur less overhead (at the expense of
|
||||||
|
// lower-priority syscalls).
|
||||||
|
func (f *ScmpFilter) SetSyscallPriority(call ScmpSyscall, priority uint8) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if !f.valid {
|
||||||
|
return errBadFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
if retCode := C.seccomp_syscall_priority(f.filterCtx, C.int(call),
|
||||||
|
C.uint8_t(priority)); retCode != 0 {
|
||||||
|
return syscall.Errno(-1 * retCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRule adds a single rule for an unconditional action on a syscall.
// Accepts the number of the syscall and the action to be taken on the call
// being made.
// Returns an error if an issue was encountered adding the rule.
func (f *ScmpFilter) AddRule(call ScmpSyscall, action ScmpAction) error {
	return f.addRuleGeneric(call, action, false, nil)
}

// AddRuleExact adds a single rule for an unconditional action on a syscall.
// Accepts the number of the syscall and the action to be taken on the call
// being made.
// No modifications will be made to the rule, and it will fail to add if it
// cannot be applied to the current architecture without modification.
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
	return f.addRuleGeneric(call, action, true, nil)
}

// AddRuleConditional adds a single rule for a conditional action on a syscall.
// Returns an error if an issue was encountered adding the rule.
// All conditions must match for the rule to match.
// There is a bug in library versions below v2.2.1 which can, in some cases,
// cause conditions to be lost when more than one are used. Consequently,
// AddRuleConditional is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
	return f.addRuleGeneric(call, action, false, conds)
}

// AddRuleConditionalExact adds a single rule for a conditional action on a
// syscall.
// No modifications will be made to the rule, and it will fail to add if it
// cannot be applied to the current architecture without modification.
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
// There is a bug in library versions below v2.2.1 which can, in some cases,
// cause conditions to be lost when more than one are used. Consequently,
// AddRuleConditionalExact is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
	return f.addRuleGeneric(call, action, true, conds)
}

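For orientation, a minimal usage sketch of this rule-adding API (editorial illustration, not part of the vendored file). It assumes NewFilter, GetSyscallFromName, Load and Release from the same package, which this part of the diff does not show; the errno value and syscall names are examples only.

// Hypothetical illustration of building and loading a filter with AddRule.
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Default-deny filter: unmatched syscalls fail with errno 1 (example value).
	filter, err := seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(1))
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()

	// Allow a few syscalls unconditionally via AddRule.
	for _, name := range []string{"read", "write", "exit_group"} {
		call, err := seccomp.GetSyscallFromName(name)
		if err != nil {
			log.Fatal(err)
		}
		if err := filter.AddRule(call, seccomp.ActAllow); err != nil {
			log.Fatal(err)
		}
	}

	// Load applies the filter to the current process.
	if err := filter.Load(); err != nil {
		log.Fatal(err)
	}
}
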
// ExportPFC outputs a PFC-formatted, human-readable dump of a filter context's
// rules to a file.
// Accepts file to write to (must be open for writing).
// Returns an error if writing to the file fails.
func (f *ScmpFilter) ExportPFC(file *os.File) error {
	f.lock.Lock()
	defer f.lock.Unlock()

	fd := file.Fd()

	if !f.valid {
		return errBadFilter
	}

	if retCode := C.seccomp_export_pfc(f.filterCtx, C.int(fd)); retCode != 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// ExportBPF outputs a Berkeley Packet Filter-formatted, kernel-readable dump of a
// filter context's rules to a file.
// Accepts file to write to (must be open for writing).
// Returns an error if writing to the file fails.
func (f *ScmpFilter) ExportBPF(file *os.File) error {
	f.lock.Lock()
	defer f.lock.Unlock()

	fd := file.Fd()

	if !f.valid {
		return errBadFilter
	}

	if retCode := C.seccomp_export_bpf(f.filterCtx, C.int(fd)); retCode != 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
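A quick way to inspect what a filter will actually do is to dump it with ExportPFC. A tiny hedged sketch (the filter variable is assumed to have been built as in the earlier example; os.Stdout is the standard library file handle):

// Hypothetical debugging helper; assumes `filter` is an initialized *ScmpFilter.
func dumpFilter(filter *seccomp.ScmpFilter) error {
	// Write the human-readable PFC form of the current rules to stdout.
	return filter.ExportPFC(os.Stdout)
}
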
461
Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go
generated
vendored
Normal file
@ -0,0 +1,461 @@
// +build linux

// Internal functions for libseccomp Go bindings
// No exported functions

package seccomp

import (
	"fmt"
	"os"
	"syscall"
)

// Unexported C wrapping code - provides the C-Golang interface
// Get the seccomp header in scope
// Need stdlib.h for free() on cstrings

// #cgo LDFLAGS: -lseccomp
/*
#include <stdlib.h>
#include <seccomp.h>

#if SCMP_VER_MAJOR < 2
#error Minimum supported version of Libseccomp is v2.1.0
#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
#error Minimum supported version of Libseccomp is v2.1.0
#endif

#define ARCH_BAD ~0

const uint32_t C_ARCH_BAD = ARCH_BAD;

#ifndef SCMP_ARCH_AARCH64
#define SCMP_ARCH_AARCH64 ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPS
#define SCMP_ARCH_MIPS ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPS64
#define SCMP_ARCH_MIPS64 ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPS64N32
#define SCMP_ARCH_MIPS64N32 ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPSEL
#define SCMP_ARCH_MIPSEL ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPSEL64
#define SCMP_ARCH_MIPSEL64 ARCH_BAD
#endif

#ifndef SCMP_ARCH_MIPSEL64N32
#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
#endif

const uint32_t C_ARCH_NATIVE      = SCMP_ARCH_NATIVE;
const uint32_t C_ARCH_X86         = SCMP_ARCH_X86;
const uint32_t C_ARCH_X86_64      = SCMP_ARCH_X86_64;
const uint32_t C_ARCH_X32         = SCMP_ARCH_X32;
const uint32_t C_ARCH_ARM         = SCMP_ARCH_ARM;
const uint32_t C_ARCH_AARCH64     = SCMP_ARCH_AARCH64;
const uint32_t C_ARCH_MIPS        = SCMP_ARCH_MIPS;
const uint32_t C_ARCH_MIPS64      = SCMP_ARCH_MIPS64;
const uint32_t C_ARCH_MIPS64N32   = SCMP_ARCH_MIPS64N32;
const uint32_t C_ARCH_MIPSEL      = SCMP_ARCH_MIPSEL;
const uint32_t C_ARCH_MIPSEL64    = SCMP_ARCH_MIPSEL64;
const uint32_t C_ARCH_MIPSEL64N32 = SCMP_ARCH_MIPSEL64N32;

const uint32_t C_ACT_KILL  = SCMP_ACT_KILL;
const uint32_t C_ACT_TRAP  = SCMP_ACT_TRAP;
const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;

// If TSync is not supported, make sure it doesn't map to a supported filter attribute
// Don't worry about major version < 2, the minimum version checks should catch that case
#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
#endif

const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
const uint32_t C_ATTRIBUTE_NNP     = (uint32_t)SCMP_FLTATR_CTL_NNP;
const uint32_t C_ATTRIBUTE_TSYNC   = (uint32_t)SCMP_FLTATR_CTL_TSYNC;

const int C_CMP_NE        = (int)SCMP_CMP_NE;
const int C_CMP_LT        = (int)SCMP_CMP_LT;
const int C_CMP_LE        = (int)SCMP_CMP_LE;
const int C_CMP_EQ        = (int)SCMP_CMP_EQ;
const int C_CMP_GE        = (int)SCMP_CMP_GE;
const int C_CMP_GT        = (int)SCMP_CMP_GT;
const int C_CMP_MASKED_EQ = (int)SCMP_CMP_MASKED_EQ;

const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
const int C_VERSION_MINOR = SCMP_VER_MINOR;
const int C_VERSION_MICRO = SCMP_VER_MICRO;

typedef struct scmp_arg_cmp* scmp_cast_t;

// Wrapper to create an scmp_arg_cmp struct
void*
make_struct_arg_cmp(
	unsigned int arg,
	int compare,
	uint64_t a,
	uint64_t b
)
{
	struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));

	s->arg = arg;
	s->op = compare;
	s->datum_a = a;
	s->datum_b = b;

	return s;
}
*/
import "C"

// Nonexported types
type scmpFilterAttr uint32

// Nonexported constants

const (
	filterAttrActDefault scmpFilterAttr = iota
	filterAttrActBadArch scmpFilterAttr = iota
	filterAttrNNP        scmpFilterAttr = iota
	filterAttrTsync      scmpFilterAttr = iota
)

const (
	// An error return from certain libseccomp functions
	scmpError C.int = -1
	// Comparison boundaries to check for architecture validity
	archStart ScmpArch = ArchNative
	archEnd   ScmpArch = ArchMIPSEL64N32
	// Comparison boundaries to check for action validity
	actionStart ScmpAction = ActKill
	actionEnd   ScmpAction = ActAllow
	// Comparison boundaries to check for comparison operator validity
	compareOpStart ScmpCompareOp = CompareNotEqual
	compareOpEnd   ScmpCompareOp = CompareMaskedEqual
)

var (
	// Error thrown on bad filter context
	errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
	// Constants representing library major, minor, and micro versions
	verMajor = int(C.C_VERSION_MAJOR)
	verMinor = int(C.C_VERSION_MINOR)
	verMicro = int(C.C_VERSION_MICRO)
)

// Nonexported functions

// Check if library version is greater than or equal to the given one
func checkVersionAbove(major, minor, micro int) bool {
	return (verMajor > major) ||
		(verMajor == major && verMinor > minor) ||
		(verMajor == major && verMinor == minor && verMicro >= micro)
}

// Init function: Verify library version is appropriate
func init() {
	if !checkVersionAbove(2, 1, 0) {
		fmt.Fprintf(os.Stderr, "Libseccomp version too low: minimum supported is 2.1.0, detected %d.%d.%d", C.C_VERSION_MAJOR, C.C_VERSION_MINOR, C.C_VERSION_MICRO)
		os.Exit(-1)
	}
}

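checkVersionAbove is an ordered (major, minor, micro) comparison where equal versions pass. A small hedged illustration (the linked library version here is a hypothetical example, not something this diff states):

// Illustration only: assumes the linked libseccomp reports 2.2.0,
// i.e. verMajor=2, verMinor=2, verMicro=0.
func versionExamples() []bool {
	return []bool{
		checkVersionAbove(2, 1, 0), // true: 2.2.0 >= 2.1.0
		checkVersionAbove(2, 2, 0), // true: equal versions satisfy the check
		checkVersionAbove(2, 2, 1), // false: conditional-rule support would be refused
	}
}
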
// Filter helpers

// Filter finalizer - ensure that kernel context for filters is freed
func filterFinalizer(f *ScmpFilter) {
	f.Release()
}

// Get a raw filter attribute
func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if !f.valid {
		return 0x0, errBadFilter
	}

	if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
		return 0x0, fmt.Errorf("the thread synchronization attribute is not supported in this version of the library")
	}

	var attribute C.uint32_t

	retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
	if retCode != 0 {
		return 0x0, syscall.Errno(-1 * retCode)
	}

	return attribute, nil
}

// Set a raw filter attribute
func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error {
	f.lock.Lock()
	defer f.lock.Unlock()

	if !f.valid {
		return errBadFilter
	}

	if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
		return fmt.Errorf("the thread synchronization attribute is not supported in this version of the library")
	}

	retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
	if retCode != 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// DOES NOT LOCK OR CHECK VALIDITY
// Assumes caller has already done this
// Wrapper for seccomp_rule_add_... functions
func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
	var length C.uint
	if cond != nil {
		length = 1
	} else {
		length = 0
	}

	var retCode C.int
	if exact {
		retCode = C.seccomp_rule_add_exact_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
	} else {
		retCode = C.seccomp_rule_add_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
	}

	if syscall.Errno(-1*retCode) == syscall.EFAULT {
		return fmt.Errorf("unrecognized syscall")
	} else if syscall.Errno(-1*retCode) == syscall.EPERM {
		return fmt.Errorf("requested action matches default action of filter")
	} else if retCode != 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// Generic add function for filter rules
func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact bool, conds []ScmpCondition) error {
	f.lock.Lock()
	defer f.lock.Unlock()

	if !f.valid {
		return errBadFilter
	}

	if len(conds) == 0 {
		if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
			return err
		}
	} else {
		// We don't support conditional filtering in library version v2.1
		if !checkVersionAbove(2, 2, 1) {
			return fmt.Errorf("conditional filtering requires libseccomp version >= 2.2.1")
		}

		for _, cond := range conds {
			cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
			defer C.free(cmpStruct)

			if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
				return err
			}
		}
	}

	return nil
}

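Each ScmpCondition handed to addRuleGeneric becomes one C-side scmp_arg_cmp and one added rule. A hedged sketch of how a caller would express an argument condition through the public API; it assumes MakeCondition and GetSyscallFromName from this package, which this part of the diff does not show, and the syscall name and argument values are examples:

// Hypothetical illustration of a conditional rule.
func allowWriteToStdoutOnly(filter *seccomp.ScmpFilter) error {
	call, err := seccomp.GetSyscallFromName("write")
	if err != nil {
		return err
	}
	// Condition: argument 0 (the file descriptor) must equal 1 (stdout).
	cond, err := seccomp.MakeCondition(0, seccomp.CompareEqual, 1)
	if err != nil {
		return err
	}
	return filter.AddRuleConditional(call, seccomp.ActAllow, []seccomp.ScmpCondition{cond})
}
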
// Generic Helpers

// Helper - Sanitize Arch token input
func sanitizeArch(in ScmpArch) error {
	if in < archStart || in > archEnd {
		return fmt.Errorf("unrecognized architecture")
	}

	if in.toNative() == C.C_ARCH_BAD {
		return fmt.Errorf("architecture is not supported on this version of the library")
	}

	return nil
}

func sanitizeAction(in ScmpAction) error {
	inTmp := in & 0x0000FFFF
	if inTmp < actionStart || inTmp > actionEnd {
		return fmt.Errorf("unrecognized action")
	}

	if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
		return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno")
	}

	return nil
}

func sanitizeCompareOp(in ScmpCompareOp) error {
	if in < compareOpStart || in > compareOpEnd {
		return fmt.Errorf("unrecognized comparison operator")
	}

	return nil
}

func archFromNative(a C.uint32_t) (ScmpArch, error) {
	switch a {
	case C.C_ARCH_X86:
		return ArchX86, nil
	case C.C_ARCH_X86_64:
		return ArchAMD64, nil
	case C.C_ARCH_X32:
		return ArchX32, nil
	case C.C_ARCH_ARM:
		return ArchARM, nil
	case C.C_ARCH_NATIVE:
		return ArchNative, nil
	case C.C_ARCH_AARCH64:
		return ArchARM64, nil
	case C.C_ARCH_MIPS:
		return ArchMIPS, nil
	case C.C_ARCH_MIPS64:
		return ArchMIPS64, nil
	case C.C_ARCH_MIPS64N32:
		return ArchMIPS64N32, nil
	case C.C_ARCH_MIPSEL:
		return ArchMIPSEL, nil
	case C.C_ARCH_MIPSEL64:
		return ArchMIPSEL64, nil
	case C.C_ARCH_MIPSEL64N32:
		return ArchMIPSEL64N32, nil
	default:
		return 0x0, fmt.Errorf("unrecognized architecture")
	}
}

// Only use with sanitized arches, no error handling
func (a ScmpArch) toNative() C.uint32_t {
	switch a {
	case ArchX86:
		return C.C_ARCH_X86
	case ArchAMD64:
		return C.C_ARCH_X86_64
	case ArchX32:
		return C.C_ARCH_X32
	case ArchARM:
		return C.C_ARCH_ARM
	case ArchARM64:
		return C.C_ARCH_AARCH64
	case ArchMIPS:
		return C.C_ARCH_MIPS
	case ArchMIPS64:
		return C.C_ARCH_MIPS64
	case ArchMIPS64N32:
		return C.C_ARCH_MIPS64N32
	case ArchMIPSEL:
		return C.C_ARCH_MIPSEL
	case ArchMIPSEL64:
		return C.C_ARCH_MIPSEL64
	case ArchMIPSEL64N32:
		return C.C_ARCH_MIPSEL64N32
	case ArchNative:
		return C.C_ARCH_NATIVE
	default:
		return 0x0
	}
}

// Only use with sanitized ops, no error handling
func (a ScmpCompareOp) toNative() C.int {
	switch a {
	case CompareNotEqual:
		return C.C_CMP_NE
	case CompareLess:
		return C.C_CMP_LT
	case CompareLessOrEqual:
		return C.C_CMP_LE
	case CompareEqual:
		return C.C_CMP_EQ
	case CompareGreaterEqual:
		return C.C_CMP_GE
	case CompareGreater:
		return C.C_CMP_GT
	case CompareMaskedEqual:
		return C.C_CMP_MASKED_EQ
	default:
		return 0x0
	}
}

func actionFromNative(a C.uint32_t) (ScmpAction, error) {
	aTmp := a & 0xFFFF
	switch a & 0xFFFF0000 {
	case C.C_ACT_KILL:
		return ActKill, nil
	case C.C_ACT_TRAP:
		return ActTrap, nil
	case C.C_ACT_ERRNO:
		return ActErrno.SetReturnCode(int16(aTmp)), nil
	case C.C_ACT_TRACE:
		return ActTrace.SetReturnCode(int16(aTmp)), nil
	case C.C_ACT_ALLOW:
		return ActAllow, nil
	default:
		return 0x0, fmt.Errorf("unrecognized action")
	}
}

// Only use with sanitized actions, no error handling
func (a ScmpAction) toNative() C.uint32_t {
	switch a & 0xFFFF {
	case ActKill:
		return C.C_ACT_KILL
	case ActTrap:
		return C.C_ACT_TRAP
	case ActErrno:
		return C.C_ACT_ERRNO | (C.uint32_t(a) >> 16)
	case ActTrace:
		return C.C_ACT_TRACE | (C.uint32_t(a) >> 16)
	case ActAllow:
		return C.C_ACT_ALLOW
	default:
		return 0x0
	}
}

// Internal only, assumes safe attribute
func (a scmpFilterAttr) toNative() uint32 {
	switch a {
	case filterAttrActDefault:
		return uint32(C.C_ATTRIBUTE_DEFAULT)
	case filterAttrActBadArch:
		return uint32(C.C_ATTRIBUTE_BADARCH)
	case filterAttrNNP:
		return uint32(C.C_ATTRIBUTE_NNP)
	case filterAttrTsync:
		return uint32(C.C_ATTRIBUTE_TSYNC)
	default:
		return 0x0
	}
}
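actionFromNative and ScmpAction.toNative round-trip the errno/trace payload through the upper 16 bits of the Go-side action value (toNative shifts it back down into SCMP_ACT_ERRNO/SCMP_ACT_TRACE). A hedged sketch of that encoding from the caller's side; EPERM and the package import are illustrative assumptions:

// Illustration only: how an errno-returning action carries its return code.
// Assumes the standard "syscall" package and this seccomp package are imported.
func errnoActionExample() seccomp.ScmpAction {
	// SetReturnCode stores the code in the upper 16 bits of the action value;
	// toNative() later folds it into SCMP_ACT_ERRNO(code) for libseccomp.
	return seccomp.ActErrno.SetReturnCode(int16(syscall.EPERM))
}
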
@ -16,12 +16,14 @@ package influxdb
 import (
 	"fmt"
+	"net/url"
 	"os"
 	"sync"
 	"time"
 
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/storage"
+	"github.com/google/cadvisor/version"
+
 	influxdb "github.com/influxdb/influxdb/client"
 )
@ -31,39 +33,44 @@ func init() {
 }
 
 type influxdbStorage struct {
 	client      *influxdb.Client
 	machineName string
-	tableName      string
-	bufferDuration time.Duration
-	lastWrite      time.Time
-	series         []*influxdb.Series
-	lock           sync.Mutex
-	readyToFlush   func() bool
+	database        string
+	retentionPolicy string
+	bufferDuration  time.Duration
+	lastWrite       time.Time
+	points          []*influxdb.Point
+	lock            sync.Mutex
+	readyToFlush    func() bool
 }
 
+// Series names
 const (
-	colTimestamp          string = "time"
-	colMachineName        string = "machine"
-	colContainerName      string = "container_name"
-	colCpuCumulativeUsage string = "cpu_cumulative_usage"
+	// Cumulative CPU usage
+	serCpuUsageTotal  string = "cpu_usage_total"
+	serCpuUsageSystem string = "cpu_usage_system"
+	serCpuUsageUser   string = "cpu_usage_user"
+	serCpuUsagePerCpu string = "cpu_usage_per_cpu"
+	// Smoothed average of number of runnable threads x 1000.
+	serLoadAverage string = "load_average"
 	// Memory Usage
-	colMemoryUsage string = "memory_usage"
+	serMemoryUsage string = "memory_usage"
 	// Working set size
-	colMemoryWorkingSet string = "memory_working_set"
+	serMemoryWorkingSet string = "memory_working_set"
 	// Cumulative count of bytes received.
-	colRxBytes string = "rx_bytes"
+	serRxBytes string = "rx_bytes"
 	// Cumulative count of receive errors encountered.
-	colRxErrors string = "rx_errors"
+	serRxErrors string = "rx_errors"
 	// Cumulative count of bytes transmitted.
-	colTxBytes string = "tx_bytes"
+	serTxBytes string = "tx_bytes"
 	// Cumulative count of transmit errors encountered.
-	colTxErrors string = "tx_errors"
+	serTxErrors string = "tx_errors"
 	// Filesystem device.
-	colFsDevice = "fs_device"
+	serFsDevice string = "fs_device"
 	// Filesystem limit.
-	colFsLimit = "fs_limit"
+	serFsLimit string = "fs_limit"
 	// Filesystem usage.
-	colFsUsage = "fs_usage"
+	serFsUsage string = "fs_usage"
 )
 
 func new() (storage.StorageDriver, error) {
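Under the InfluxDB 0.9 model each stats sample becomes one point per metric: the measurement name comes from the ser* constants above, the numeric sample goes into a single value field, and machine/container identity moves into tags (see the field/tag constants and helpers added in the next hunk). A hedged sketch of roughly what one flushed point carries; the literal values are examples, only the names come from this file:

// Illustration only: approximate shape of one memory_usage point.
var examplePoint = influxdb.Point{
	Measurement: "memory_usage", // serMemoryUsage
	Tags: map[string]string{
		"machine":        "machineA",       // tagMachineName
		"container_name": "/docker/abc123", // tagContainerName
	},
	Fields: map[string]interface{}{
		"value": int64(123456789), // fieldValue, signed per toSignedIfUnsigned
	},
	Time: time.Now(),
}
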
@ -83,84 +90,122 @@ func new() (storage.StorageDriver, error) {
 	)
 }
 
-func (self *influxdbStorage) getSeriesDefaultValues(
-	ref info.ContainerReference,
-	stats *info.ContainerStats,
-	columns *[]string,
-	values *[]interface{}) {
-	// Timestamp
-	*columns = append(*columns, colTimestamp)
-	*values = append(*values, stats.Timestamp.UnixNano()/1E3)
-
-	// Machine name
-	*columns = append(*columns, colMachineName)
-	*values = append(*values, self.machineName)
-
-	// Container name
-	*columns = append(*columns, colContainerName)
-	if len(ref.Aliases) > 0 {
-		*values = append(*values, ref.Aliases[0])
-	} else {
-		*values = append(*values, ref.Name)
-	}
-}
-
-// In order to maintain a fixed column format, we add a new series for each filesystem partition.
-func (self *influxdbStorage) containerFilesystemStatsToSeries(
+// Field names
+const (
+	fieldValue  string = "value"
+	fieldType   string = "type"
+	fieldDevice string = "device"
+)
+
+// Tag names
+const (
+	tagMachineName   string = "machine"
+	tagContainerName string = "container_name"
+)
+
+func (self *influxdbStorage) containerFilesystemStatsToPoints(
 	ref info.ContainerReference,
-	stats *info.ContainerStats) (series []*influxdb.Series) {
+	stats *info.ContainerStats) (points []*influxdb.Point) {
 	if len(stats.Filesystem) == 0 {
-		return series
+		return points
 	}
 	for _, fsStat := range stats.Filesystem {
-		columns := make([]string, 0)
-		values := make([]interface{}, 0)
-		self.getSeriesDefaultValues(ref, stats, &columns, &values)
-
-		columns = append(columns, colFsDevice)
-		values = append(values, fsStat.Device)
-
-		columns = append(columns, colFsLimit)
-		values = append(values, fsStat.Limit)
-
-		columns = append(columns, colFsUsage)
-		values = append(values, fsStat.Usage)
-		series = append(series, self.newSeries(columns, values))
+		tagsFsUsage := map[string]string{
+			fieldDevice: fsStat.Device,
+			fieldType:   "usage",
+		}
+		fieldsFsUsage := map[string]interface{}{
+			fieldValue: int64(fsStat.Usage),
+		}
+		pointFsUsage := &influxdb.Point{
+			Measurement: serFsUsage,
+			Tags:        tagsFsUsage,
+			Fields:      fieldsFsUsage,
+		}
+
+		tagsFsLimit := map[string]string{
+			fieldDevice: fsStat.Device,
+			fieldType:   "limit",
+		}
+		fieldsFsLimit := map[string]interface{}{
+			fieldValue: int64(fsStat.Limit),
+		}
+		pointFsLimit := &influxdb.Point{
+			Measurement: serFsLimit,
+			Tags:        tagsFsLimit,
+			Fields:      fieldsFsLimit,
+		}
+
+		points = append(points, pointFsUsage, pointFsLimit)
 	}
-	return series
+
+	self.tagPoints(ref, stats, points)
+
+	return points
 }
 
-func (self *influxdbStorage) containerStatsToValues(
+// Set tags and timestamp for all points of the batch.
+// Points should inherit the tags that are set for BatchPoints, but that does not seem to work.
+func (self *influxdbStorage) tagPoints(ref info.ContainerReference, stats *info.ContainerStats, points []*influxdb.Point) {
+	// Use container alias if possible
+	var containerName string
+	if len(ref.Aliases) > 0 {
+		containerName = ref.Aliases[0]
+	} else {
+		containerName = ref.Name
+	}
+
+	commonTags := map[string]string{
+		tagMachineName:   self.machineName,
+		tagContainerName: containerName,
+	}
+	for i := 0; i < len(points); i++ {
+		// merge with existing tags if any
+		addTagsToPoint(points[i], commonTags)
+		points[i].Time = stats.Timestamp
+	}
+}
+
+func (self *influxdbStorage) containerStatsToPoints(
 	ref info.ContainerReference,
 	stats *info.ContainerStats,
-) (columns []string, values []interface{}) {
-	self.getSeriesDefaultValues(ref, stats, &columns, &values)
-	// Cumulative Cpu Usage
-	columns = append(columns, colCpuCumulativeUsage)
-	values = append(values, stats.Cpu.Usage.Total)
+) (points []*influxdb.Point) {
+	// CPU usage: Total usage in nanoseconds
+	points = append(points, makePoint(serCpuUsageTotal, stats.Cpu.Usage.Total))
+
+	// CPU usage: Time spent in system space (in nanoseconds)
+	points = append(points, makePoint(serCpuUsageSystem, stats.Cpu.Usage.System))
+
+	// CPU usage: Time spent in user space (in nanoseconds)
+	points = append(points, makePoint(serCpuUsageUser, stats.Cpu.Usage.User))
+
+	// CPU usage per CPU
+	for i := 0; i < len(stats.Cpu.Usage.PerCpu); i++ {
+		point := makePoint(serCpuUsagePerCpu, stats.Cpu.Usage.PerCpu[i])
+		tags := map[string]string{"instance": fmt.Sprintf("%v", i)}
+		addTagsToPoint(point, tags)
+
+		points = append(points, point)
+	}
+
+	// Load Average
+	points = append(points, makePoint(serLoadAverage, stats.Cpu.LoadAverage))
 
 	// Memory Usage
-	columns = append(columns, colMemoryUsage)
-	values = append(values, stats.Memory.Usage)
+	points = append(points, makePoint(serMemoryUsage, stats.Memory.Usage))
 
-	// Working set size
-	columns = append(columns, colMemoryWorkingSet)
-	values = append(values, stats.Memory.WorkingSet)
+	// Working Set Size
+	points = append(points, makePoint(serMemoryWorkingSet, stats.Memory.WorkingSet))
 
-	// Network stats.
-	columns = append(columns, colRxBytes)
-	values = append(values, stats.Network.RxBytes)
-
-	columns = append(columns, colRxErrors)
-	values = append(values, stats.Network.RxErrors)
-
-	columns = append(columns, colTxBytes)
-	values = append(values, stats.Network.TxBytes)
-
-	columns = append(columns, colTxErrors)
-	values = append(values, stats.Network.TxErrors)
-
-	return columns, values
+	// Network Stats
+	points = append(points, makePoint(serRxBytes, stats.Network.RxBytes))
+	points = append(points, makePoint(serRxErrors, stats.Network.RxErrors))
+	points = append(points, makePoint(serTxBytes, stats.Network.TxBytes))
+	points = append(points, makePoint(serTxErrors, stats.Network.TxErrors))
+
+	self.tagPoints(ref, stats, points)
+
+	return points
 }
 
 func (self *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) {
@ -175,27 +220,38 @@ func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.C
 	if stats == nil {
 		return nil
 	}
-	var seriesToFlush []*influxdb.Series
+	var pointsToFlush []*influxdb.Point
 	func() {
 		// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
 		self.lock.Lock()
 		defer self.lock.Unlock()
 
-		self.series = append(self.series, self.newSeries(self.containerStatsToValues(ref, stats)))
-		self.series = append(self.series, self.containerFilesystemStatsToSeries(ref, stats)...)
+		self.points = append(self.points, self.containerStatsToPoints(ref, stats)...)
+		self.points = append(self.points, self.containerFilesystemStatsToPoints(ref, stats)...)
 		if self.readyToFlush() {
-			seriesToFlush = self.series
-			self.series = make([]*influxdb.Series, 0)
+			pointsToFlush = self.points
+			self.points = make([]*influxdb.Point, 0)
 			self.lastWrite = time.Now()
 		}
 	}()
-	if len(seriesToFlush) > 0 {
-		err := self.client.WriteSeriesWithTimePrecision(seriesToFlush, influxdb.Microsecond)
-		if err != nil {
+	if len(pointsToFlush) > 0 {
+		points := make([]influxdb.Point, len(pointsToFlush))
+		for i, p := range pointsToFlush {
+			points[i] = *p
+		}
+
+		batchTags := map[string]string{tagMachineName: self.machineName}
+		bp := influxdb.BatchPoints{
+			Points:   points,
+			Database: self.database,
+			Tags:     batchTags,
+			Time:     stats.Timestamp,
+		}
+		response, err := self.client.Write(bp)
+		if err != nil || checkResponseForErrors(response) != nil {
 			return fmt.Errorf("failed to write stats to influxDb - %s", err)
 		}
 	}
 
 	return nil
 }
 
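Once points are flushed with client.Write, they can be read back with the same client's Query method (it is used in the tests further down). A hedged InfluxQL sketch, written as if inside this package since checkResponseForErrors is unexported; the query text and database name are examples, and the Database field on influxdb.Query is assumed from the 0.9 client rather than shown in this diff:

// Illustration only: read the most recent cpu_usage_total samples for one container.
q := influxdb.Query{
	Command:  `SELECT "value" FROM "cpu_usage_total" WHERE "container_name" = '/docker/abc123' LIMIT 10`,
	Database: "cadvisor",
}
response, err := client.Query(q)
if err != nil || checkResponseForErrors(response) != nil {
	// handle the error
}
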
@ -204,21 +260,9 @@ func (self *influxdbStorage) Close() error {
 	return nil
 }
 
-// Returns a new influxdb series.
-func (self *influxdbStorage) newSeries(columns []string, points []interface{}) *influxdb.Series {
-	out := &influxdb.Series{
-		Name:    self.tableName,
-		Columns: columns,
-		// There's only one point for each stats
-		Points: make([][]interface{}, 1),
-	}
-	out.Points[0] = points
-	return out
-}
-
 // machineName: A unique identifier to identify the host that current cAdvisor
 // instance is running on.
-// influxdbHost: The host which runs influxdb.
+// influxdbHost: The host which runs influxdb (host:port)
 func newStorage(
 	machineName,
 	tablename,
@ -229,28 +273,97 @@ func newStorage(
 	isSecure bool,
 	bufferDuration time.Duration,
 ) (*influxdbStorage, error) {
-	config := &influxdb.ClientConfig{
-		Host:     influxdbHost,
-		Username: username,
-		Password: password,
-		Database: database,
-		IsSecure: isSecure,
+	url := &url.URL{
+		Scheme: "http",
+		Host:   influxdbHost,
 	}
-	client, err := influxdb.NewClient(config)
+	if isSecure {
+		url.Scheme = "https"
+	}
+
+	config := &influxdb.Config{
+		URL:       *url,
+		Username:  username,
+		Password:  password,
+		UserAgent: fmt.Sprintf("%v/%v", "cAdvisor", version.Info["version"]),
+	}
+	client, err := influxdb.NewClient(*config)
 	if err != nil {
 		return nil, err
 	}
-	// TODO(monnand): With go 1.3, we cannot compress data now.
-	client.DisableCompression()
 
 	ret := &influxdbStorage{
 		client:         client,
 		machineName:    machineName,
-		tableName:      tablename,
+		database:       database,
 		bufferDuration: bufferDuration,
 		lastWrite:      time.Now(),
-		series:         make([]*influxdb.Series, 0),
+		points:         make([]*influxdb.Point, 0),
 	}
 	ret.readyToFlush = ret.defaultReadyToFlush
 	return ret, nil
 }
+
+// Creates a measurement point with a single value field
+func makePoint(name string, value interface{}) *influxdb.Point {
+	fields := map[string]interface{}{
+		fieldValue: toSignedIfUnsigned(value),
+	}
+
+	return &influxdb.Point{
+		Measurement: name,
+		Fields:      fields,
+	}
+}
+
+// Adds additional tags to the existing tags of a point
+func addTagsToPoint(point *influxdb.Point, tags map[string]string) {
+	if point.Tags == nil {
+		point.Tags = tags
+	} else {
+		for k, v := range tags {
+			point.Tags[k] = v
+		}
+	}
+}
+
+// Checks response for possible errors
+func checkResponseForErrors(response *influxdb.Response) error {
+	const msg = "failed to write stats to influxDb - %s"
+
+	if response != nil && response.Err != nil {
+		return fmt.Errorf(msg, response.Err)
+	}
+	if response != nil && response.Results != nil {
+		for _, result := range response.Results {
+			if result.Err != nil {
+				return fmt.Errorf(msg, result.Err)
+			}
+			if result.Series != nil {
+				for _, row := range result.Series {
+					if row.Err != nil {
+						return fmt.Errorf(msg, row.Err)
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Some stats have type unsigned integer, but the InfluxDB client accepts only signed integers.
+func toSignedIfUnsigned(value interface{}) interface{} {
+	switch v := value.(type) {
+	case uint64:
+		return int64(v)
+	case uint32:
+		return int32(v)
+	case uint16:
+		return int16(v)
+	case uint8:
+		return int8(v)
+	case uint:
+		return int(v)
+	}
+	return value
+}
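For completeness, a hedged sketch of how a caller wires this driver up outside cAdvisor's flag plumbing. The New constructor signature is taken from the tests below; the host, database and container values are placeholders, and the stats literal is deliberately minimal:

// Illustration only.
func writeOneSample() error {
	storage, err := New(
		"machineA",       // machineName
		"cadvisor",       // database
		"root",           // username
		"root",           // password
		"localhost:8086", // influxdbHost (host:port)
		false,            // isSecure
		1*time.Minute,    // bufferDuration between flushes
	)
	if err != nil {
		return err
	}
	defer storage.Close()

	ref := info.ContainerReference{Name: "/docker/abc123"}
	stats := &info.ContainerStats{Timestamp: time.Now()}
	// Points are buffered internally and written as one BatchPoints once readyToFlush() is true.
	return storage.AddStats(ref, stats)
}
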
@ -19,6 +19,8 @@ package influxdb
 import (
 	"fmt"
+	"math/rand"
+	"net/url"
 	"reflect"
 	"testing"
 	"time"
@ -28,6 +30,8 @@ import (
 	"github.com/google/cadvisor/storage/test"
 
 	influxdb "github.com/influxdb/influxdb/client"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // The duration in seconds for which stats will be buffered in the influxdb driver.
@ -40,10 +44,7 @@ type influxDbTestStorageDriver struct {
 }
 
 func (self *influxDbTestStorageDriver) readyToFlush() bool {
-	if self.count >= self.buffer {
-		return true
-	}
-	return false
+	return self.count >= self.buffer
 }
 
 func (self *influxDbTestStorageDriver) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
@ -51,18 +52,6 @@ func (self *influxDbTestStorageDriver) AddStats(ref info.ContainerReference, sta
 	return self.base.AddStats(ref, stats)
 }
 
-func (self *influxDbTestStorageDriver) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) {
-	return nil, nil
-}
-
-func (self *influxDbTestStorageDriver) Percentiles(containerName string, cpuUsagePercentiles []int, memUsagePercentiles []int) (*info.ContainerStatsPercentiles, error) {
-	return self.base.Percentiles(containerName, cpuUsagePercentiles, memUsagePercentiles)
-}
-
-func (self *influxDbTestStorageDriver) Samples(containerName string, numSamples int) ([]*info.ContainerStatsSample, error) {
-	return self.base.Samples(containerName, numSamples)
-}
-
 func (self *influxDbTestStorageDriver) Close() error {
 	return self.base.Close()
 }
@ -75,6 +64,28 @@ func (self *influxDbTestStorageDriver) StatsEq(a, b *info.ContainerStats) bool {
 	if a.Cpu.Usage.Total != b.Cpu.Usage.Total {
 		return false
 	}
+	if a.Cpu.Usage.System != b.Cpu.Usage.System {
+		return false
+	}
+	if a.Cpu.Usage.User != b.Cpu.Usage.User {
+		return false
+	}
+
+	// TODO simpler way to check if arrays are equal?
+	if a.Cpu.Usage.PerCpu == nil && b.Cpu.Usage.PerCpu != nil {
+		return false
+	}
+	if a.Cpu.Usage.PerCpu != nil && b.Cpu.Usage.PerCpu == nil {
+		return false
+	}
+	if len(a.Cpu.Usage.PerCpu) != len(b.Cpu.Usage.PerCpu) {
+		return false
+	}
+	for i, usage := range a.Cpu.Usage.PerCpu {
+		if usage != b.Cpu.Usage.PerCpu[i] {
+			return false
+		}
+	}
+
 	if a.Memory.Usage != b.Memory.Usage {
 		return false
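The PerCpu comparison above answers its own TODO in the standard library: reflect is already imported by this test file, so the nil, length and element checks could be collapsed into a single call, for example:

// Possible simplification of the PerCpu comparison; for []uint64 slices this is
// equivalent to the explicit nil/length/element checks above.
if !reflect.DeepEqual(a.Cpu.Usage.PerCpu, b.Cpu.Usage.PerCpu) {
	return false
}
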
@ -96,73 +107,56 @@ func (self *influxDbTestStorageDriver) StatsEq(a, b *info.ContainerStats) bool {
 
 func runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bufferCount int) {
 	machineName := "machineA"
-	tablename := "t"
-	database := "cadvisor"
+	database := "cadvisor_test"
 	username := "root"
 	password := "root"
 	hostname := "localhost:8086"
-	percentilesDuration := 10 * time.Minute
-	rootConfig := &influxdb.ClientConfig{
-		Host:     hostname,
+	//percentilesDuration := 10 * time.Minute
+
+	config := influxdb.Config{
+		URL:      url.URL{Scheme: "http", Host: hostname},
 		Username: username,
 		Password: password,
-		IsSecure: false,
-	}
-	rootClient, err := influxdb.NewClient(rootConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// create the data base first.
-	rootClient.CreateDatabase(database)
-	config := &influxdb.ClientConfig{
-		Host:     hostname,
-		Username: username,
-		Password: password,
-		Database: database,
-		IsSecure: false,
 	}
 	client, err := influxdb.NewClient(config)
 	if err != nil {
 		t.Fatal(err)
 	}
-	client.DisableCompression()
-	deleteAll := fmt.Sprintf("drop series %v", tablename)
-	_, err = client.Query(deleteAll)
-	if err != nil {
+
+	// Re-create the database first.
+	if err := prepareDatabase(client, database); err != nil {
 		t.Fatal(err)
 	}
-	// delete all data by the end of the call
-	defer client.Query(deleteAll)
+
+	// Delete all data by the end of the call.
+	//defer client.Query(influxdb.Query{Command: fmt.Sprintf("drop database \"%v\"", database)})
+
 	driver, err := New(machineName,
-		tablename,
 		database,
 		username,
 		password,
 		hostname,
 		false,
-		time.Duration(bufferCount),
-		percentilesDuration)
+		time.Duration(bufferCount))
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer driver.Close()
 	testDriver := &influxDbTestStorageDriver{buffer: bufferCount}
 	driver.OverrideReadyToFlush(testDriver.readyToFlush)
 	testDriver.base = driver
 
-	// generate another container's data on same machine.
+	// Generate another container's data on same machine.
 	test.StorageDriverFillRandomStatsFunc("containerOnSameMachine", 100, testDriver, t)
 
-	// generate another container's data on another machine.
+	// Generate another container's data on another machine.
 	driverForAnotherMachine, err := New("machineB",
-		tablename,
 		database,
 		username,
 		password,
 		hostname,
 		false,
-		time.Duration(bufferCount),
-		percentilesDuration)
+		time.Duration(bufferCount))
 	if err != nil {
 		t.Fatal(err)
 	}
@ -175,14 +169,138 @@ func runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bu
 	f(testDriver, t)
 }
 
-func TestRetrievePartialRecentStats(t *testing.T) {
-	runStorageTest(test.StorageDriverTestRetrievePartialRecentStats, t, 20)
+func prepareDatabase(client *influxdb.Client, database string) error {
+	dropDbQuery := influxdb.Query{
+		Command: fmt.Sprintf("drop database \"%v\"", database),
+	}
+	createDbQuery := influxdb.Query{
+		Command: fmt.Sprintf("create database \"%v\"", database),
+	}
+	// A default retention policy must always be present.
+	// Depending on the InfluxDB configuration it may be created automatically with the database or not.
+	// TODO create ret. policy only if not present
+	createPolicyQuery := influxdb.Query{
+		Command: fmt.Sprintf("create retention policy \"default\" on \"%v\" duration 1h replication 1 default", database),
+	}
+	_, err := client.Query(dropDbQuery)
+	if err != nil {
+		return err
+	}
+	_, err = client.Query(createDbQuery)
+	if err != nil {
+		return err
+	}
+	_, err = client.Query(createPolicyQuery)
+	return err
 }
 
-func TestRetrieveAllRecentStats(t *testing.T) {
-	runStorageTest(test.StorageDriverTestRetrieveAllRecentStats, t, 10)
+func TestContainerFileSystemStatsToPoints(t *testing.T) {
+	assert := assert.New(t)
+
+	machineName := "testMachine"
+	database := "cadvisor_test"
+	username := "root"
+	password := "root"
+	influxdbHost := "localhost:8086"
+
+	storage, err := New(machineName,
+		database,
+		username,
+		password,
+		influxdbHost,
+		false, 2*time.Minute)
+	assert.Nil(err)
+
+	ref := info.ContainerReference{
+		Name: "containerName",
+	}
+	stats := &info.ContainerStats{}
+	points := storage.containerFilesystemStatsToPoints(ref, stats)
+
+	// stats.Filesystem is always nil, not sure why
+	assert.Nil(points)
 }
 
-func TestNoRecentStats(t *testing.T) {
-	runStorageTest(test.StorageDriverTestNoRecentStats, t, kCacheDuration)
+func TestContainerStatsToPoints(t *testing.T) {
+	// Given
+	storage, err := createTestStorage()
+	require.Nil(t, err)
+	require.NotNil(t, storage)
+
+	ref, stats := createTestStats()
+	require.Nil(t, err)
+	require.NotNil(t, stats)
+
+	// When
+	points := storage.containerStatsToPoints(*ref, stats)
+
+	// Then
+	assert.NotEmpty(t, points)
+	assert.Len(t, points, 10+len(stats.Cpu.Usage.PerCpu))
+
+	assertContainsPointWithValue(t, points, serCpuUsageTotal, stats.Cpu.Usage.Total)
+	assertContainsPointWithValue(t, points, serCpuUsageSystem, stats.Cpu.Usage.System)
+	assertContainsPointWithValue(t, points, serCpuUsageUser, stats.Cpu.Usage.User)
+	assertContainsPointWithValue(t, points, serMemoryUsage, stats.Memory.Usage)
+	assertContainsPointWithValue(t, points, serLoadAverage, stats.Cpu.LoadAverage)
+	assertContainsPointWithValue(t, points, serMemoryWorkingSet, stats.Memory.WorkingSet)
+	assertContainsPointWithValue(t, points, serRxBytes, stats.Network.RxBytes)
+	assertContainsPointWithValue(t, points, serRxErrors, stats.Network.RxErrors)
+	assertContainsPointWithValue(t, points, serTxBytes, stats.Network.TxBytes)
+	assertContainsPointWithValue(t, points, serTxBytes, stats.Network.TxErrors)
+
+	for _, cpu_usage := range stats.Cpu.Usage.PerCpu {
+		assertContainsPointWithValue(t, points, serCpuUsagePerCpu, cpu_usage)
+	}
+}
+
+func assertContainsPointWithValue(t *testing.T, points []*influxdb.Point, name string, value interface{}) bool {
+	found := false
+	for _, point := range points {
+		if point.Measurement == name && point.Fields[fieldValue] == toSignedIfUnsigned(value) {
+			found = true
+			break
+		}
+	}
+	return assert.True(t, found, "no point found with name='%v' and value=%v", name, value)
+}
+
+func createTestStorage() (*influxdbStorage, error) {
+	machineName := "testMachine"
+	database := "cadvisor_test"
+	username := "root"
+	password := "root"
+	influxdbHost := "localhost:8086"
+
+	storage, err := New(machineName,
+		database,
+		username,
+		password,
+		influxdbHost,
+		false, 2*time.Minute)
+
+	return storage, err
+}
+
+func createTestStats() (*info.ContainerReference, *info.ContainerStats) {
+	ref := &info.ContainerReference{
+		Name:    "testContainername",
+		Aliases: []string{"testContainerAlias1", "testContainerAlias2"},
+	}
+
+	cpuUsage := info.CpuUsage{
+		Total:  uint64(rand.Intn(10000)),
+		PerCpu: []uint64{uint64(rand.Intn(1000)), uint64(rand.Intn(1000)), uint64(rand.Intn(1000))},
+		User:   uint64(rand.Intn(10000)),
+		System: uint64(rand.Intn(10000)),
+	}
+
+	stats := &info.ContainerStats{
+		Timestamp: time.Now(),
+		Cpu: info.CpuStats{
+			Usage:       cpuUsage,
+			LoadAverage: int32(rand.Intn(1000)),
+		},
+	}
+	return ref, stats
 }