diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore
new file mode 100644
index 00000000..3bf973ee
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/generator
+/cluster-test/cluster-test
+/cluster-test/*.log
+/cluster-test/es-chaos-monkey
+/spec
+/tmp
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml
new file mode 100644
index 00000000..2a9ed8b8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml
@@ -0,0 +1,19 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5
+ - tip
+
+env:
+ matrix:
+ - ES_VERSION=1.6.2
+ - ES_VERSION=1.7.1
+
+before_script:
+ - mkdir ${HOME}/elasticsearch
+ - wget http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz
+ - tar -xzf elasticsearch-${ES_VERSION}.tar.gz -C ${HOME}/elasticsearch
+ - ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/bin/elasticsearch >& /dev/null &
+ - sleep 15
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md
new file mode 100644
index 00000000..bb61408c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+ Elasticsearch. At the moment, we're targeting the current and the previous
+ release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+ probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS
new file mode 100644
index 00000000..4b74d090
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS
@@ -0,0 +1,22 @@
+# This is a list of people who have contributed code
+# to the Elastic repository.
+#
+# It is just my small "thank you" to all those that helped
+# making Elastic what it is.
+#
+# Please keep this list sorted.
+
+Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
+Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
+Corey Scott [@corsc](https://github.com/corsc)
+Gerhard Häring [@ghaering](https://github.com/ghaering)
+Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
+Jack Lindamood [@cep21](https://github.com/cep21)
+Junpei Tsuji [@jun06t](https://github.com/jun06t)
+Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
+Mara Kim [@autochthe](https://github.com/autochthe)
+Medhi Bechina [@mdzor](https://github.com/mdzor)
+Nicholas Wolff [@nwolff](https://github.com/nwolff)
+Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
+Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
+zakthomas [@zakthomas](https://github.com/zakthomas)
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE
new file mode 100644
index 00000000..8b22cdb6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md
new file mode 100644
index 00000000..81edd5de
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md
@@ -0,0 +1,419 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[](https://travis-ci.org/olivere/elastic)
+[](https://godoc.org/github.com/olivere/elastic)
+[](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+
+## Releases
+
+**Notice that the master branch always refers to the latest version of Elastic. If you want to use stable versions of Elastic, you should use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version  | Package URL
+----------------------|------------------|------------
+2.x | 3.0 **beta** | [`gopkg.in/olivere/elastic.v3-unstable`](https://gopkg.in/olivere/elastic.v3-unstable) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3-unstable))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have Elasticsearch 1.6.0 installed and want to use Elastic. As listed above, you should use Elastic 2.0. So you first install Elastic 2.0.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v2
+```
+
+Then you use it via the following import path:
+
+```go
+import "gopkg.in/olivere/elastic.v2"
+```
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.x and is currently under [active development](https://github.com/olivere/elastic/tree/release-branch.v3). It is not published to gopkg.in yet.
+
+There are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-2.0.html) and we will use this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We use Elastic in production since 2012. Although Elastic is quite stable
+from our experience, we don't have a stable API yet. The reason for this
+is that Elasticsearch changes quite often and at a fast pace.
+At this moment we focus on features, not on a stable API.
+
+Having said that, there have been no big API changes that required you
+to rewrite your application big time.
+More often than not it's renaming APIs and adding/removing features
+so that we are in sync with the Elasticsearch API.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0, 1.1, 1.2, 1.3, 1.4, and 1.5.
+Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Usage
+
+The first thing you do is to create a Client. The client connects to
+Elasticsearch on http://127.0.0.1:9200 by default.
+
+You typically create one client for your app. Here's a complete example.
+
+```go
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+ // Handle error
+}
+
+// Create an index
+_, err = client.CreateIndex("twitter").Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet).
+ Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ if t, ok := item.(Tweet); ok {
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+}
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+} else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+}
+
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+```
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+
+
+## API Status
+
+Here's the current API status.
+
+### APIs
+
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [x] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [x] Percolate
+- [ ] More like this
+- [ ] Benchmark
+
+### Indices
+
+- [x] Create index
+- [x] Delete index
+- [x] Get index
+- [x] Indices exists
+- [x] Open/close index
+- [x] Put mapping
+- [x] Get mapping
+- [ ] Get field mapping
+- [x] Types exist
+- [x] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [x] Get settings
+- [ ] Analyze
+- [x] Index templates
+- [ ] Warmers
+- [ ] Status
+- [x] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+- [ ] Upgrade
+
+### Snapshot and Restore
+
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+
+### Cat APIs
+
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+
+### Cluster
+
+- [x] Health
+- [x] State
+- [x] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [x] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+
+### Search
+
+- [x] Inner hits (for ES >= 1.5.0; see [docs](https://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html))
+
+### Query DSL
+
+#### Queries
+
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [ ] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+
+#### Filters
+
+- [x] `and`
+- [x] `bool`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [x] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `limit`
+- [x] `match_all`
+- [x] `missing`
+- [x] `nested`
+- [x] `not`
+- [x] `or`
+- [x] `prefix`
+- [x] `query`
+- [x] `range`
+- [x] `regexp`
+- [ ] `script`
+- [x] `term`
+- [x] `terms`
+- [x] `type`
+
+### Facets
+
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+
+### Aggregations
+
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) is implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot for the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
+
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go
new file mode 100644
index 00000000..1bc5a0fb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+
+type AliasService struct {
+ client *Client
+ actions []aliasAction
+ pretty bool
+}
+
+type aliasAction struct {
+ // "add" or "remove"
+ Type string
+ // Index name
+ Index string
+ // Alias name
+ Alias string
+ // Filter
+ Filter *Filter
+}
+
+func NewAliasService(client *Client) *AliasService {
+ builder := &AliasService{
+ client: client,
+ actions: make([]aliasAction, 0),
+ }
+ return builder
+}
+
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) Do() (*AliasResult, error) {
+ // Build url
+ path := "/_aliases"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Actions
+ body := make(map[string]interface{})
+ actionsJson := make([]interface{}, 0)
+
+ for _, action := range s.actions {
+ actionJson := make(map[string]interface{})
+ detailsJson := make(map[string]interface{})
+ detailsJson["index"] = action.Index
+ detailsJson["alias"] = action.Alias
+ if action.Filter != nil {
+ detailsJson["filter"] = (*action.Filter).Source()
+ }
+ actionJson[action.Type] = detailsJson
+ actionsJson = append(actionsJson, actionJson)
+ }
+
+ body["actions"] = actionsJson
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(AliasResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasResult struct {
+ Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias_test.go
new file mode 100644
index 00000000..45bf7980
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias_test.go
@@ -0,0 +1,123 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+const (
+ testAliasName = "elastic-test-alias"
+)
+
+func TestAliasLifecycle(t *testing.T) {
+ var err error
+
+ client := setupTestClientAndCreateIndex(t)
+
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ /*
+ // Alias should not yet exist
+ aliasesResult1, err := client.Aliases().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult1.Indices) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices))
+ }
+ */
+
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+ Add(testIndexName, testAliasName).
+ Add(testIndexName2, testAliasName).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasCreate.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ }
+
+ // Search should return all 3 tweets
+ matchAll := NewMatchAllQuery()
+ searchResult1, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult1.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult1.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
+ }
+
+ /*
+ // Alias should return both indices
+ aliasesResult2, err := client.Aliases().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult2.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+ }
+ */
+
+ // Remove first index should remove two tweets, so should only yield 1
+ aliasRemove1, err := client.Alias().
+ Remove(testIndexName, testAliasName).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasRemove1.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ }
+
+ searchResult2, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult2.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult2.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
+ }
+
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go
new file mode 100644
index 00000000..dddc231d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go
@@ -0,0 +1,160 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type AliasesService struct {
+ client *Client
+ indices []string
+ pretty bool
+}
+
+func NewAliasesService(client *Client) *AliasesService {
+ builder := &AliasesService{
+ client: client,
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *AliasesService) Index(indexName string) *AliasesService {
+ s.indices = append(s.indices, indexName)
+ return s
+}
+
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+ s.indices = append(s.indices, indexNames...)
+ return s
+}
+
+func (s *AliasesService) Do() (*AliasesResult, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // TODO Add types here
+
+ // Search
+ path += "/_aliases"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // {
+ // "indexName" : {
+ // "aliases" : {
+ // "alias1" : { },
+ // "alias2" : { }
+ // }
+ // },
+ // "indexName2" : {
+ // ...
+ // },
+ // }
+ indexMap := make(map[string]interface{})
+ if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+ return nil, err
+ }
+
+ // Each (indexName, _)
+ ret := &AliasesResult{
+ Indices: make(map[string]indexResult),
+ }
+ for indexName, indexData := range indexMap {
+ indexOut, found := ret.Indices[indexName]
+ if !found {
+ indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+ }
+
+ // { "aliases" : { ... } }
+ indexDataMap, ok := indexData.(map[string]interface{})
+ if ok {
+ aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+ if ok {
+ for aliasName, _ := range aliasesData {
+ aliasRes := aliasResult{AliasName: aliasName}
+ indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+ }
+ }
+ }
+
+ ret.Indices[indexName] = indexOut
+ }
+
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasesResult struct {
+ Indices map[string]indexResult
+}
+
+type indexResult struct {
+ Aliases []aliasResult
+}
+
+type aliasResult struct {
+ AliasName string
+}
+
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+ indices := make([]string, 0)
+
+ for indexName, indexInfo := range ar.Indices {
+ for _, aliasInfo := range indexInfo.Aliases {
+ if aliasInfo.AliasName == aliasName {
+ indices = append(indices, indexName)
+ }
+ }
+ }
+
+ return indices
+}
+
+func (ir indexResult) HasAlias(aliasName string) bool {
+ for _, alias := range ir.Aliases {
+ if alias.AliasName == aliasName {
+ return true
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases_test.go
new file mode 100644
index 00000000..5d3949cb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestAliases(t *testing.T) {
+ var err error
+
+ client := setupTestClientAndCreateIndex(t)
+
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Alias should not yet exist
+ aliasesResult1, err := client.Aliases().
+ Indices(testIndexName, testIndexName2).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult1.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult1.Indices {
+ if len(indexDetails.Aliases) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+ }
+ }
+
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+ Add(testIndexName, testAliasName).
+ Add(testIndexName2, testAliasName).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasCreate.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ }
+
+ // Alias should now exist
+ aliasesResult2, err := client.Aliases().
+ Indices(testIndexName, testIndexName2).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult2.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult2.Indices {
+ if len(indexDetails.Aliases) != 1 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+ }
+ }
+
+ // Check the reverse function:
+ indexInfo1, found := aliasesResult2.Indices[testIndexName]
+ if !found {
+ t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ }
+ aliasFound := indexInfo1.HasAlias(testAliasName)
+ if !aliasFound {
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+ }
+
+ // Check the reverse function:
+ indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+ if !found {
+ t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ }
+ aliasFound = indexInfo2.HasAlias(testAliasName)
+ if !aliasFound {
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+ }
+
+ // Remove first index should remove two tweets, so should only yield 1
+ aliasRemove1, err := client.Alias().
+ Remove(testIndexName, testAliasName).
+ //Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasRemove1.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ }
+
+ // Alias should now exist only for index 2
+ aliasesResult3, err := client.Aliases().Indices(testIndexName, testIndexName2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult3.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult3.Indices {
+ if indexName == testIndexName {
+ if len(indexDetails.Aliases) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+ }
+ } else if indexName == testIndexName2 {
+ if len(indexDetails.Aliases) != 1 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+ }
+ } else {
+ t.Errorf("got index %s", indexName)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go
new file mode 100644
index 00000000..90a52b9e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go
@@ -0,0 +1,301 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// BulkService collects bulkable requests (index, update, and delete
+// actions) and commits them to Elasticsearch in a single Bulk API
+// call via Do.
+type BulkService struct {
+ client *Client // client used to perform the HTTP request
+
+ index string // optional index name added to the URL path
+ _type string // optional type name added to the URL path
+ requests []BulkableRequest // queued actions, flushed by Do
+ //replicationType string
+ //consistencyLevel string
+ timeout string // optional "timeout" URL parameter
+ refresh *bool // optional "refresh" URL parameter (nil = not sent)
+ pretty bool // ask for pretty-printed JSON responses
+}
+
+// NewBulkService creates a new BulkService with an empty queue of
+// requests.
+func NewBulkService(client *Client) *BulkService {
+ builder := &BulkService{
+ client: client,
+ requests: make([]BulkableRequest, 0),
+ }
+ return builder
+}
+
+// reset clears the queued requests so the service can be reused
+// after a commit (called by Do on success).
+func (s *BulkService) reset() {
+ s.requests = make([]BulkableRequest, 0)
+}
+
+// Index sets the index name that is added to the request URL path.
+func (s *BulkService) Index(index string) *BulkService {
+ s.index = index
+ return s
+}
+
+// Type sets the type name that is added to the request URL path.
+func (s *BulkService) Type(_type string) *BulkService {
+ s._type = _type
+ return s
+}
+
+// Timeout sets the "timeout" URL parameter sent with the request.
+func (s *BulkService) Timeout(timeout string) *BulkService {
+ s.timeout = timeout
+ return s
+}
+
+// Refresh sets the "refresh" URL parameter sent with the request.
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+ s.refresh = &refresh
+ return s
+}
+
+// Pretty asks Elasticsearch to return pretty-printed JSON.
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+ s.pretty = pretty
+ return s
+}
+
+// Add appends a bulkable request to the queue. Nothing is sent
+// until Do is called.
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+ s.requests = append(s.requests, r)
+ return s
+}
+
+// NumberOfActions returns the number of currently queued requests.
+func (s *BulkService) NumberOfActions() int {
+ return len(s.requests)
+}
+
+// bodyAsString serializes all queued requests into the newline-delimited
+// body expected by the Elasticsearch Bulk API. Each request contributes
+// one or more lines (the action line plus, for index/update actions, a
+// document source line).
+func (s *BulkService) bodyAsString() (string, error) {
+ buf := bytes.NewBufferString("")
+
+ for _, req := range s.requests {
+ source, err := req.Source()
+ if err != nil {
+ return "", err
+ }
+ for _, line := range source {
+ _, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+ if err != nil {
+ // BUGFIX: previously returned `"", nil` here, silently
+ // swallowing the write error and returning an empty body
+ // with no indication of failure.
+ return "", err
+ }
+ }
+ }
+
+ return buf.String(), nil
+}
+
+// Do commits all queued requests in a single Bulk API call. It fails
+// if the queue is empty. On success the queue is cleared so the
+// service can be reused for further requests.
+func (s *BulkService) Do() (*BulkResponse, error) {
+ // No actions?
+ if s.NumberOfActions() == 0 {
+ return nil, errors.New("elastic: No bulk actions to commit")
+ }
+
+ // Get body
+ body, err := s.bodyAsString()
+ if err != nil {
+ return nil, err
+ }
+
+ // Build url: "/", optionally followed by "{index}/" and "{type}/",
+ // ending in "_bulk". Index and type are URL-escaped via uritemplates.
+ path := "/"
+ if s.index != "" {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += index + "/"
+ }
+ if s._type != "" {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": s._type,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += typ + "/"
+ }
+ path += "_bulk"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(BulkResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+
+ // Reset so the request can be reused
+ s.reset()
+
+ return ret, nil
+}
+
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+// "took":3,
+// "errors":false,
+// "items":[{
+// "index":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":3,
+// "status":201
+// }
+// },{
+// "index":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":3,
+// "status":200
+// }
+// },{
+// "delete":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":4,
+// "status":200,
+// "found":true
+// }
+// },{
+// "update":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":4,
+// "status":200
+// }
+// }]
+// }
+type BulkResponse struct {
+ Took int `json:"took,omitempty"` // processing time reported by Elasticsearch
+ Errors bool `json:"errors,omitempty"` // true if at least one item failed
+ Items []map[string]*BulkResponseItem `json:"items,omitempty"` // one entry per action, keyed by action name ("index", "create", "update", "delete")
+}
+
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int `json:"_version,omitempty"`
+ Status int `json:"status,omitempty"` // HTTP-style status code of the individual operation
+ Found bool `json:"found,omitempty"` // set on delete actions (see example above)
+ Error string `json:"error,omitempty"` // error message if this item failed
+}
+
+// Indexed returns all bulk request results of "index" actions.
+// Shorthand for ByAction("index").
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+ return r.ByAction("index")
+}
+
+// Created returns all bulk request results of "create" actions.
+// Shorthand for ByAction("create").
+func (r *BulkResponse) Created() []*BulkResponseItem {
+ return r.ByAction("create")
+}
+
+// Updated returns all bulk request results of "update" actions.
+// Shorthand for ByAction("update").
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+ return r.ByAction("update")
+}
+
+// Deleted returns all bulk request results of "delete" actions.
+// Shorthand for ByAction("delete").
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+ return r.ByAction("delete")
+}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete". It returns nil when the response carries
+// no items at all, and an empty (non-nil) slice when no item matches.
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ matches := make([]*BulkResponseItem, 0)
+ for _, entry := range r.Items {
+ if match, ok := entry[action]; ok {
+ matches = append(matches, match)
+ }
+ }
+ return matches
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.). It returns nil
+// when the response carries no items at all.
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ matches := make([]*BulkResponseItem, 0)
+ for _, entry := range r.Items {
+ for _, candidate := range entry {
+ if candidate.Id != id {
+ continue
+ }
+ matches = append(matches, candidate)
+ }
+ }
+ return matches
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+// It returns nil when the response carries no items at all.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ errors := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ for _, result := range item {
+ // Any status outside the 2xx range counts as a failure.
+ if !(result.Status >= 200 && result.Status <= 299) {
+ errors = append(errors, result)
+ }
+ }
+ }
+ return errors
+}
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those have a status code between 200 and 299.
+// It returns nil when the response carries no items at all.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ succeeded := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ for _, result := range item {
+ // Mirror image of Failed: only 2xx statuses are successes.
+ if result.Status >= 200 && result.Status <= 299 {
+ succeeded = append(succeeded, result)
+ }
+ }
+ }
+ return succeeded
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go
new file mode 100644
index 00000000..0ea37220
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go
@@ -0,0 +1,112 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// -- Bulk delete request --
+
+// BulkDeleteRequest is a bulk request to remove a document from
+// Elasticsearch. It serializes into a single "delete" action line.
+type BulkDeleteRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ routing string
+ refresh *bool
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+}
+
+// NewBulkDeleteRequest creates a new, empty BulkDeleteRequest.
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+ return &BulkDeleteRequest{}
+}
+
+// Index sets the index of the document to delete.
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+ r.index = index
+ return r
+}
+
+// Type sets the type of the document to delete.
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+ r.typ = typ
+ return r
+}
+
+// Id sets the id of the document to delete.
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+ r.id = id
+ return r
+}
+
+// Routing sets the routing value of the request.
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+ r.routing = routing
+ return r
+}
+
+// Refresh sets the "refresh" flag on the delete action.
+func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
+ r.refresh = &refresh
+ return r
+}
+
+// Version sets the version of the document to delete.
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+ r.version = version
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+ r.versionType = versionType
+ return r
+}
+
+// String returns the JSON lines of this request joined by newlines,
+// or an error description if serialization fails.
+func (r *BulkDeleteRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+// Source serializes the request into a single action line, e.g.
+// {"delete":{"_id":"1","_index":"index1","_type":"tweet"}}.
+// Only fields that were explicitly set are included.
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+ // A delete action has no document source, so exactly one line.
+ lines := make([]string, 1)
+
+ source := make(map[string]interface{})
+ deleteCommand := make(map[string]interface{})
+ if r.index != "" {
+ deleteCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ deleteCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ deleteCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ deleteCommand["_routing"] = r.routing
+ }
+ if r.version > 0 {
+ deleteCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ deleteCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ deleteCommand["refresh"] = *r.refresh
+ }
+ source["delete"] = deleteCommand
+
+ body, err := json.Marshal(source)
+ if err != nil {
+ return nil, err
+ }
+
+ lines[0] = string(body)
+
+ return lines, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request_test.go
new file mode 100644
index 00000000..73abfcd4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+// TestBulkDeleteRequestSerialization checks the JSON lines produced by
+// BulkDeleteRequest.Source against golden strings.
+func TestBulkDeleteRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+ Expected: []string{
+ `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go
new file mode 100644
index 00000000..49569467
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go
@@ -0,0 +1,173 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkIndexRequest is a bulk request to add a document to
+// Elasticsearch. It serializes into an action line (whose key is the
+// operation type) followed by the document source line.
+type BulkIndexRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ opType string
+ routing string
+ parent string
+ timestamp string
+ ttl int64
+ refresh *bool
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ doc interface{}
+}
+
+// NewBulkIndexRequest creates a new BulkIndexRequest. The operation
+// type defaults to "index".
+func NewBulkIndexRequest() *BulkIndexRequest {
+ return &BulkIndexRequest{
+ opType: "index",
+ }
+}
+
+// Index sets the index of the document to add.
+func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
+ r.index = index
+ return r
+}
+
+// Type sets the type of the document to add.
+func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
+ r.typ = typ
+ return r
+}
+
+// Id sets the id of the document to add.
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
+ r.id = id
+ return r
+}
+
+// OpType sets the operation type, e.g. "index" (default) or "create";
+// it becomes the key of the action line emitted by Source.
+func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
+ r.opType = opType
+ return r
+}
+
+// Routing sets the routing value of the request.
+func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
+ r.routing = routing
+ return r
+}
+
+// Parent sets the parent id of the request.
+func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
+ r.parent = parent
+ return r
+}
+
+// Timestamp sets the "_timestamp" field of the action line.
+func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
+ r.timestamp = timestamp
+ return r
+}
+
+// Ttl sets the "_ttl" field of the action line; only values > 0 are sent.
+func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
+ r.ttl = ttl
+ return r
+}
+
+// Refresh sets the "refresh" flag on the action line.
+func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
+ r.refresh = &refresh
+ return r
+}
+
+// Version sets the version of the document; only values > 0 are sent.
+func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
+ r.version = version
+ return r
+}
+
+// VersionType sets the version type, e.g. "internal" (default) or
+// "external".
+func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
+ r.versionType = versionType
+ return r
+}
+
+// Doc sets the document to index. It may be a struct/map (JSON-encoded
+// by Source), a json.RawMessage, or a plain string (used verbatim).
+func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
+ r.doc = doc
+ return r
+}
+
+// String returns the JSON lines of this request joined by newlines,
+// or an error description if serialization fails.
+func (r *BulkIndexRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+// Source serializes the request into two lines: the action line (keyed
+// by opType) and the document source line. Only explicitly set fields
+// appear on the action line; a nil doc serializes as "{}".
+func (r *BulkIndexRequest) Source() ([]string, error) {
+ // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ // { "field1" : "value1" }
+
+ lines := make([]string, 2)
+
+ // "index" ...
+ command := make(map[string]interface{})
+ indexCommand := make(map[string]interface{})
+ if r.index != "" {
+ indexCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ indexCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ indexCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ indexCommand["_routing"] = r.routing
+ }
+ if r.parent != "" {
+ indexCommand["_parent"] = r.parent
+ }
+ if r.timestamp != "" {
+ indexCommand["_timestamp"] = r.timestamp
+ }
+ if r.ttl > 0 {
+ indexCommand["_ttl"] = r.ttl
+ }
+ if r.version > 0 {
+ indexCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ indexCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ indexCommand["refresh"] = *r.refresh
+ }
+ command[r.opType] = indexCommand
+ line, err := json.Marshal(command)
+ if err != nil {
+ return nil, err
+ }
+ lines[0] = string(line)
+
+ // "field1" ...
+ if r.doc != nil {
+ // Raw JSON and plain strings are passed through verbatim;
+ // everything else is JSON-encoded.
+ switch t := r.doc.(type) {
+ default:
+ body, err := json.Marshal(r.doc)
+ if err != nil {
+ return nil, err
+ }
+ lines[1] = string(body)
+ case json.RawMessage:
+ lines[1] = string(t)
+ case *json.RawMessage:
+ lines[1] = string(*t)
+ case string:
+ lines[1] = t
+ case *string:
+ lines[1] = *t
+ }
+ } else {
+ lines[1] = "{}"
+ }
+
+ return lines, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request_test.go
new file mode 100644
index 00000000..271347e3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+ "time"
+)
+
+// TestBulkIndexRequestSerialization checks the JSON lines produced by
+// BulkIndexRequest.Source for the default, "create", and explicit
+// "index" operation types against golden strings.
+func TestBulkIndexRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #1
+ {
+ Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #2
+ {
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go
new file mode 100644
index 00000000..315b535c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go
@@ -0,0 +1,17 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
+// BulkableRequest is the generic interface implemented by all requests
+// that can be queued on a BulkService. Source returns the request as
+// one or more JSON lines for the newline-delimited Bulk API body.
+type BulkableRequest interface {
+ fmt.Stringer
+ Source() ([]string, error)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_test.go
new file mode 100644
index 00000000..6bfb82c3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_test.go
@@ -0,0 +1,370 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestBulk exercises index, delete, and update actions against a live
+// Elasticsearch instance and verifies the resulting documents.
+func TestBulk(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err := bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ // Do should have cleared the queue on success.
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Update
+ updateDoc := struct {
+ Retweets int `json:"retweets"`
+ }{
+ 42,
+ }
+ update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
+ bulkRequest = client.Bulk()
+ bulkRequest = bulkRequest.Add(update1Req)
+
+ if bulkRequest.NumberOfActions() != 1 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err = bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+ // Document with Id="2" should have a retweets count of 42
+ doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if doc == nil {
+ t.Fatal("expected doc to be != nil; got nil")
+ }
+ if !doc.Found {
+ t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+ }
+ if doc.Source == nil {
+ t.Fatal("expected doc source to be != nil; got nil")
+ }
+ var updatedTweet tweet
+ err = json.Unmarshal(*doc.Source, &updatedTweet)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updatedTweet.Retweets != 42 {
+ t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
+ }
+}
+
+// TestBulkWithIndexSetOnClient verifies that bulk actions still work
+// when a default index and type are set on the BulkService itself.
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+ bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err := bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+}
+
+// TestBulkRequestsSerialization checks the full bulk body against a
+// golden string, runs the request, and then inspects the response
+// accessors (Indexed, Created, Deleted, Updated, Succeeded, ById).
+func TestBulkRequestsSerialization(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ Doc(struct {
+ Retweets int `json:"retweets"`
+ }{
+ Retweets: 42,
+ })
+
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+ bulkRequest = bulkRequest.Add(update2Req)
+
+ if bulkRequest.NumberOfActions() != 4 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
+ }
+
+ expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"doc":{"retweets":42}}
+`
+ got, err := bulkRequest.bodyAsString()
+ if err != nil {
+ t.Fatalf("expected no error, got: %v", err)
+ }
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+
+ // Run the bulk request
+ bulkResponse, err := bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+ if bulkResponse.Took == 0 {
+ t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
+ }
+ if bulkResponse.Errors {
+ t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
+ }
+ if len(bulkResponse.Items) != 4 {
+ t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
+ }
+
+ // Indexed actions
+ indexed := bulkResponse.Indexed()
+ if indexed == nil {
+ t.Fatal("expected indexed to be != nil; got nil")
+ }
+ if len(indexed) != 1 {
+ t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
+ }
+ if indexed[0].Id != "1" {
+ t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
+ }
+ if indexed[0].Status != 201 {
+ t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
+ }
+
+ // Created actions
+ created := bulkResponse.Created()
+ if created == nil {
+ t.Fatal("expected created to be != nil; got nil")
+ }
+ if len(created) != 1 {
+ t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
+ }
+ if created[0].Id != "2" {
+ t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
+ }
+ if created[0].Status != 201 {
+ t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
+ }
+
+ // Deleted actions
+ deleted := bulkResponse.Deleted()
+ if deleted == nil {
+ t.Fatal("expected deleted to be != nil; got nil")
+ }
+ if len(deleted) != 1 {
+ t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
+ }
+ if deleted[0].Id != "1" {
+ t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
+ }
+ if deleted[0].Status != 200 {
+ t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
+ }
+ if !deleted[0].Found {
+ t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
+ }
+
+ // Updated actions
+ updated := bulkResponse.Updated()
+ if updated == nil {
+ t.Fatal("expected updated to be != nil; got nil")
+ }
+ if len(updated) != 1 {
+ t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
+ }
+ if updated[0].Id != "2" {
+ t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
+ }
+ if updated[0].Status != 200 {
+ t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
+ }
+ if updated[0].Version != 2 {
+ t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
+ }
+
+ // Succeeded actions
+ succeeded := bulkResponse.Succeeded()
+ if succeeded == nil {
+ t.Fatal("expected succeeded to be != nil; got nil")
+ }
+ if len(succeeded) != 4 {
+ t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
+ }
+
+ // ById
+ id1Results := bulkResponse.ById("1")
+ if id1Results == nil {
+ t.Fatal("expected id1Results to be != nil; got nil")
+ }
+ if len(id1Results) != 2 {
+ t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
+ }
+ if id1Results[0].Id != "1" {
+ t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
+ }
+ if id1Results[0].Status != 201 {
+ t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
+ }
+ if id1Results[0].Version != 1 {
+ t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
+ }
+ if id1Results[1].Id != "1" {
+ t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
+ }
+ if id1Results[1].Status != 200 {
+ t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
+ }
+ if id1Results[1].Version != 2 {
+ t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
+ }
+}
+
+// TestFailedBulkRequests unmarshals a canned bulk response containing
+// two non-2xx items (423 and 404) and checks that Failed finds both.
+func TestFailedBulkRequests(t *testing.T) {
+ js := `{
+ "took" : 2,
+ "errors" : true,
+ "items" : [ {
+ "index" : {
+ "_index" : "elastic-test",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 1,
+ "status" : 201
+ }
+ }, {
+ "create" : {
+ "_index" : "elastic-test",
+ "_type" : "tweet",
+ "_id" : "2",
+ "_version" : 1,
+ "status" : 423,
+ "error" : "Locked"
+ }
+ }, {
+ "delete" : {
+ "_index" : "elastic-test",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 2,
+ "status" : 404,
+ "found" : false
+ }
+ }, {
+ "update" : {
+ "_index" : "elastic-test",
+ "_type" : "tweet",
+ "_id" : "2",
+ "_version" : 2,
+ "status" : 200
+ }
+ } ]
+}`
+
+ var resp BulkResponse
+ err := json.Unmarshal([]byte(js), &resp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ failed := resp.Failed()
+ if len(failed) != 2 {
+ t.Errorf("expected %d failed items; got: %d", 2, len(failed))
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go
new file mode 100644
index 00000000..eba9f0d9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go
@@ -0,0 +1,244 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkUpdateRequest is a bulk request to update a document in Elasticsearch.
+type BulkUpdateRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+
+ routing string
+ parent string
+ script string
+ scriptType string
+ scriptLang string
+ scriptParams map[string]interface{}
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ retryOnConflict *int
+ refresh *bool
+ upsert interface{}
+ docAsUpsert *bool
+ doc interface{}
+ ttl int64
+ timestamp string
+}
+
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+ return &BulkUpdateRequest{}
+}
+
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+ r.index = index
+ return r
+}
+
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+ r.typ = typ
+ return r
+}
+
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+ r.id = id
+ return r
+}
+
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+ r.routing = routing
+ return r
+}
+
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+ r.parent = parent
+ return r
+}
+
+func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
+ r.script = script
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
+ r.scriptType = scriptType
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
+ r.scriptLang = scriptLang
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
+ r.scriptParams = params
+ return r
+}
+
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+ r.retryOnConflict = &retryOnConflict
+ return r
+}
+
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+ r.version = version
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+ r.versionType = versionType
+ return r
+}
+
+func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
+ r.refresh = &refresh
+ return r
+}
+
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+ r.doc = doc
+ return r
+}
+
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+ r.docAsUpsert = &docAsUpsert
+ return r
+}
+
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+ r.upsert = doc
+ return r
+}
+
+func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
+ r.ttl = ttl
+ return r
+}
+
+func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
+ r.timestamp = timestamp
+ return r
+}
+
+func (r *BulkUpdateRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+ switch t := data.(type) {
+ default:
+ body, err := json.Marshal(data)
+ if err != nil {
+ return "", err
+ }
+ return string(body), nil
+ case json.RawMessage:
+ return string(t), nil
+ case *json.RawMessage:
+ return string(*t), nil
+ case string:
+ return t, nil
+ case *string:
+ return *t, nil
+ }
+}
+
+func (r BulkUpdateRequest) Source() ([]string, error) {
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "doc" : { "field1" : "value1", ... } }
+ // or
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "script" : { ... } }
+
+ lines := make([]string, 2)
+
+ // "update" ...
+ command := make(map[string]interface{})
+ updateCommand := make(map[string]interface{})
+ if r.index != "" {
+ updateCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ updateCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ updateCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ updateCommand["_routing"] = r.routing
+ }
+ if r.parent != "" {
+ updateCommand["_parent"] = r.parent
+ }
+ if r.timestamp != "" {
+ updateCommand["_timestamp"] = r.timestamp
+ }
+ if r.ttl > 0 {
+ updateCommand["_ttl"] = r.ttl
+ }
+ if r.version > 0 {
+ updateCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ updateCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ updateCommand["refresh"] = *r.refresh
+ }
+ if r.retryOnConflict != nil {
+ updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+ }
+ if r.upsert != nil {
+ updateCommand["upsert"] = r.upsert
+ }
+ command["update"] = updateCommand
+ line, err := json.Marshal(command)
+ if err != nil {
+ return nil, err
+ }
+ lines[0] = string(line)
+
+ // 2nd line: {"doc" : { ... }} or {"script": {...}}
+ source := make(map[string]interface{})
+ if r.docAsUpsert != nil {
+ source["doc_as_upsert"] = *r.docAsUpsert
+ }
+ if r.doc != nil {
+ // {"doc":{...}}
+ source["doc"] = r.doc
+ } else if r.script != "" {
+ // {"script":...}
+ source["script"] = r.script
+ if r.scriptLang != "" {
+ source["lang"] = r.scriptLang
+ }
+ /*
+ if r.scriptType != "" {
+ source["script_type"] = r.scriptType
+ }
+ */
+ if r.scriptParams != nil && len(r.scriptParams) > 0 {
+ source["params"] = r.scriptParams
+ }
+ }
+ lines[1], err = r.getSourceAsString(source)
+ if err != nil {
+ return nil, err
+ }
+
+ return lines, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request_test.go
new file mode 100644
index 00000000..1d4ebafa
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestBulkUpdateRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"doc":{"counter":42}}`,
+ },
+ },
+ // #1
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ RetryOnConflict(3).
+ DocAsUpsert(true).
+ Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
+ `{"doc":{"counter":42},"doc_as_upsert":true}`,
+ },
+ },
+ // #2
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ RetryOnConflict(3).
+ Script(`ctx._source.retweets += param1`).
+ ScriptLang("js").
+ ScriptParams(map[string]interface{}{"param1": 42}).
+ Upsert(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`,
+ `{"lang":"js","params":{"param1":42},"script":"ctx._source.retweets += param1"}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go
new file mode 100644
index 00000000..64593085
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go
@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "net/url"
+
+// canonicalize takes a list of URLs and returns their canonicalized form, i.e.
+// remove anything but scheme, userinfo, host, and port. It also removes the
+// slash at the end. It also skips invalid URLs or URLs that do not use
+// protocol http or https.
+//
+// Example:
+// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
+func canonicalize(rawurls ...string) []string {
+ canonicalized := make([]string, 0)
+ for _, rawurl := range rawurls {
+ u, err := url.Parse(rawurl)
+ if err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+ u.Fragment = ""
+ u.Path = ""
+ u.RawQuery = ""
+ canonicalized = append(canonicalized, u.String())
+ }
+ }
+ return canonicalized
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize_test.go
new file mode 100644
index 00000000..ada2ff22
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+ tests := []struct {
+ Input []string
+ Output []string
+ }{
+ {
+ Input: []string{"http://127.0.0.1/"},
+ Output: []string{"http://127.0.0.1"},
+ },
+ {
+ Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
+ Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
+ },
+ {
+ Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
+ Output: []string{"http://user:secret@127.0.0.1"},
+ },
+ {
+ Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
+ Output: []string{"https://somewhere.on.mars:9999"},
+ },
+ }
+
+ for _, test := range tests {
+ got := canonicalize(test.Input...)
+ if !reflect.DeepEqual(got, test.Output) {
+ t.Errorf("expected %v; got: %v", test.Output, got)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go
new file mode 100644
index 00000000..13ac771e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go
@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// ClearScrollService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-request-scroll.html.
+type ClearScrollService struct {
+ client *Client
+ pretty bool
+ scrollId []string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+ return &ClearScrollService{
+ client: client,
+ scrollId: make([]string, 0),
+ }
+}
+
+// ScrollId sets the scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollId ...string) *ClearScrollService {
+ s.scrollId = make([]string, 0)
+ s.scrollId = append(s.scrollId, scrollId...)
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_search/scroll", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ return path, url.Values{}, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClearScrollService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body := strings.Join(s.scrollId, ",")
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClearScrollResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClearScrollResponse is the response of ClearScrollService.Do.
+type ClearScrollResponse struct {
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll_test.go
new file mode 100644
index 00000000..c251fc2d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll_test.go
@@ -0,0 +1,72 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ _ "net/http"
+ "testing"
+)
+
+func TestClearScroll(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ res, err := client.Scroll(testIndexName).Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if res.ScrollId == "" {
+ t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+ }
+
+ // Search should succeed
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Clear scroll id
+ clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if clearScrollRes == nil {
+ t.Error("expected results != nil; got nil")
+ }
+
+ // Search result should fail
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+ if err == nil {
+ t.Fatalf("expected scroll to fail")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go
new file mode 100644
index 00000000..881cefa9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go
@@ -0,0 +1,1291 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // Version is the current version of Elastic.
+ Version = "2.0.7"
+
+ // DefaultURL is the default endpoint of Elasticsearch on the local machine.
+ // It is used e.g. when initializing a new Client without a specific URL.
+ DefaultURL = "http://127.0.0.1:9200"
+
+ // DefaultScheme is the default protocol scheme to use when sniffing
+ // the Elasticsearch cluster.
+ DefaultScheme = "http"
+
+ // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+ DefaultHealthcheckEnabled = true
+
+ // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+ // for a response from Elasticsearch on startup, i.e. when creating a
+ // client. After the client is started, a shorter timeout is commonly used
+ // (its default is specified in DefaultHealthcheckTimeout).
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+ // DefaultHealthcheckTimeout specifies the time a running client waits for
+ // a response from Elasticsearch. Notice that the healthcheck timeout
+ // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+ DefaultHealthcheckTimeout = 1 * time.Second
+
+ // DefaultHealthcheckInterval is the default interval between
+ // two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second
+
+ // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+ DefaultSnifferEnabled = true
+
+ // DefaultSnifferInterval is the interval between two sniffing procedures,
+ // i.e. the lookup of all nodes in the cluster and their addition/removal
+ // from the list of actual connections.
+ DefaultSnifferInterval = 15 * time.Minute
+
+ // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+ // process that is initiated while creating a new client. For subsequent
+ // sniffing processes, DefaultSnifferTimeout is used (by default).
+ DefaultSnifferTimeoutStartup = 5 * time.Second
+
+ // DefaultSnifferTimeout is the default timeout after which the
+ // sniffing process times out. Notice that for the initial sniffing
+ // process, DefaultSnifferTimeoutStartup is used.
+ DefaultSnifferTimeout = 2 * time.Second
+
+ // DefaultMaxRetries is the number of retries for a single request after
+ // Elastic will give up and return an error. It is zero by default, so
+ // retry is disabled by default.
+ DefaultMaxRetries = 0
+)
+
+var (
+ // ErrNoClient is raised when no Elasticsearch node is available.
+ ErrNoClient = errors.New("no Elasticsearch node available")
+
+ // ErrRetry is raised when a request cannot be executed after the configured
+ // number of retries.
+ ErrRetry = errors.New("cannot connect after several retries")
+
+ // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
+ // didn't return in time.
+ ErrTimeout = errors.New("timeout")
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+ c *http.Client // net/http Client to use for requests
+
+ connsMu sync.RWMutex // connsMu guards the next block
+ conns []*conn // all connections
+ cindex int // index into conns
+
+ mu sync.RWMutex // guards the next block
+ urls []string // set of URLs passed initially to the client
+ running bool // true if the client's background processes are running
+ errorlog *log.Logger // error log for critical messages
+ infolog *log.Logger // information log for e.g. response times
+ tracelog *log.Logger // trace log for debugging
+ maxRetries int // max. number of retries
+ scheme string // http or https
+ healthcheckEnabled bool // healthchecks enabled or disabled
+ healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+ healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
+ healthcheckInterval time.Duration // interval between healthchecks
+ healthcheckStop chan bool // notify healthchecker to stop, and notify back
+ snifferEnabled bool // sniffer enabled or disabled
+ snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
+ snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
+ snifferInterval time.Duration // interval between sniffing
+ snifferStop chan bool // notify sniffer to stop, and notify back
+ decoder Decoder // used to decode data sent from Elasticsearch
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+// client, err := elastic.NewClient(
+// elastic.SetURL("http://localhost:9200", "http://localhost:9201"),
+// elastic.SetMaxRetries(10))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+ // Set up the client
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ maxRetries: DefaultMaxRetries,
+ healthcheckEnabled: DefaultHealthcheckEnabled,
+ healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+ healthcheckTimeout: DefaultHealthcheckTimeout,
+ healthcheckInterval: DefaultHealthcheckInterval,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: DefaultSnifferEnabled,
+ snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
+ snifferTimeout: DefaultSnifferTimeout,
+ snifferInterval: DefaultSnifferInterval,
+ snifferStop: make(chan bool),
+ }
+
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ }
+ c.urls = canonicalize(c.urls...)
+
+ // Check if we can make a request to any of the specified URLs
+ if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
+ return nil, err
+ }
+
+ if c.snifferEnabled {
+ // Sniff the cluster initially
+ if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+ return nil, err
+ }
+ } else {
+ // Do not sniff the cluster initially. Use the provided URLs instead.
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ }
+ }
+
+ // Perform an initial health check and
+ // ensure that we have at least one connection available
+ c.healthcheck(c.healthcheckTimeoutStartup, true)
+ if err := c.mustActiveConn(); err != nil {
+ return nil, err
+ }
+
+ go c.sniffer() // periodically update cluster information
+ go c.healthchecker() // start goroutine periodically ping all nodes of the cluster
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ return c, nil
+}
+
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+ return func(c *Client) error {
+ if httpClient != nil {
+ c.c = httpClient
+ } else {
+ c.c = http.DefaultClient
+ }
+ return nil
+ }
+}
+
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+ return func(c *Client) error {
+ switch len(urls) {
+ case 0:
+ c.urls = []string{DefaultURL}
+ default:
+ c.urls = urls
+ }
+ return nil
+ }
+}
+
+// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
+// This is http by default.
+func SetScheme(scheme string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.scheme = scheme
+ return nil
+ }
+}
+
+// SetSniff enables or disables the sniffer (enabled by default).
+func SetSniff(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferEnabled = enabled
+ return nil
+ }
+}
+
+// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
+// when creating a new client. The default is 5 seconds. Notice that the
+// timeout being used for subsequent sniffing processes is set with
+// SetSnifferTimeout.
+func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetSnifferTimeout sets the timeout for the sniffer that finds the
+// nodes in a cluster. The default is 2 seconds. Notice that the timeout
+// used when creating a new client on startup is usually greater and can
+// be set with SetSnifferTimeoutStartup.
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeout = timeout
+ return nil
+ }
+}
+
+// SetSnifferInterval sets the interval between two sniffing processes.
+// The default interval is 15 minutes.
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferInterval = interval
+ return nil
+ }
+}
+
+// SetHealthcheck enables or disables healthchecks (enabled by default).
+func SetHealthcheck(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckEnabled = enabled
+ return nil
+ }
+}
+
+// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
+// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
+// Notice that timeouts for subsequent health checks can be modified with
+// SetHealthcheckTimeout.
+func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckTimeout sets the timeout for periodic health checks.
+// The default timeout is 1 second (see DefaultHealthcheckTimeout).
+// Notice that a different (usually larger) timeout is used for the initial
+// healthcheck, which is initiated while creating a new client.
+// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
+func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeout = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckInterval sets the interval between two health checks.
+// The default interval is 60 seconds.
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckInterval = interval
+ return nil
+ }
+}
+
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing a HTTP request to Elasticsearch.
+func SetMaxRetries(maxRetries int) func(*Client) error {
+ return func(c *Client) error {
+ if maxRetries < 0 {
+ return errors.New("MaxRetries must be greater than or equal to 0")
+ }
+ c.maxRetries = maxRetries
+ return nil
+ }
+}
+
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) func(*Client) error {
+ return func(c *Client) error {
+ if decoder != nil {
+ c.decoder = decoder
+ } else {
+ c.decoder = &DefaultDecoder{}
+ }
+ return nil
+ }
+}
+
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.errorlog = logger
+ return nil
+ }
+}
+
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.infolog = logger
+ return nil
+ }
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.tracelog = logger
+ return nil
+ }
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+ c.connsMu.Lock()
+ conns := c.conns
+ c.connsMu.Unlock()
+
+ var buf bytes.Buffer
+ for i, conn := range conns {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(conn.String())
+ }
+ return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+ c.mu.RLock()
+ if c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ go c.sniffer()
+ go c.healthchecker()
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+ c.mu.RLock()
+ if !c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ c.healthcheckStop <- true
+ <-c.healthcheckStop
+
+ c.snifferStop <- true
+ <-c.snifferStop
+
+ c.mu.Lock()
+ c.running = false
+ c.mu.Unlock()
+
+ c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+ if c.errorlog != nil {
+ c.errorlog.Printf(format, args...)
+ }
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+ if c.infolog != nil {
+ c.infolog.Printf(format, args...)
+ }
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+ if c.tracelog != nil {
+ c.tracelog.Printf(format, args...)
+ }
+}
+
+// dumpRequest writes the given HTTP request (including its body) to the
+// trace log. It is a no-op when no trace log is configured or when the
+// request cannot be dumped.
+func (c *Client) dumpRequest(r *http.Request) {
+	if c.tracelog == nil {
+		return
+	}
+	if out, err := httputil.DumpRequestOut(r, true); err == nil {
+		c.tracef("%s\n", string(out))
+	}
+}
+
+// dumpResponse writes the given HTTP response (including its body) to the
+// trace log. It is a no-op when no trace log is configured or when the
+// response cannot be dumped.
+func (c *Client) dumpResponse(resp *http.Response) {
+	if c.tracelog == nil {
+		return
+	}
+	if out, err := httputil.DumpResponse(resp, true); err == nil {
+		c.tracef("%s\n", string(out))
+	}
+}
+
+// sniffer periodically runs sniff.
+//
+// The timeout and interval settings are re-read from the client on every
+// round, so changes made through the client's setters take effect without
+// restarting the goroutine. The goroutine exits when a value arrives on
+// snifferStop and acknowledges by sending a value back on the same
+// channel (this handshake is what Stop waits for).
+func (c *Client) sniffer() {
+	for {
+		c.mu.RLock()
+		timeout := c.snifferTimeout
+		// A fresh timer per round; time.After is recreated each iteration.
+		ticker := time.After(c.snifferInterval)
+		c.mu.RUnlock()
+
+		select {
+		case <-c.snifferStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.snifferStop <- true
+			return
+		case <-ticker:
+			c.sniff(timeout)
+		}
+	}
+}
+
+// sniff uses the Node Info API to return the list of nodes in the cluster.
+// It uses the list of URLs passed on startup plus the list of URLs found
+// by the preceding sniffing process (if sniffing is enabled).
+//
+// If sniffing is disabled, this is a no-op.
+func (c *Client) sniff(timeout time.Duration) error {
+	c.mu.RLock()
+	if !c.snifferEnabled {
+		c.mu.RUnlock()
+		return nil
+	}
+
+	// Use all available URLs provided to sniff the cluster.
+	urlsMap := make(map[string]bool)
+	urls := make([]string, 0)
+
+	// Add all URLs provided on startup
+	for _, url := range c.urls {
+		urlsMap[url] = true
+		urls = append(urls, url)
+	}
+	c.mu.RUnlock()
+
+	// Add all URLs found by sniffing
+	c.connsMu.RLock()
+	for _, conn := range c.conns {
+		if !conn.IsDead() {
+			url := conn.URL()
+			if _, found := urlsMap[url]; !found {
+				urls = append(urls, url)
+			}
+		}
+	}
+	c.connsMu.RUnlock()
+
+	if len(urls) == 0 {
+		return ErrNoClient
+	}
+
+	// Start sniffing on all found URLs
+	ch := make(chan []*conn, len(urls))
+	for _, url := range urls {
+		go func(url string) { ch <- c.sniffNode(url) }(url)
+	}
+
+	// Wait for the first non-empty result, or fail.
+	//
+	// The deadline is created once: creating time.After inside the loop
+	// restarted the timeout whenever an empty result arrived, so the call
+	// could block far longer than the requested timeout. We also count the
+	// outstanding goroutines so we can fail fast once every node has
+	// answered with an empty result, instead of waiting out the timeout.
+	deadline := time.After(timeout)
+	for remaining := len(urls); remaining > 0; remaining-- {
+		select {
+		case conns := <-ch:
+			if len(conns) > 0 {
+				c.updateConns(conns)
+				return nil
+			}
+		case <-deadline:
+			// We get here if no cluster responds in time
+			return ErrNoClient
+		}
+	}
+	// Every sniff goroutine responded, but none returned any nodes.
+	return ErrNoClient
+}
+
+// reSniffHostAndPort extracts the hostname and port from an address
+// reported by the Nodes Info API (example: "inet[/127.0.0.1:9200]").
+// Submatch 1 is the host, submatch 2 is the port.
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
+// sniffNode sniffs a single node. This method is run as a goroutine
+// in sniff. If successful, it returns the list of node URLs extracted
+// from the result of calling Nodes Info API. Otherwise, an empty array
+// is returned.
+func (c *Client) sniffNode(url string) []*conn {
+	nodes := make([]*conn, 0)
+
+	// Call the Nodes Info API at /_nodes/http
+	req, err := NewRequest("GET", url+"/_nodes/http")
+	if err != nil {
+		return nodes
+	}
+
+	res, err := c.c.Do((*http.Request)(req))
+	if err != nil || res == nil {
+		return nodes
+	}
+	// Guard against a nil body before handing it to the JSON decoder;
+	// the previous code only guarded the deferred Close, not the decode.
+	if res.Body == nil {
+		return nodes
+	}
+	defer res.Body.Close()
+
+	var info NodesInfoResponse
+	if err := json.NewDecoder(res.Body).Decode(&info); err != nil {
+		return nodes
+	}
+
+	// Pick the address field matching the configured scheme. Anything
+	// other than "https" falls back to plain HTTP, as before.
+	for nodeID, node := range info.Nodes {
+		scheme, address := "http", node.HTTPAddress
+		if c.scheme == "https" {
+			scheme, address = "https", node.HTTPSAddress
+		}
+		if m := reSniffHostAndPort.FindStringSubmatch(address); len(m) == 3 {
+			nodes = append(nodes, newConn(nodeID, fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])))
+		}
+	}
+	return nodes
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation. Existing connections (matched by node
+// ID) are kept so their state, e.g. failure counters, survives the
+// update; previously unknown nodes are added.
+func (c *Client) updateConns(conns []*conn) {
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	newConns := make([]*conn, 0, len(conns))
+
+	// Build up new connections:
+	// If we find an existing connection, use that (including no. of failures etc.).
+	// If we find a new connection, add it.
+	for _, conn := range conns {
+		var found bool
+		for _, oldConn := range c.conns {
+			if oldConn.NodeID() == conn.NodeID() {
+				// Take over the old connection
+				newConns = append(newConns, oldConn)
+				found = true
+				break
+			}
+		}
+		if !found {
+			// A node joining the cluster is informational, not an error,
+			// so log it to the info log rather than the error log.
+			c.infof("elastic: %s joined the cluster", conn.URL())
+			newConns = append(newConns, conn)
+		}
+	}
+
+	c.conns = newConns
+	// Reset the round-robin cursor so the next call to next starts at 0.
+	c.cindex = -1
+}
+
+// healthchecker periodically runs healthcheck.
+//
+// The timeout and interval settings are re-read from the client on every
+// round, so changes made through the client's setters take effect without
+// restarting the goroutine. The goroutine exits when a value arrives on
+// healthcheckStop and acknowledges by sending a value back on the same
+// channel (this handshake is what Stop waits for).
+func (c *Client) healthchecker() {
+	for {
+		c.mu.RLock()
+		timeout := c.healthcheckTimeout
+		// A fresh timer per round; time.After is recreated each iteration.
+		ticker := time.After(c.healthcheckInterval)
+		c.mu.RUnlock()
+
+		select {
+		case <-c.healthcheckStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.healthcheckStop <- true
+			return
+		case <-ticker:
+			c.healthcheck(timeout, false)
+		}
+	}
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled this is a no-op, unless force is true.
+// The timeout specifies how long to wait for a response from Elasticsearch.
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+	c.mu.RLock()
+	if !c.healthcheckEnabled && !force {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	c.connsMu.RLock()
+	conns := c.conns
+	c.connsMu.RUnlock()
+
+	timeoutInMillis := int64(timeout / time.Millisecond)
+
+	for _, conn := range conns {
+		params := make(url.Values)
+		params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
+		req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+		if err != nil {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			continue
+		}
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			continue
+		}
+		// Close the body immediately: the previous defer inside the loop
+		// kept every response open until the whole function returned.
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		if res.StatusCode >= 200 && res.StatusCode < 300 {
+			conn.MarkAsAlive()
+		} else {
+			conn.MarkAsDead()
+			c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
+		}
+	}
+}
+
+// startupHealthcheck is used at startup to check if the server is available
+// at all. It returns nil as soon as any of the configured URLs answers a
+// HEAD request with a 2xx status, and ErrNoClient once the timeout expires.
+func (c *Client) startupHealthcheck(timeout time.Duration) error {
+	c.mu.RLock()
+	urls := c.urls
+	c.mu.RUnlock()
+
+	// One HTTP client for the whole probe loop; the previous code
+	// allocated a fresh client on every iteration.
+	cl := &http.Client{Timeout: timeout}
+
+	// If we don't get a connection after "timeout", we bail.
+	start := time.Now()
+	for {
+		for _, url := range urls {
+			res, err := cl.Head(url)
+			if err == nil && res != nil {
+				// Close the body so the underlying connection can be
+				// reused; it was previously leaked.
+				if res.Body != nil {
+					res.Body.Close()
+				}
+				if res.StatusCode >= 200 && res.StatusCode < 300 {
+					return nil
+				}
+			}
+		}
+		time.Sleep(1 * time.Second)
+		if time.Since(start) > timeout {
+			break
+		}
+	}
+	return ErrNoClient
+}
+
+// next returns the next available connection, or ErrNoClient when every
+// connection is marked dead.
+// We do round-robin here.
+// TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+func (c *Client) next() (*conn, error) {
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	// Visit at most len(c.conns) candidates, advancing the round-robin
+	// cursor with wrap-around; stop at the first live connection.
+	numConns := len(c.conns)
+	for i := 0; i < numConns; i++ {
+		c.cindex++
+		if c.cindex >= numConns {
+			c.cindex = 0
+		}
+		if candidate := c.conns[c.cindex]; !candidate.IsDead() {
+			return candidate, nil
+		}
+	}
+
+	// TODO(oe) As a last resort, we could try to awake a dead connection here.
+
+	// We tried hard, but there is no node available
+	return nil, ErrNoClient
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	// Note: the loop variable must not be named c — the previous code
+	// shadowed the receiver.
+	for _, conn := range c.conns {
+		if !conn.IsDead() {
+			return nil
+		}
+	}
+	return ErrNoClient
+}
+
+// PerformRequest does a HTTP request to Elasticsearch.
+// It returns a response and an error on failure.
+//
+// Connections are picked round-robin via next. Failed attempts are
+// retried up to the configured number of retries, sleeping between
+// attempts with a simple exponential back-off.
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}) (*Response, error) {
+	start := time.Now().UTC()
+
+	c.mu.RLock()
+	timeout := c.healthcheckTimeout
+	retries := c.maxRetries
+	c.mu.RUnlock()
+
+	var err error
+	var conn *conn
+	var req *Request
+	var resp *Response
+	var retried bool
+
+	// We wait between retries, using simple exponential back-off.
+	// TODO: Make this configurable, including the jitter.
+	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+	for {
+		pathWithParams := path
+		if len(params) > 0 {
+			pathWithParams += "?" + params.Encode()
+		}
+
+		// Get a connection
+		conn, err = c.next()
+		if err == ErrNoClient {
+			if !retried {
+				// Force a healthcheck as all connections seem to be dead.
+				// force=true makes this work even when regular health
+				// checks are disabled; the previous code passed false,
+				// turning the call into a no-op in exactly that case.
+				c.healthcheck(timeout, true)
+			}
+			retries--
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if err != nil {
+			c.errorf("elastic: cannot get connection from pool")
+			return nil, err
+		}
+
+		req, err = NewRequest(method, conn.URL()+pathWithParams)
+		if err != nil {
+			c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+			return nil, err
+		}
+
+		// Set the request body: strings are sent as-is, everything else
+		// is serialized to JSON.
+		if body != nil {
+			switch b := body.(type) {
+			case string:
+				req.SetBodyString(b)
+			default:
+				req.SetBodyJson(body)
+			}
+		}
+
+		// Tracing
+		c.dumpRequest((*http.Request)(req))
+
+		// Get response
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			retries--
+			if retries <= 0 {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if res.Body != nil {
+			// Deferred closes accumulate across retries, but the number of
+			// iterations is bounded by maxRetries.
+			defer res.Body.Close()
+		}
+
+		// Check for errors
+		if err := checkResponse(res); err != nil {
+			retries--
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+
+		// Tracing
+		c.dumpResponse(res)
+
+		// We successfully made a request with this connection
+		conn.MarkAsHealthy()
+
+		resp, err = c.newResponse(res)
+		if err != nil {
+			return nil, err
+		}
+
+		break
+	}
+
+	duration := time.Now().UTC().Sub(start)
+	c.infof("%s %s [status:%d, request:%.3fs]",
+		strings.ToUpper(method),
+		req.URL,
+		resp.StatusCode,
+		float64(int64(duration/time.Millisecond))/1000)
+
+	return resp, nil
+}
+
+// ElasticsearchVersion returns the version number of the Elasticsearch
+// server running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	pingResult, _, err := c.Ping().URL(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return pingResult.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster, i.e. the
+// keys of the settings returned for the "_all" index pattern.
+func (c *Client) IndexNames() ([]string, error) {
+	res, err := c.IndexGetSettings().Index("_all").Do()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	// Only the keys (the index names) are needed; the redundant blank
+	// identifier in "for name, _ := range" has been dropped.
+	for name := range res {
+		names = append(names, name)
+	}
+	return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+	svc := NewPingService(c)
+	return svc
+}
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+	svc := NewCreateIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+	svc := NewDeleteIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// IndexExists allows to check if an index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+	svc := NewIndexExistsService(c)
+	svc.Index(name)
+	return svc
+}
+
+// TypeExists allows to check if one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+	svc := NewIndicesExistsTypeService(c)
+	return svc
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+	return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+	svc := NewOpenIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+	svc := NewCloseIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// Index returns a service to index (i.e. store) a document.
+func (c *Client) Index() *IndexService {
+	return NewIndexService(c)
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet() *IndicesGetService {
+	return NewIndicesGetService(c)
+}
+
+// IndexGetSettings retrieves settings about one or more indices.
+func (c *Client) IndexGetSettings() *IndicesGetSettingsService {
+	return NewIndicesGetSettingsService(c)
+}
+
+// Update returns a service to update a document.
+func (c *Client) Update() *UpdateService {
+	return NewUpdateService(c)
+}
+
+// Delete returns a service to delete a document.
+func (c *Client) Delete() *DeleteService {
+	return NewDeleteService(c)
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+	return NewDeleteByQueryService(c)
+}
+
+// Get returns a service to retrieve a document.
+func (c *Client) Get() *GetService {
+	return NewGetService(c)
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+	return NewMultiGetService(c)
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+	return NewExistsService(c)
+}
+
+// Count returns a service to count documents in the given indices.
+func (c *Client) Count(indices ...string) *CountService {
+	svc := NewCountService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+	svc := NewSearchService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Percolate allows to send a document and return matching queries.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+	return NewPercolateService(c)
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+	svc := NewMultiSearchService(c)
+	return svc
+}
+
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+	svc := NewSuggestService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+	svc := NewScanService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return requests to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+	svc := NewScrollService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll() *ClearScrollService {
+	return NewClearScrollService(c)
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	svc := NewOptimizeService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	svc := NewRefreshService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush() *FlushService {
+	return NewFlushService(c)
+}
+
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+	return NewExplainService(c).Index(index).Type(typ).Id(id)
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+	return NewBulkService(c)
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	return NewAliasService(c)
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	return NewAliasesService(c)
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+	svc := NewGetTemplateService(c)
+	return svc
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+	svc := NewPutTemplateService(c)
+	return svc
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	svc := NewDeleteTemplateService(c)
+	return svc
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+	return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists checks if an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+	return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+	return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+	return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *GetMappingService {
+	svc := NewGetMappingService(c)
+	return svc
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *PutMappingService {
+	svc := NewPutMappingService(c)
+	return svc
+}
+
+// DeleteMapping deletes a mapping.
+func (c *Client) DeleteMapping() *DeleteMappingService {
+	svc := NewDeleteMappingService(c)
+	return svc
+}
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	svc := NewClusterHealthService(c)
+	return svc
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	svc := NewClusterStateService(c)
+	return svc
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+	svc := NewClusterStatsService(c)
+	return svc
+}
+
+// NodesInfo retrieves one or more or all of the cluster nodes information.
+func (c *Client) NodesInfo() *NodesInfoService {
+	svc := NewNodesInfoService(c)
+	return svc
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+	svc := NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
+	return svc
+}
+
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits for the specified timeout, e.g. "10s".
+// If the cluster will have the given state within the timeout, nil is returned.
+// If the request timed out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
+	switch {
+	case err != nil:
+		return err
+	case health.TimedOut:
+		return ErrTimeout
+	default:
+		return nil
+	}
+}
+
+// WaitForGreenStatus waits for the cluster to reach the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+	return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to reach the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+	return c.WaitForStatus("yellow", timeout)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client_test.go
new file mode 100644
index 00000000..705a4822
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client_test.go
@@ -0,0 +1,620 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "net/http"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+)
+
+// findConn returns the index of the connection whose URL equals s,
+// and whether such a connection was found.
+func findConn(s string, slice ...*conn) (int, bool) {
+	for i, candidate := range slice {
+		if candidate.URL() == s {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// -- NewClient --
+
+// TestClientDefaults verifies that NewClient without options applies the
+// documented default sniffing and healthcheck settings.
+func TestClientDefaults(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if client.healthcheckEnabled != true {
+		t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+	}
+	if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+		t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+	}
+	if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+		t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+	}
+	if client.healthcheckInterval != DefaultHealthcheckInterval {
+		t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+	}
+	if client.snifferEnabled != true {
+		t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+	}
+	if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+		t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+	}
+	if client.snifferTimeout != DefaultSnifferTimeout {
+		t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+	}
+	if client.snifferInterval != DefaultSnifferInterval {
+		t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+	}
+}
+
+// TestClientWithoutURL checks that a client created without an explicit
+// URL sniffs the cluster at DefaultURL and finds at least one node.
+func TestClientWithoutURL(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster on DefaultURL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+	if len(client.conns) == 0 {
+		t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	// On Travis the node may advertise an address other than DefaultURL,
+	// so only assert the exact URL when running locally.
+	if !isTravis() {
+		if _, found := findConn(DefaultURL, client.conns...); !found {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+// TestClientWithSingleURL checks the same behavior when the default URL
+// is passed explicitly via SetURL.
+func TestClientWithSingleURL(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:9200"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster on DefaultURL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+	if len(client.conns) == 0 {
+		t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	if !isTravis() {
+		if _, found := findConn(DefaultURL, client.conns...); !found {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+// TestClientWithMultipleURLs verifies that sniffing two URLs, of which
+// only localhost:9200 serves a node, yields exactly one connection.
+func TestClientWithMultipleURLs(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:9200", "http://localhost:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should sniff both URLs, but only localhost:9200 should return nodes.
+	if len(client.conns) != 1 {
+		t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	// On Travis the advertised node address may differ from DefaultURL.
+	if !isTravis() {
+		if client.conns[0].URL() != DefaultURL {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+// TestClientSniffSuccess verifies that sniffing succeeds as long as at
+// least one of the configured URLs responds.
+func TestClientSniffSuccess(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:19200", "http://localhost:9200"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should sniff both URLs, but only localhost:9200 should return nodes.
+	if len(client.conns) != 1 {
+		t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+}
+
+// TestClientSniffFailure verifies that NewClient fails when none of the
+// configured URLs serves a node.
+func TestClientSniffFailure(t *testing.T) {
+	_, err := NewClient(SetURL("http://localhost:19200", "http://localhost:19201"))
+	if err == nil {
+		t.Fatalf("expected cluster to fail with no nodes found")
+	}
+}
+
+// TestClientSniffDisabled verifies that SetSniff(false) keeps all
+// configured URLs as connections, and that making requests marks the
+// unreachable node as dead while the reachable one stays alive.
+// NOTE(review): assumes nothing is listening on localhost:9201 — confirm
+// against the CI setup.
+func TestClientSniffDisabled(t *testing.T) {
+	client, err := NewClient(SetSniff(false), SetURL("http://localhost:9200", "http://localhost:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should not sniff, so it should have two connections.
+	if len(client.conns) != 2 {
+		t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns)
+	}
+	// Make two requests, so that both connections are being used
+	for i := 0; i < len(client.conns); i++ {
+		client.Flush().Do()
+	}
+	// The first connection (localhost:9200) should now be okay.
+	if i, found := findConn("http://localhost:9200", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://localhost:9200")
+	} else {
+		if conn := client.conns[i]; conn.IsDead() {
+			t.Fatal("expected connection to be alive, but it is dead")
+		}
+	}
+	// The second connection (localhost:9201) should now be marked as dead.
+	if i, found := findConn("http://localhost:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://localhost:9201")
+	} else {
+		if conn := client.conns[i]; !conn.IsDead() {
+			t.Fatal("expected connection to be dead, but it is alive")
+		}
+	}
+}
+
+// TestClientHealthcheckStartupTimeout verifies that NewClient fails with
+// ErrNoClient only after the configured startup healthcheck timeout when
+// the given URL is unreachable.
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+	start := time.Now()
+	_, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+	// time.Since is the idiomatic form of time.Now().Sub(start).
+	duration := time.Since(start)
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if duration < 5*time.Second {
+		t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
+	}
+}
+
+// -- Start and stop --
+
+// TestClientStartAndStop exercises the Start/Stop lifecycle: the client
+// runs its background processes after NewClient, stops (idempotently) on
+// Stop, and resumes (idempotently) on Start.
+func TestClientStartAndStop(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	running := client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+
+	// Stop
+	client.Stop()
+	running = client.IsRunning()
+	if running {
+		t.Fatalf("expected background processes to be stopped; got: %v", running)
+	}
+
+	// Stop again => no-op
+	client.Stop()
+	running = client.IsRunning()
+	if running {
+		t.Fatalf("expected background processes to be stopped; got: %v", running)
+	}
+
+	// Start
+	client.Start()
+	running = client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+
+	// Start again => no-op
+	client.Start()
+	running = client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+}
+
+// -- Sniffing --
+
+// TestClientSniffNode checks that sniffing a single node returns exactly
+// one connection whose URL matches the expected host:port pattern.
+func TestClientSniffNode(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ch := make(chan []*conn)
+	go func() { ch <- client.sniffNode(DefaultURL) }()
+
+	select {
+	case nodes := <-ch:
+		if len(nodes) != 1 {
+			t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
+		}
+		pattern := `http:\/\/[\d\.]+:9200`
+		matched, err := regexp.MatchString(pattern, nodes[0].URL())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !matched {
+			t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
+		}
+	case <-time.After(2 * time.Second):
+		// t.Fatal never returns, so the trailing break that followed it
+		// was unreachable and has been removed.
+		t.Fatal("expected no timeout in sniff node")
+	}
+}
+
+// TestClientSniffOnDefaultURL checks that a full sniff run against the
+// default URL succeeds within the startup timeout and yields one node.
+func TestClientSniffOnDefaultURL(t *testing.T) {
+	client, _ := NewClient()
+	if client == nil {
+		t.Fatal("no client returned")
+	}
+
+	ch := make(chan error, 1)
+	go func() {
+		ch <- client.sniff(DefaultSnifferTimeoutStartup)
+	}()
+
+	select {
+	case err := <-ch:
+		if err != nil {
+			t.Fatalf("expected sniff to succeed; got: %v", err)
+		}
+		if len(client.conns) != 1 {
+			t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns))
+		}
+		pattern := `http:\/\/[\d\.]+:9200`
+		matched, err := regexp.MatchString(pattern, client.conns[0].URL())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !matched {
+			t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL())
+		}
+	case <-time.After(2 * time.Second):
+		// t.Fatal never returns, so the trailing break that followed it
+		// was unreachable and has been removed.
+		t.Fatal("expected no timeout in sniff")
+	}
+}
+
+// -- Selector --
+
+// TestClientSelectConnHealthy checks round-robin selection when both
+// connections are healthy: next alternates between them.
+func TestClientSelectConnHealthy(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Both are healthy, so we should get both URLs in round-robin
+	client.conns[0].MarkAsHealthy()
+	client.conns[1].MarkAsHealthy()
+
+	// The Fatalf arguments below list the expected URL first and the
+	// actual URL second; the original messages had them swapped.
+	// #1: Return 1st
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[0].URL(), c.URL())
+	}
+	// #2: Return 2nd
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[1].URL(), c.URL())
+	}
+	// #3: Return 1st
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[0].URL(), c.URL())
+	}
+}
+
+// TestClientSelectConnHealthyAndDead checks that next keeps returning
+// the only healthy connection when the other one is dead.
+func TestClientSelectConnHealthyAndDead(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1st is healthy, second is dead
+	client.conns[0].MarkAsHealthy()
+	client.conns[1].MarkAsDead()
+
+	// The Fatalf arguments below list the expected URL first and the
+	// actual URL second; the original messages had them swapped.
+	// #1: Return 1st
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[0].URL(), c.URL())
+	}
+	// #2: Return 1st again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[0].URL(), c.URL())
+	}
+	// #3: Return 1st again and again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[0].URL(), c.URL())
+	}
+}
+
+// TestClientSelectConnDeadAndHealthy checks that next skips a dead first
+// connection and keeps returning the healthy second one.
+func TestClientSelectConnDeadAndHealthy(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1st is dead, 2nd is healthy
+	client.conns[0].MarkAsDead()
+	client.conns[1].MarkAsHealthy()
+
+	// The Fatalf arguments below list the expected URL first and the
+	// actual URL second; the original messages had them swapped.
+	// #1: Return 2nd
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[1].URL(), c.URL())
+	}
+	// #2: Return 2nd again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[1].URL(), c.URL())
+	}
+	// #3: Return 2nd again and again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", client.conns[1].URL(), c.URL())
+	}
+}
+
+func TestClientSelectConnAllDead(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Both are dead
+ client.conns[0].MarkAsDead()
+ client.conns[1].MarkAsDead()
+
+ // #1: Return ErrNoClient
+ c, err := client.next()
+ if err != ErrNoClient {
+ t.Fatal(err)
+ }
+ if c != nil {
+ t.Fatalf("expected no connection; got: %v", c)
+ }
+ // #2: Return ErrNoClient again
+ c, err = client.next()
+ if err != ErrNoClient {
+ t.Fatal(err)
+ }
+ if c != nil {
+ t.Fatalf("expected no connection; got: %v", c)
+ }
+ // #3: Return ErrNoClient again and again
+ c, err = client.next()
+ if err != ErrNoClient {
+ t.Fatal(err)
+ }
+ if c != nil {
+ t.Fatalf("expected no connection; got: %v", c)
+ }
+}
+
+// -- ElasticsearchVersion --
+
+func TestElasticsearchVersion(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ version, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if version == "" {
+ t.Errorf("expected a version number, got: %q", version)
+ }
+}
+
+// -- IndexNames --
+
+func TestIndexNames(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ names, err := client.IndexNames()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(names) == 0 {
+ t.Fatalf("expected some index names, got: %d", len(names))
+ }
+ var found bool
+ for _, name := range names {
+ if name == testIndexName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("expected to find index %q; got: %v", testIndexName, found)
+ }
+}
+
+// -- PerformRequest --
+
+func TestPerformRequest(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest("GET", "/", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.Status != 200 {
+ t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+ }
+}
+
+func TestPerformRequestWithLogger(t *testing.T) {
+ var w bytes.Buffer
+ out := log.New(&w, "LOGGER ", log.LstdFlags)
+
+ client, err := NewClient(SetInfoLog(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest("GET", "/", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.Status != 200 {
+ t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+ }
+
+ got := w.String()
+ pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
+ matched, err := regexp.MatchString(pattern, got)
+ if err != nil {
+ t.Fatalf("expected log line to match %q; got: %v", pattern, err)
+ }
+ if !matched {
+ t.Errorf("expected log line to match %q; got: %v", pattern, got)
+ }
+}
+
+func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
+ var lw bytes.Buffer
+ lout := log.New(&lw, "LOGGER ", log.LstdFlags)
+
+ var tw bytes.Buffer
+ tout := log.New(&tw, "TRACER ", log.LstdFlags)
+
+ client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest("GET", "/", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.Status != 200 {
+ t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+ }
+
+ lgot := lw.String()
+ if lgot == "" {
+ t.Errorf("expected logger output; got: %q", lgot)
+ }
+
+ tgot := tw.String()
+ if tgot == "" {
+ t.Errorf("expected tracer output; got: %q", tgot)
+ }
+}
+
+// failingTransport will run a fail callback if it sees a given URL path prefix.
+type failingTransport struct {
+ path string // path prefix to look for
+ fail func(*http.Request) (*http.Response, error) // call when path prefix is found
+ next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil)
+}
+
+// RoundTrip implements a failing transport.
+func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
+ return tr.fail(r)
+ }
+ if tr.next != nil {
+ return tr.next.RoundTrip(r)
+ }
+ return http.DefaultTransport.RoundTrip(r)
+}
+
+func TestPerformRequestWithMaxRetries(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ return &http.Response{Request: r, StatusCode: 400}, nil
+ }
+
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest("GET", "/fail", nil, nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ // Connection should be marked as dead after it failed
+ if numFailedReqs != 5 {
+ t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile
new file mode 100644
index 00000000..cc6261db
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+ go build cluster-test.go
+
+run-omega-cluster-test:
+ go run -race cluster-test.go \
+ -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+ -n=5 \
+ -retries=5 \
+ -sniff=true -sniffer=10s \
+ -healthcheck=true -healthchecker=5s \
+ -errorlog=errors.log
+
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md
new file mode 100644
index 00000000..f10748cc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md
@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
+
+Run `./cluster-test -h` to get a list of flags:
+
+```sh
+$ ./cluster-test -h
+Usage of ./cluster-test:
+ -errorlog="": error log file
+ -healthcheck=true: enable or disable healthchecks
+ -healthchecker=1m0s: healthcheck interval
+ -index="twitter": name of ES index to use
+ -infolog="": info log file
+ -n=5: number of goroutines that run searches
+ -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+ -retries=0: number of retries
+ -sniff=true: enable or disable sniffer
+ -sniffer=15m0s: sniffer interval
+ -tracelog="": trace log file
+```
+
+Example:
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+```
+
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+
+```sh
+#!/bin/bash
+while true
+do
+ echo "Starting ES node"
+ elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+ sleep `jot -r 1 10 300` # wait for 10-300s
+ echo "Stopping ES node"
+ kill -TERM `cat es.pid`
+ sleep `jot -r 1 10 60` # wait for 10-60s
+done
+```
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go
new file mode 100644
index 00000000..a9ce8bb9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go
@@ -0,0 +1,357 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "os"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ elastic "gopkg.in/olivere/elastic.v2"
+)
+
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+var (
+ nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+ n = flag.Int("n", 5, "number of goroutines that run searches")
+ index = flag.String("index", "twitter", "name of ES index to use")
+ errorlogfile = flag.String("errorlog", "", "error log file")
+ infologfile = flag.String("infolog", "", "info log file")
+ tracelogfile = flag.String("tracelog", "", "trace log file")
+ retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries")
+ sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+ sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+ healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+ healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+)
+
+func main() {
+ flag.Parse()
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ if *nodes == "" {
+ log.Fatal("no nodes specified")
+ }
+ urls := strings.SplitN(*nodes, ",", -1)
+
+ testcase, err := NewTestCase(*index, urls)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ testcase.SetErrorLogFile(*errorlogfile)
+ testcase.SetInfoLogFile(*infologfile)
+ testcase.SetTraceLogFile(*tracelogfile)
+ testcase.SetMaxRetries(*retries)
+ testcase.SetHealthcheck(*healthcheck)
+ testcase.SetHealthcheckInterval(*healthchecker)
+ testcase.SetSniff(*sniff)
+ testcase.SetSnifferInterval(*sniffer)
+
+ if err := testcase.Run(*n); err != nil {
+ log.Fatal(err)
+ }
+
+ select {}
+}
+
+type RunInfo struct {
+ Success bool
+}
+
+type TestCase struct {
+ nodes []string
+ client *elastic.Client
+ runs int64
+ failures int64
+ runCh chan RunInfo
+ index string
+ errorlogfile string
+ infologfile string
+ tracelogfile string
+ maxRetries int
+ healthcheck bool
+ healthcheckInterval time.Duration
+ sniff bool
+ snifferInterval time.Duration
+}
+
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+ if index == "" {
+ return nil, errors.New("no index name specified")
+ }
+
+ return &TestCase{
+ index: index,
+ nodes: nodes,
+ runCh: make(chan RunInfo),
+ }, nil
+}
+
+func (t *TestCase) SetIndex(name string) {
+ t.index = name
+}
+
+func (t *TestCase) SetErrorLogFile(name string) {
+ t.errorlogfile = name
+}
+
+func (t *TestCase) SetInfoLogFile(name string) {
+ t.infologfile = name
+}
+
+func (t *TestCase) SetTraceLogFile(name string) {
+ t.tracelogfile = name
+}
+
+func (t *TestCase) SetMaxRetries(n int) {
+ t.maxRetries = n
+}
+
+func (t *TestCase) SetSniff(enabled bool) {
+ t.sniff = enabled
+}
+
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+ t.snifferInterval = d
+}
+
+func (t *TestCase) SetHealthcheck(enabled bool) {
+ t.healthcheck = enabled
+}
+
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+ t.healthcheckInterval = d
+}
+
+func (t *TestCase) Run(n int) error {
+ if err := t.setup(); err != nil {
+ return err
+ }
+
+ for i := 1; i < n; i++ {
+ go t.search()
+ }
+
+ go t.monitor()
+
+ return nil
+}
+
+func (t *TestCase) monitor() {
+ print := func() {
+ fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
+ }
+
+ for {
+ select {
+ case run := <-t.runCh:
+ atomic.AddInt64(&t.runs, 1)
+ if !run.Success {
+ atomic.AddInt64(&t.failures, 1)
+ fmt.Println()
+ }
+ print()
+ case <-time.After(5 * time.Second):
+ // Print stats after some inactivity
+ print()
+ break
+ }
+ }
+}
+
+func (t *TestCase) setup() error {
+ var errorlogger *log.Logger
+ if t.errorlogfile != "" {
+ f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+ }
+
+ var infologger *log.Logger
+ if t.infologfile != "" {
+ f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ infologger = log.New(f, "", log.LstdFlags)
+ }
+
+ // Trace request and response details like this
+ var tracelogger *log.Logger
+ if t.tracelogfile != "" {
+ f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ tracelogger = log.New(f, "", log.LstdFlags)
+ }
+
+ client, err := elastic.NewClient(
+ elastic.SetURL(t.nodes...),
+ elastic.SetErrorLog(errorlogger),
+ elastic.SetInfoLog(infologger),
+ elastic.SetTraceLog(tracelogger),
+ elastic.SetMaxRetries(t.maxRetries),
+ elastic.SetSniff(t.sniff),
+ elastic.SetSnifferInterval(t.snifferInterval),
+ elastic.SetHealthcheck(t.healthcheck),
+ elastic.SetHealthcheckInterval(t.healthcheckInterval))
+ if err != nil {
+ // Handle error
+ return err
+ }
+ t.client = client
+
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := t.client.IndexExists(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if exists {
+ deleteIndex, err := t.client.DeleteIndex(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if !deleteIndex.Acknowledged {
+ return errors.New("delete index not acknowledged")
+ }
+ }
+
+ // Create a new index.
+ createIndex, err := t.client.CreateIndex(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if !createIndex.Acknowledged {
+ return errors.New("create index not acknowledged")
+ }
+
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet1).
+ Do()
+ if err != nil {
+ return err
+ }
+
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("2").
+ BodyString(tweet2).
+ Do()
+ if err != nil {
+ return err
+ }
+
+ // Flush to make sure the documents got written.
+ _, err = t.client.Flush().Index(t.index).Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *TestCase) search() {
+ // Loop forever to check for connection issues
+ for {
+ // Get tweet with specified ID
+ get1, err := t.client.Get().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ Do()
+ if err != nil {
+ //failf("Get failed: %v", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+ if !get1.Found {
+ //log.Printf("Document %s not found\n", "1")
+ //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := t.client.Search().
+ Index(t.index). // search in index t.index
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ //failf("Search failed: %v\n", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Number of hits
+ if searchResult.Hits != nil {
+ //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var tweet Tweet
+ err := json.Unmarshal(*hit.Source, &tweet)
+ if err != nil {
+ // Deserialization failed
+ //failf("Deserialize failed: %v\n", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // Work with tweet
+ //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ //fmt.Print("Found no tweets\n")
+ }
+
+ t.runCh <- RunInfo{Success: true}
+
+ // Sleep some time
+ time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go
new file mode 100644
index 00000000..48a354c4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go
@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ClusterHealthService allows to get the status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
+type ClusterHealthService struct {
+ client *Client
+ pretty bool
+ indices []string
+ waitForStatus string
+ level string
+ local *bool
+ masterTimeout string
+ timeout string
+ waitForActiveShards *int
+ waitForNodes string
+ waitForRelocatingShards *int
+}
+
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+ return &ClusterHealthService{client: client, indices: make([]string, 0)}
+}
+
+// Index limits the information returned to a specific index.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices limits the information returned to specific indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+ s.waitForActiveShards = &waitForActiveShards
+ return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+ s.waitForNodes = waitForNodes
+ return s
+}
+
+// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+ s.waitForRelocatingShards = &waitForRelocatingShards
+ return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+ s.waitForStatus = waitForStatus
+ return s
+}
+
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+ s.level = level
+ return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+ s.local = &local
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.waitForRelocatingShards != nil {
+ params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+ }
+ if s.waitForStatus != "" {
+ params.Set("wait_for_status", s.waitForStatus)
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != nil {
+ params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+ }
+ if s.waitForNodes != "" {
+ params.Set("wait_for_nodes", s.waitForNodes)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ resp := new(ClusterHealthResponse)
+ if err := json.Unmarshal(res.Body, resp); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Status string `json:"status"`
+ TimedOut bool `json:"timed_out"`
+ NumberOfNodes int `json:"number_of_nodes"`
+ NumberOfDataNodes int `json:"number_of_data_nodes"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+ NumberOfPendingTasks int `json:"number_of_pending_tasks"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health_test.go
new file mode 100644
index 00000000..455f1b87
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health_test.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "testing"
+)
+
+func TestClusterHealth(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster health
+ res, err := client.ClusterHealth().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
+ t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
+ }
+}
+
+func TestClusterHealthURLs(t *testing.T) {
+ tests := []struct {
+ Service *ClusterHealthService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &ClusterHealthService{
+ indices: []string{},
+ },
+ ExpectedPath: "/_cluster/health/",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter"},
+ },
+ ExpectedPath: "/_cluster/health/twitter",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter", "gplus"},
+ },
+ ExpectedPath: "/_cluster/health/twitter%2Cgplus",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter"},
+ waitForStatus: "yellow",
+ },
+ ExpectedPath: "/_cluster/health/twitter",
+ ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
+
+func TestClusterHealthWaitForStatus(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Cluster health on an index that does not exist should never get to yellow
+ health, err := client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if health.TimedOut != true {
+ t.Fatalf("expected to timeout; got: %v", health.TimedOut)
+ }
+ if health.Status != "red" {
+ t.Fatalf("expected health = %q; got: %q", "red", health.Status)
+ }
+
+ // Cluster wide health
+ health, err = client.ClusterHealth().WaitForStatus("green").Timeout("10s").Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if health.TimedOut != false {
+ t.Fatalf("expected no timeout; got: %v "+
+ "(does your local cluster contain unassigned shards?)", health.TimedOut)
+ }
+ if health.Status != "green" {
+ t.Fatalf("expected health = %q; got: %q", "green", health.Status)
+ }
+
+ // Cluster wide health via shortcut on client
+ err = client.WaitForGreenStatus("10s")
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go
new file mode 100644
index 00000000..9361f73d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
+type ClusterStateService struct {
+ client *Client
+ pretty bool
+ indices []string
+ metrics []string
+ local *bool
+ masterTimeout string
+ flatSettings *bool
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+ return &ClusterStateService{
+ client: client,
+ indices: make([]string, 0),
+ metrics: make([]string, 0),
+ }
+}
+
+// Index the name of the index. Use _all or an empty string to perform
+// the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+ s.metrics = make([]string, 0)
+ s.metrics = append(s.metrics, metric)
+ return s
+}
+
+// Metrics limits the information returned to the specified metrics.
+// It can be any of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+ s.metrics = make([]string, 0)
+ s.metrics = append(s.metrics, metrics...)
+ return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ metrics := strings.Join(s.metrics, ",")
+ if metrics == "" {
+ metrics = "_all"
+ }
+ indices := strings.Join(s.indices, ",")
+ if indices == "" {
+ indices = "_all"
+ }
+ path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+ "metrics": metrics,
+ "indices": indices,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Version int `json:"version"`
+ MasterNode string `json:"master_node"`
+ Blocks map[string]interface{} `json:"blocks"`
+ Nodes map[string]*ClusterStateNode `json:"nodes"`
+ Metadata *ClusterStateMetadata `json:"metadata"`
+ RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+ RoutingNodes *ClusterStateRoutingNode `json:"routing_nodes"`
+ Allocations []interface{} `json:"allocations"`
+ Customs map[string]interface{} `json:"customs"`
+}
+
+type ClusterStateMetadata struct {
+ Templates map[string]interface{} `json:"templates"`
+ Indices map[string]interface{} `json:"indices"`
+ Repositories map[string]interface{} `json:"repositories"`
+}
+
+type ClusterStateNode struct {
+ Name string `json:"name"`
+ TransportAddress string `json:"transport_address"`
+ Attributes map[string]interface{} `json:"attributes"`
+
+ // TODO(oe) are these still valid?
+ State string `json:"state"`
+ Primary bool `json:"primary"`
+ Node string `json:"node"`
+ RelocatingNode *string `json:"relocating_node"`
+ Shard int `json:"shard"`
+ Index string `json:"index"`
+}
+
+type ClusterStateRoutingTable struct {
+ Indices map[string]interface{} `json:"indices"`
+}
+
+type ClusterStateRoutingNode struct {
+ Unassigned []interface{} `json:"unassigned"`
+ Nodes map[string]interface{} `json:"nodes"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state_test.go
new file mode 100644
index 00000000..9c036bd8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "testing"
+)
+
+func TestClusterState(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster state
+ res, err := client.ClusterState().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.ClusterName == "" {
+ t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+ }
+}
+
// TestClusterStateURLs verifies that ClusterStateService.buildURL
// produces the expected path and query string for combinations of
// indices, metrics, and the master timeout.
func TestClusterStateURLs(t *testing.T) {
	tests := []struct {
		Service        *ClusterStateService
		ExpectedPath   string
		ExpectedParams url.Values
	}{
		{
			// Empty indices/metrics default to the "_all" placeholders.
			Service: &ClusterStateService{
				indices: []string{},
				metrics: []string{},
			},
			ExpectedPath: "/_cluster/state/_all/_all",
		},
		{
			Service: &ClusterStateService{
				indices: []string{"twitter"},
				metrics: []string{},
			},
			ExpectedPath: "/_cluster/state/_all/twitter",
		},
		{
			// The comma between index names is URL-escaped (%2C).
			Service: &ClusterStateService{
				indices: []string{"twitter", "gplus"},
				metrics: []string{},
			},
			ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
		},
		{
			Service: &ClusterStateService{
				indices: []string{},
				metrics: []string{"nodes"},
			},
			ExpectedPath: "/_cluster/state/nodes/_all",
		},
		{
			Service: &ClusterStateService{
				indices: []string{"twitter"},
				metrics: []string{"nodes"},
			},
			ExpectedPath: "/_cluster/state/nodes/twitter",
		},
		{
			// masterTimeout surfaces as the master_timeout query parameter.
			Service: &ClusterStateService{
				indices:       []string{"twitter"},
				metrics:       []string{"nodes"},
				masterTimeout: "1s",
			},
			ExpectedPath:   "/_cluster/state/nodes/twitter",
			ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
		},
	}

	for _, test := range tests {
		gotPath, gotParams, err := test.Service.buildURL()
		if err != nil {
			t.Fatalf("expected no error; got: %v", err)
		}
		if gotPath != test.ExpectedPath {
			t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
		}
		if gotParams.Encode() != test.ExpectedParams.Encode() {
			t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
		}
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go
new file mode 100644
index 00000000..a3756b9c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go
@@ -0,0 +1,349 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html.
type ClusterStatsService struct {
	client       *Client
	pretty       bool     // indent the JSON response
	nodeId       []string // restrict stats to these node IDs/names; empty = all nodes
	flatSettings *bool    // return settings in flat format; nil = server default
	human        *bool    // return time/byte values human-readable; nil = server default
}
+
+// NewClusterStatsService creates a new ClusterStatsService.
+func NewClusterStatsService(client *Client) *ClusterStatsService {
+ return &ClusterStatsService{
+ client: client,
+ nodeId: make([]string, 0),
+ }
+}
+
+// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
+ s.nodeId = nodeId
+ return s
+}
+
+// FlatSettings is documented as: Return settings in flat format (default: false).
+func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human is documented as: Whether to return time and byte values in human-readable format..
+func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.nodeId) > 0 {
+ path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ } else {
+ path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ return path, params, nil
+}
+
// Validate checks if the operation is valid. The cluster stats API has
// no required parameters, so there is nothing to check.
func (s *ClusterStatsService) Validate() error {
	return nil
}

// Do executes the cluster stats operation and returns the decoded
// response, or an error if validation, the HTTP request, or JSON
// decoding fails.
func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ClusterStatsResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
+
// ClusterStatsResponse is the response of ClusterStatsService.Do.
type ClusterStatsResponse struct {
	Timestamp   int64  `json:"timestamp"`
	ClusterName string `json:"cluster_name"`
	// NOTE(review): tag is "uuid" here; verify against the actual ES
	// response, which may name this field differently across versions.
	ClusterUUID string               `json:"uuid"`
	Status      string               `json:"status"` // e.g. "green", "yellow", "red"
	Indices     *ClusterStatsIndices `json:"indices"`
	Nodes       *ClusterStatsNodes   `json:"nodes"`
}
+
// ClusterStatsIndices summarizes index statistics across the cluster
// (the "indices" section of the cluster stats response).
type ClusterStatsIndices struct {
	Count       int                             `json:"count"`
	Shards      *ClusterStatsIndicesShards      `json:"shards"`
	Docs        *ClusterStatsIndicesDocs        `json:"docs"`
	Store       *ClusterStatsIndicesStore       `json:"store"`
	FieldData   *ClusterStatsIndicesFieldData   `json:"fielddata"`
	FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
	IdCache     *ClusterStatsIndicesIdCache     `json:"id_cache"`
	Completion  *ClusterStatsIndicesCompletion  `json:"completion"`
	Segments    *ClusterStatsIndicesSegments    `json:"segments"`
	Percolate   *ClusterStatsIndicesPercolate   `json:"percolate"`
}

// ClusterStatsIndicesShards holds shard totals and per-index
// min/max/avg statistics.
type ClusterStatsIndicesShards struct {
	Total       int                             `json:"total"`
	Primaries   int                             `json:"primaries"`
	Replication float64                         `json:"replication"`
	Index       *ClusterStatsIndicesShardsIndex `json:"index"`
}

// ClusterStatsIndicesShardsIndex holds min/max/avg shard counts
// across indices.
type ClusterStatsIndicesShardsIndex struct {
	Shards      *ClusterStatsIndicesShardsIndexIntMinMax     `json:"shards"`
	Primaries   *ClusterStatsIndicesShardsIndexIntMinMax     `json:"primaries"`
	Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
}

// ClusterStatsIndicesShardsIndexIntMinMax is an integer min/max/avg triple.
type ClusterStatsIndicesShardsIndexIntMinMax struct {
	Min int     `json:"min"`
	Max int     `json:"max"`
	Avg float64 `json:"avg"`
}

// ClusterStatsIndicesShardsIndexFloat64MinMax is a float min/max/avg triple.
type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
	Min float64 `json:"min"`
	Max float64 `json:"max"`
	Avg float64 `json:"avg"`
}

// ClusterStatsIndicesDocs holds document counts across the cluster.
type ClusterStatsIndicesDocs struct {
	Count   int `json:"count"`
	Deleted int `json:"deleted"`
}

// ClusterStatsIndicesStore holds on-disk store size and throttling totals.
type ClusterStatsIndicesStore struct {
	Size                 string `json:"size"` // e.g. "5.3gb"
	SizeInBytes          int64  `json:"size_in_bytes"`
	ThrottleTime         string `json:"throttle_time"` // e.g. "0s"
	ThrottleTimeInMillis int64  `json:"throttle_time_in_millis"`
}

// ClusterStatsIndicesFieldData holds field-data cache statistics,
// optionally broken down per field.
type ClusterStatsIndicesFieldData struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	Evictions         int64  `json:"evictions"`
	Fields            map[string]struct {
		MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
		MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	} `json:"fields"`
}

// ClusterStatsIndicesFilterCache holds filter cache statistics.
type ClusterStatsIndicesFilterCache struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	Evictions         int64  `json:"evictions"`
}

// ClusterStatsIndicesIdCache holds ID cache statistics.
type ClusterStatsIndicesIdCache struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
}

// ClusterStatsIndicesCompletion holds completion-suggester statistics,
// optionally broken down per field.
type ClusterStatsIndicesCompletion struct {
	Size        string `json:"size"` // e.g. "61.3kb"
	SizeInBytes int64  `json:"size_in_bytes"`
	Fields      map[string]struct {
		Size        string `json:"size"` // e.g. "61.3kb"
		SizeInBytes int64  `json:"size_in_bytes"`
	} `json:"fields"`
}

// ClusterStatsIndicesSegments holds Lucene segment statistics.
type ClusterStatsIndicesSegments struct {
	Count                       int64  `json:"count"`
	Memory                      string `json:"memory"` // e.g. "61.3kb"
	MemoryInBytes               int64  `json:"memory_in_bytes"`
	IndexWriterMemory           string `json:"index_writer_memory"` // e.g. "61.3kb"
	IndexWriterMemoryInBytes    int64  `json:"index_writer_memory_in_bytes"`
	IndexWriterMaxMemory        string `json:"index_writer_max_memory"` // e.g. "61.3kb"
	IndexWriterMaxMemoryInBytes int64  `json:"index_writer_max_memory_in_bytes"`
	VersionMapMemory            string `json:"version_map_memory"` // e.g. "61.3kb"
	VersionMapMemoryInBytes     int64  `json:"version_map_memory_in_bytes"`
	FixedBitSet                 string `json:"fixed_bit_set"` // e.g. "61.3kb"
	FixedBitSetInBytes          int64  `json:"fixed_bit_set_memory_in_bytes"`
}
+
+type ClusterStatsIndicesPercolate struct {
+ Total int64 `json:"total"`
+ // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems
+ Time string `json:"get_time"` // e.g. "1s"
+ TimeInBytes int64 `json:"time_in_millis"`
+ Current int64 `json:"current"`
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"`
+ Queries int64 `json:"queries"`
+}
+
// ---

// ClusterStatsNodes summarizes node statistics across the cluster
// (the "nodes" section of the cluster stats response).
type ClusterStatsNodes struct {
	Count    *ClusterStatsNodesCounts       `json:"counts"`
	Versions []string                       `json:"versions"`
	OS       *ClusterStatsNodesOsStats      `json:"os"`
	Process  *ClusterStatsNodesProcessStats `json:"process"`
	JVM      *ClusterStatsNodesJvmStats     `json:"jvm"`
	FS       *ClusterStatsNodesFsStats      `json:"fs"`
	Plugins  []*ClusterStatsNodesPlugin     `json:"plugins"`
}

// ClusterStatsNodesCounts breaks the node count down by role.
type ClusterStatsNodesCounts struct {
	Total      int `json:"total"`
	MasterOnly int `json:"master_only"`
	DataOnly   int `json:"data_only"`
	MasterData int `json:"master_data"`
	Client     int `json:"client"`
}

// ClusterStatsNodesOsStats holds operating-system level statistics
// aggregated over the nodes.
type ClusterStatsNodesOsStats struct {
	AvailableProcessors int                            `json:"available_processors"`
	Mem                 *ClusterStatsNodesOsStatsMem   `json:"mem"`
	CPU                 []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
}

// ClusterStatsNodesOsStatsMem holds total physical memory.
type ClusterStatsNodesOsStatsMem struct {
	Total        string `json:"total"` // e.g. "16gb"
	TotalInBytes int64  `json:"total_in_bytes"`
}

// ClusterStatsNodesOsStatsCPU describes a CPU model found in the cluster.
type ClusterStatsNodesOsStatsCPU struct {
	Vendor           string `json:"vendor"`
	Model            string `json:"model"`
	MHz              int    `json:"mhz"`
	TotalCores       int    `json:"total_cores"`
	TotalSockets     int    `json:"total_sockets"`
	CoresPerSocket   int    `json:"cores_per_socket"`
	CacheSize        string `json:"cache_size"` // e.g. "256b"
	CacheSizeInBytes int64  `json:"cache_size_in_bytes"`
	Count            int    `json:"count"`
}

// ClusterStatsNodesProcessStats holds process-level statistics.
type ClusterStatsNodesProcessStats struct {
	CPU                 *ClusterStatsNodesProcessStatsCPU                 `json:"cpu"`
	OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
}

// ClusterStatsNodesProcessStatsCPU holds process CPU usage.
type ClusterStatsNodesProcessStatsCPU struct {
	Percent float64 `json:"percent"`
}

// ClusterStatsNodesProcessStatsOpenFileDescriptors holds min/max/avg
// open file descriptors across nodes.
type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
	Min int64 `json:"min"`
	Max int64 `json:"max"`
	Avg int64 `json:"avg"`
}
+
// ClusterStatsNodesJvmStats holds JVM statistics aggregated over the nodes.
type ClusterStatsNodesJvmStats struct {
	MaxUptime         string                              `json:"max_uptime"` // e.g. "5h"
	MaxUptimeInMillis int64                               `json:"max_uptime_in_millis"`
	Versions          []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
	Mem               *ClusterStatsNodesJvmStatsMem       `json:"mem"`
	Threads           int64                               `json:"threads"`
}

// ClusterStatsNodesJvmStatsVersion describes a JVM version found in
// the cluster and how many nodes run it.
type ClusterStatsNodesJvmStatsVersion struct {
	Version   string `json:"version"`    // e.g. "1.8.0_45"
	VMName    string `json:"vm_name"`    // e.g. "Java HotSpot(TM) 64-Bit Server VM"
	VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
	VMVendor  string `json:"vm_vendor"`  // e.g. "Oracle Corporation"
	Count     int    `json:"count"`
}

// ClusterStatsNodesJvmStatsMem holds JVM heap usage totals.
type ClusterStatsNodesJvmStatsMem struct {
	HeapUsed        string `json:"heap_used"`
	HeapUsedInBytes int64  `json:"heap_used_in_bytes"`
	HeapMax         string `json:"heap_max"`
	HeapMaxInBytes  int64  `json:"heap_max_in_bytes"`
}

// ClusterStatsNodesFsStats holds file-system statistics aggregated
// over the nodes.
type ClusterStatsNodesFsStats struct {
	Path                 string `json:"path"`
	Mount                string `json:"mount"`
	Dev                  string `json:"dev"`
	Total                string `json:"total"` // e.g. "930.7gb"
	TotalInBytes         int64  `json:"total_in_bytes"`
	Free                 string `json:"free"` // e.g. "930.7gb"
	FreeInBytes          int64  `json:"free_in_bytes"`
	Available            string `json:"available"` // e.g. "930.7gb"
	AvailableInBytes     int64  `json:"available_in_bytes"`
	DiskReads            int64  `json:"disk_reads"`
	DiskWrites           int64  `json:"disk_writes"`
	DiskIOOp             int64  `json:"disk_io_op"`
	DiskReadSize         string `json:"disk_read_size"` // e.g. "0b"
	DiskReadSizeInBytes  int64  `json:"disk_read_size_in_bytes"`
	DiskWriteSize        string `json:"disk_write_size"` // e.g. "0b"
	DiskWriteSizeInBytes int64  `json:"disk_write_size_in_bytes"`
	DiskIOSize           string `json:"disk_io_size"` // e.g. "0b"
	DiskIOSizeInBytes    int64  `json:"disk_io_size_in_bytes"`
	DiskQueue            string `json:"disk_queue"`
	DiskServiceTime      string `json:"disk_service_time"`
}

// ClusterStatsNodesPlugin describes an installed plugin.
type ClusterStatsNodesPlugin struct {
	Name        string `json:"name"`
	Version     string `json:"version"`
	Description string `json:"description"`
	URL         string `json:"url"`
	JVM         bool   `json:"jvm"`  // true if this is a JVM plugin
	Site        bool   `json:"site"` // true if this is a site plugin
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats_test.go
new file mode 100644
index 00000000..74326a6e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats_test.go
@@ -0,0 +1,85 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "testing"
+)
+
+func TestClusterStats(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster stats
+ res, err := client.ClusterStats().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.ClusterName == "" {
+ t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+ }
+}
+
// TestClusterStatsURLs verifies that ClusterStatsService.buildURL
// produces the expected path and query string for various node ID
// lists and flat_settings values.
func TestClusterStatsURLs(t *testing.T) {
	fFlag := false
	tFlag := true

	tests := []struct {
		Service        *ClusterStatsService
		ExpectedPath   string
		ExpectedParams url.Values
	}{
		{
			// No node IDs: the generic stats endpoint is used.
			Service: &ClusterStatsService{
				nodeId: []string{},
			},
			ExpectedPath: "/_cluster/stats",
		},
		{
			Service: &ClusterStatsService{
				nodeId: []string{"node1"},
			},
			ExpectedPath: "/_cluster/stats/nodes/node1",
		},
		{
			// The comma between node IDs is URL-escaped (%2C).
			Service: &ClusterStatsService{
				nodeId: []string{"node1", "node2"},
			},
			ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2",
		},
		{
			Service: &ClusterStatsService{
				nodeId:       []string{},
				flatSettings: &tFlag,
			},
			ExpectedPath:   "/_cluster/stats",
			ExpectedParams: url.Values{"flat_settings": []string{"true"}},
		},
		{
			Service: &ClusterStatsService{
				nodeId:       []string{"node1"},
				flatSettings: &fFlag,
			},
			ExpectedPath:   "/_cluster/stats/nodes/node1",
			ExpectedParams: url.Values{"flat_settings": []string{"false"}},
		},
	}

	for _, test := range tests {
		gotPath, gotParams, err := test.Service.buildURL()
		if err != nil {
			t.Fatalf("expected no error; got: %v", err)
		}
		if gotPath != test.ExpectedPath {
			t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
		}
		if gotParams.Encode() != test.ExpectedParams.Encode() {
			t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
		}
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go
new file mode 100644
index 00000000..b8b5bf8a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go
@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
// conn represents a single connection to a node in a cluster.
// All accessors and mutators take the embedded RWMutex, so a conn is
// safe for concurrent use.
type conn struct {
	sync.RWMutex
	nodeID    string     // ID of the node this connection talks to
	url       string     // base URL of the node
	failures  int        // number of failed requests since the last healthy mark
	dead      bool       // true while the connection is considered unusable
	deadSince *time.Time // UTC time of the first failure; nil until a failure occurs
}

// newConn creates a new connection to the given URL.
func newConn(nodeID, url string) *conn {
	return &conn{nodeID: nodeID, url: url}
}

// String returns a human-readable representation of the connection status.
func (c *conn) String() string {
	c.RLock()
	defer c.RUnlock()
	return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
}

// NodeID returns the ID of the node of this connection.
func (c *conn) NodeID() string {
	c.RLock()
	defer c.RUnlock()
	return c.nodeID
}

// URL returns the URL of this connection.
func (c *conn) URL() string {
	c.RLock()
	defer c.RUnlock()
	return c.url
}

// IsDead reports whether this connection is marked as dead, i.e. a
// previous request to the URL has been unsuccessful.
func (c *conn) IsDead() bool {
	c.RLock()
	defer c.RUnlock()
	return c.dead
}

// MarkAsDead flags this connection as dead, bumps the failure counter,
// and records the time of the first failure.
func (c *conn) MarkAsDead() {
	c.Lock()
	defer c.Unlock()
	c.dead = true
	if c.deadSince == nil {
		now := time.Now().UTC()
		c.deadSince = &now
	}
	c.failures++
}

// MarkAsAlive makes this connection eligible again to be returned from
// the pool of connections by the selector. The failure bookkeeping is
// kept; use MarkAsHealthy to reset it.
func (c *conn) MarkAsAlive() {
	c.Lock()
	defer c.Unlock()
	c.dead = false
}

// MarkAsHealthy marks this connection as healthy, i.e. a request has
// been successfully performed with it, and resets all failure state.
func (c *conn) MarkAsHealthy() {
	c.Lock()
	defer c.Unlock()
	c.dead = false
	c.deadSince = nil
	c.failures = 0
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go
new file mode 100644
index 00000000..bb4c0ac2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// CountService is a convenient service for determining the
// number of documents in an index. Use SearchService with
// a SearchType of count for counting with queries etc.
type CountService struct {
	client  *Client
	indices []string // indices to count in; empty = all indices
	types   []string // types to restrict the count to; empty = all types
	query   Query    // optional query; nil counts all documents
	pretty  bool     // indent the JSON response
}

// CountResult is the result returned from using the Count API
// (http://www.elasticsearch.org/guide/reference/api/count/)
type CountResult struct {
	Count  int64      `json:"count"`
	Shards shardsInfo `json:"_shards,omitempty"`
}
+
+func NewCountService(client *Client) *CountService {
+ builder := &CountService{
+ client: client,
+ }
+ return builder
+}
+
+func (s *CountService) Index(index string) *CountService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *CountService) Indices(indices ...string) *CountService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *CountService) Type(typ string) *CountService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+func (s *CountService) Types(types ...string) *CountService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+func (s *CountService) Query(query Query) *CountService {
+ s.query = query
+ return s
+}
+
+func (s *CountService) Pretty(pretty bool) *CountService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *CountService) Do() (int64, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return 0, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types part
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err = uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return 0, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_count"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Set body if there is a query specified
+ var body interface{}
+ if s.query != nil {
+ query := make(map[string]interface{})
+ query["query"] = s.query.Source()
+ body = query
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return 0, err
+ }
+
+ // Return result
+ ret := new(CountResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return 0, err
+ }
+ if ret != nil {
+ return ret.Count, nil
+ }
+
+ return int64(0), nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count_test.go
new file mode 100644
index 00000000..65bf6fd1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count_test.go
@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
// TestCount indexes three documents into a live cluster and verifies
// the Count API with and without type restrictions and queries.
func TestCount(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the documents are visible to the count.
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Count documents
	count, err := client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Count documents restricted to the existing type.
	count, err = client.Count(testIndexName).Type("tweet").Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Count documents of a non-existing type: must be zero, not an error.
	count, err = client.Count(testIndexName).Type("gezwitscher").Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 0 {
		t.Errorf("expected Count = %d; got %d", 0, count)
	}

	// Count with query
	query := NewTermQuery("user", "olivere")
	count, err = client.Count(testIndexName).Query(query).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}

	// Count with query and type
	query = NewTermQuery("user", "olivere")
	count, err = client.Count(testIndexName).Type("tweet").Query(query).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go
new file mode 100644
index 00000000..28cb6fe2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go
@@ -0,0 +1,126 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// CreateIndexService creates a new index.
type CreateIndexService struct {
	client        *Client
	pretty        bool
	index         string // name of the index to create
	timeout       string // explicit operation timeout, e.g. "5s"
	masterTimeout string // timeout for connecting to the master node
	bodyJson      interface{} // index configuration as a JSON-serializable value
	bodyString    string      // index configuration as a raw string
}
+
+// NewCreateIndexService returns a new CreateIndexService.
+func NewCreateIndexService(client *Client) *CreateIndexService {
+ return &CreateIndexService{client: client}
+}
+
+// Index is the name of the index to create.
+func (b *CreateIndexService) Index(index string) *CreateIndexService {
+ b.index = index
+ return b
+}
+
+// Timeout the explicit operation timeout, e.g. "5s".
+func (s *CreateIndexService) Timeout(timeout string) *CreateIndexService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CreateIndexService) MasterTimeout(masterTimeout string) *CreateIndexService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *CreateIndexService) Body(body string) *CreateIndexService {
+ b.bodyString = body
+ return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *CreateIndexService) BodyString(body string) *CreateIndexService {
+ b.bodyString = body
+ return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serializes as a JSON document, so use a map[string]interface{}.
+func (b *CreateIndexService) BodyJson(body interface{}) *CreateIndexService {
+ b.bodyJson = body
+ return b
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
+ b.pretty = pretty
+ return b
+}
+
+// Do executes the operation.
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+ if b.index == "" {
+ return nil, errors.New("missing index name")
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "1")
+ }
+ if b.masterTimeout != "" {
+ params.Set("master_timeout", b.masterTimeout)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ } else {
+ body = b.bodyString
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := new(CreateIndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
// -- Result of a create index request.

// CreateIndexResult is the outcome of creating a new index.
type CreateIndexResult struct {
	// Acknowledged is true if the cluster acknowledged the creation.
	Acknowledged bool `json:"acknowledged"`
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go
new file mode 100644
index 00000000..765a5be3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+)
+
// Decoder is used to decode responses from Elasticsearch.
// Users of elastic can implement their own marshaler for advanced purposes
// and set them per Client (see SetDecoder). If none is specified,
// DefaultDecoder is used.
type Decoder interface {
	Decode(data []byte, v interface{}) error
}

// DefaultDecoder decodes JSON data with json.Unmarshal from the Go
// standard library.
type DefaultDecoder struct{}

// Decode unmarshals the given JSON data into v.
func (d *DefaultDecoder) Decode(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder_test.go
new file mode 100644
index 00000000..5cfce9f5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder_test.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "sync/atomic"
+ "testing"
+)
+
+type decoder struct {
+ dec json.Decoder
+
+ N int64
+}
+
+func (d *decoder) Decode(data []byte, v interface{}) error {
+ atomic.AddInt64(&d.N, 1)
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ return dec.Decode(v)
+}
+
// TestDecoder installs the counting decoder on a test client and
// verifies that it is actually invoked when indexing a document.
func TestDecoder(t *testing.T) {
	dec := &decoder{}
	client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))

	tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Add a document
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		BodyJson(&tweet).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}
	// NOTE(review): dec.N is read without atomic.LoadInt64; fine here
	// because Do() has returned and no other goroutine is decoding.
	if dec.N <= 0 {
		t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go
new file mode 100644
index 00000000..e6f88707
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go
@@ -0,0 +1,130 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// DeleteService removes a single document from an index.
type DeleteService struct {
	client  *Client
	index   string // name of the index
	_type   string // type of the document
	id      string // document identifier
	routing string // routing value; also set via Parent
	refresh *bool  // refresh affected shards after the delete; nil = server default
	version *int   // explicit version for optimistic concurrency control
	pretty  bool   // indent the JSON response
}
+
+func NewDeleteService(client *Client) *DeleteService {
+ builder := &DeleteService{
+ client: client,
+ }
+ return builder
+}
+
+func (s *DeleteService) Index(index string) *DeleteService {
+ s.index = index
+ return s
+}
+
+func (s *DeleteService) Type(_type string) *DeleteService {
+ s._type = _type
+ return s
+}
+
+func (s *DeleteService) Id(id string) *DeleteService {
+ s.id = id
+ return s
+}
+
+func (s *DeleteService) Parent(parent string) *DeleteService {
+ if s.routing == "" {
+ s.routing = parent
+ }
+ return s
+}
+
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+ s.refresh = &refresh
+ return s
+}
+
+func (s *DeleteService) Version(version int) *DeleteService {
+ s.version = &version
+ return s
+}
+
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+ s.pretty = pretty
+ return s
+}
+
+// Do deletes the document. It fails if any of index, type, and identifier
+// are missing.
+func (s *DeleteService) Do() (*DeleteResult, error) {
+ if s.index == "" {
+ return nil, ErrMissingIndex
+ }
+ if s._type == "" {
+ return nil, ErrMissingType
+ }
+ if s.id == "" {
+ return nil, ErrMissingId
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "index": s.index,
+ "type": s._type,
+ "id": s.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ }
+ if s.routing != "" {
+ params.Set("routing", fmt.Sprintf("%s", s.routing))
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return response
+ ret := new(DeleteResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
// -- Result of a delete request.

// DeleteResult is the outcome of a delete request.
type DeleteResult struct {
	Found   bool   `json:"found"` // true if the document existed and was deleted
	Index   string `json:"_index"`
	Type    string `json:"_type"`
	Id      string `json:"_id"`
	Version int64  `json:"_version"`
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go
new file mode 100644
index 00000000..06282412
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go
@@ -0,0 +1,292 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// DeleteByQueryService deletes documents that match a query.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
type DeleteByQueryService struct {
	client            *Client  // performs the HTTP requests
	indices           []string // indices to restrict the operation to; empty means all
	types             []string // document types to restrict the operation to
	analyzer          string   // analyzer for query-string queries
	consistency       string   // write consistency: one, quorum, or all
	defaultOper       string   // default operator for query-string queries (AND/OR)
	df                string   // default field for query-string queries
	ignoreUnavailable *bool    // skip unavailable (missing or closed) indices
	allowNoIndices    *bool    // tolerate wildcards resolving to no indices
	expandWildcards   string   // expand wildcards to "open"/"closed" indices
	replication       string   // replication type (sync or async)
	routing           string   // specific routing value
	timeout           string   // explicit operation timeout, e.g. "1s"
	pretty            bool     // indent the JSON response
	q                 string   // query in Lucene query-string syntax
	query             Query    // programmatic query; serialized into the request body
}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+ builder := &DeleteByQueryService{
+ client: client,
+ }
+ return builder
+}
+
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
// Analyzer to use for the query string.
func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
	s.analyzer = analyzer
	return s
}

// Consistency represents the specific write consistency setting for the operation.
// It can be one, quorum, or all.
func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
	s.consistency = consistency
	return s
}

// DefaultOperator for query string query (AND or OR).
func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
	s.defaultOper = defaultOperator
	return s
}

// DF is the field to use as default where no field prefix is given in the query string.
func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
	s.df = defaultField
	return s
}

// DefaultField is the field to use as default where no field prefix is given in the query string.
// It is an alias to the DF func.
func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
	s.df = defaultField
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
	s.ignoreUnavailable = &ignore
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices (including the _all string
// or when no indices have been specified).
func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
	s.allowNoIndices = &allow
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both. It can be "open" or "closed".
func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
	s.expandWildcards = expand
	return s
}

// Replication sets a specific replication type (sync or async).
func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
	s.replication = replication
	return s
}

// Q specifies the query in Lucene query string syntax. You can also use
// Query to programmatically specify the query.
func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
	s.q = query
	return s
}

// QueryString is an alias to Q. Notice that you can also use Query to
// programmatically set the query.
func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
	s.q = query
	return s
}

// Routing sets a specific routing value.
func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
	s.routing = routing
	return s
}

// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
	s.timeout = timeout
	return s
}

// Pretty indents the JSON output from Elasticsearch.
func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
	s.pretty = pretty
	return s
}

// Query sets the query programmatically. It is serialized into the
// request body, whereas Q/QueryString end up in the query string.
func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
	s.query = query
	return s
}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types part
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err = uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_query"
+
+ // Parameters
+ params := make(url.Values)
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.consistency != "" {
+ params.Set("consistency", s.consistency)
+ }
+ if s.defaultOper != "" {
+ params.Set("default_operator", s.defaultOper)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.replication != "" {
+ params.Set("replication", s.replication)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+
+ // Set body if there is a query set
+ var body interface{}
+ if s.query != nil {
+ query := make(map[string]interface{})
+ query["query"] = s.query.Source()
+ body = query
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("DELETE", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(DeleteByQueryResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
// The response is keyed by index name.
type DeleteByQueryResult struct {
	Indices map[string]IndexDeleteByQueryResult `json:"_indices"` // per-index results
}

// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
// index.
type IndexDeleteByQueryResult struct {
	Shards shardsInfo `json:"_shards"` // shard-level success/failure counts
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query_test.go
new file mode 100644
index 00000000..a9a235d2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query_test.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
// TestDeleteByQuery is an integration test: it indexes three documents,
// deletes those matching a term query, and verifies the remaining count.
// It requires a reachable Elasticsearch cluster (set up by
// setupTestClientAndCreateIndex).
func TestDeleteByQuery(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the documents are visible to the subsequent count/search.
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Count documents
	count, err := client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Fatalf("expected count = %d; got: %d", 3, count)
	}

	// Delete all documents by sandrae (matches exactly one document)
	q := NewTermQuery("user", "sandrae")
	res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do()
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatalf("expected response != nil; got: %v", res)
	}
	idx, found := res.Indices[testIndexName]
	if !found {
		t.Errorf("expected Found = true; got: %v", found)
	}
	if idx.Shards.Failed > 0 {
		t.Errorf("expected no failed shards; got: %d", idx.Shards.Failed)
	}

	// Flush again and verify exactly one document was removed.
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	count, err = client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Fatalf("expected Count = %d; got: %d", 2, count)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go
new file mode 100644
index 00000000..57d08b49
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// DeleteIndexService deletes an entire index from Elasticsearch.
type DeleteIndexService struct {
	client *Client // performs the HTTP requests
	index  string  // name of the index to delete
}
+
+func NewDeleteIndexService(client *Client) *DeleteIndexService {
+ builder := &DeleteIndexService{
+ client: client,
+ }
+ return builder
+}
+
// Index sets the name of the index to delete.
func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
	b.index = index
	return b
}
+
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+ // Build url
+ path, err := uritemplates.Expand("/{index}/", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("DELETE", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(DeleteIndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
// -- Result of a delete index request.

// DeleteIndexResult is the response Elasticsearch returns for a
// delete-index request.
type DeleteIndexResult struct {
	Acknowledged bool `json:"acknowledged"` // true if the cluster acknowledged the deletion
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go
new file mode 100644
index 00000000..20bc6f51
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// Blank assignments that keep the imports above referenced so the file
// compiles even when a package is not used directly elsewhere in it.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// DeleteMappingService allows to delete a mapping along with its data.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html.
type DeleteMappingService struct {
	client        *Client  // performs the HTTP requests
	pretty        bool     // indent the JSON response
	index         []string // index names (wildcards supported); `_all` for all indices
	typ           []string // document types to delete (wildcards supported)
	masterTimeout string   // timeout for connecting to master
}
+
+// NewDeleteMappingService creates a new DeleteMappingService.
+func NewDeleteMappingService(client *Client) *DeleteMappingService {
+ return &DeleteMappingService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
// Index is a list of index names (supports wildcards). Use `_all` for all indices.
func (s *DeleteMappingService) Index(index ...string) *DeleteMappingService {
	s.index = append(s.index, index...)
	return s
}

// Type is a list of document types to delete (supports wildcards).
// Use `_all` to delete all document types in the specified indices.
func (s *DeleteMappingService) Type(typ ...string) *DeleteMappingService {
	s.typ = append(s.typ, typ...)
	return s
}

// MasterTimeout specifies the timeout for connecting to master.
func (s *DeleteMappingService) MasterTimeout(masterTimeout string) *DeleteMappingService {
	s.masterTimeout = masterTimeout
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *DeleteMappingService) Pretty(pretty bool) *DeleteMappingService {
	s.pretty = pretty
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteMappingService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteMappingService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
// Do executes the operation. Per the service documentation above, deleting
// a mapping also removes the data stored under it.
func (s *DeleteMappingService) Do() (*DeleteMappingResponse, error) {
	// Check pre-conditions (index and type must be set)
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("DELETE", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Decode the JSON body into the operation response
	ret := new(DeleteMappingResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
+
// DeleteMappingResponse is the response of DeleteMappingService.Do.
type DeleteMappingResponse struct {
	Acknowledged bool `json:"acknowledged"` // true if the cluster acknowledged the request
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping_test.go
new file mode 100644
index 00000000..517477d4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
// TestDeleteMappingURL verifies buildURL output for single and multiple
// indices/types; commas joining multiple names are URL-escaped (%2C).
func TestDeleteMappingURL(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tests := []struct {
		Indices  []string
		Types    []string
		Expected string
	}{
		{
			[]string{"twitter"},
			[]string{"tweet"},
			"/twitter/_mapping/tweet",
		},
		{
			[]string{"store-1", "store-2"},
			[]string{"tweet", "user"},
			"/store-1%2Cstore-2/_mapping/tweet%2Cuser",
		},
	}

	for _, test := range tests {
		path, _, err := client.DeleteMapping().Index(test.Indices...).Type(test.Types...).buildURL()
		if err != nil {
			t.Fatal(err)
		}
		if path != test.Expected {
			t.Errorf("expected %q; got: %q", test.Expected, path)
		}
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go
new file mode 100644
index 00000000..cfbe057f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go
@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// DeleteTemplateService deletes a search template. More information can
// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type DeleteTemplateService struct {
	client      *Client // performs the HTTP requests
	pretty      bool    // indent the JSON response
	id          string  // template ID (required)
	version     *int    // explicit version for concurrency control
	versionType string  // version type
}
+
// NewDeleteTemplateService creates a new DeleteTemplateService.
func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
	return &DeleteTemplateService{
		client: client,
	}
}
+
// Id is the template ID. It is required; Validate rejects an empty ID.
func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
	s.id = id
	return s
}

// Version an explicit version number for concurrency control.
func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
	s.version = &version
	return s
}

// VersionType specifies a version type.
func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
	s.versionType = versionType
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteTemplateService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
// Do executes the operation: it validates the required template ID,
// issues the DELETE request, and decodes the JSON response.
func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
	// Check pre-conditions (the template ID must be set)
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("DELETE", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(DeleteTemplateResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
+
// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
type DeleteTemplateResponse struct {
	Found   bool   `json:"found"`    // true if the template existed and was removed
	Index   string `json:"_index"`   // index the template was stored in
	Type    string `json:"_type"`    // mapping type of the stored template
	Id      string `json:"_id"`      // template identifier
	Version int    `json:"_version"` // version after the delete
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_test.go
new file mode 100644
index 00000000..ed07842f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_test.go
@@ -0,0 +1,115 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
// TestDelete is an integration test: it indexes three documents, deletes
// one by ID, and checks both the Found flag and the remaining counts.
// It requires a reachable Elasticsearch cluster.
func TestDelete(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the documents are visible to Count below.
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Count documents
	count, err := client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Delete document 1
	res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != true {
		t.Errorf("expected Found = true; got %v", res.Found)
	}
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	count, err = client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}

	// Delete non existent document 99; the delete succeeds (no error)
	// but reports Found = false.
	res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do()
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != false {
		t.Errorf("expected Found = false; got %v", res.Found)
	}
	count, err = client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}
}
+
// TestDeleteWithEmptyIDFails verifies that DeleteService.Do rejects a
// missing id, type, or index with the corresponding sentinel error
// before any request is sent.
func TestDeleteWithEmptyIDFails(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Delete document with blank ID
	_, err = client.Delete().Index(testIndexName).Type("tweet").Id("").Do()
	if err != ErrMissingId {
		t.Fatalf("expected to not accept delete without identifier, got: %v", err)
	}

	// Delete document with blank type
	_, err = client.Delete().Index(testIndexName).Type("").Id("1").Do()
	if err != ErrMissingType {
		t.Fatalf("expected to not accept delete without type, got: %v", err)
	}

	// Delete document with blank index
	_, err = client.Delete().Index("").Type("tweet").Id("1").Do()
	if err != ErrMissingIndex {
		t.Fatalf("expected to not accept delete without index, got: %v", err)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go
new file mode 100644
index 00000000..336a734d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go
@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ }
+
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+
+ // Create a client and connect to http://192.168.2.10:9201
+ client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+ if err != nil {
+ // Handle error
+ }
+
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+ exists, err := client.IndexExists("twitter").Do()
+ if err != nil {
+ // Handle error
+ }
+ if !exists {
+ // Index does not exist yet.
+ }
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on Github for more details.
+
+*/
+package elastic
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go
new file mode 100644
index 00000000..abbb09c6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
// Sentinel errors returned by services when required request fields are
// unset; callers compare against them directly (see delete_test.go).
var (
	// ErrMissingIndex is returned e.g. from DeleteService if the index is missing.
	ErrMissingIndex = errors.New("elastic: index is missing")

	// ErrMissingType is returned e.g. from DeleteService if the type is missing.
	ErrMissingType = errors.New("elastic: type is missing")

	// ErrMissingId is returned e.g. from DeleteService if the document identifier is missing.
	ErrMissingId = errors.New("elastic: id is missing")
)
+
+func checkResponse(res *http.Response) error {
+ // 200-299 and 404 are valid status codes
+ if (res.StatusCode >= 200 && res.StatusCode <= 299) || res.StatusCode == http.StatusNotFound {
+ return nil
+ }
+ if res.Body == nil {
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("elastic: Error %d (%s) when reading body: %v", res.StatusCode, http.StatusText(res.StatusCode), err)
+ }
+ errReply := new(Error)
+ err = json.Unmarshal(slurp, errReply)
+ if err != nil {
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
+ if errReply != nil {
+ if errReply.Status == 0 {
+ errReply.Status = res.StatusCode
+ }
+ return errReply
+ }
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+}
+
// Error encapsulates a structured error response ("status" and "error"
// fields) returned by Elasticsearch.
type Error struct {
	Status  int    `json:"status"`
	Message string `json:"error"`
}

// Error implements the error interface.
func (e *Error) Error() string {
	text := http.StatusText(e.Status)
	if e.Message == "" {
		return fmt.Sprintf("elastic: Error %d (%s)", e.Status, text)
	}
	return fmt.Sprintf("elastic: Error %d (%s): %s", e.Status, text, e.Message)
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors_test.go
new file mode 100644
index 00000000..553288d5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors_test.go
@@ -0,0 +1,74 @@
+package elastic
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+)
+
// TestResponseError verifies that checkResponse turns a JSON error body
// into an *elastic.Error carrying the status code and message.
func TestResponseError(t *testing.T) {
	message := "Something went seriously wrong."
	// Hand-craft a raw HTTP/1.1 response with a JSON error payload.
	raw := "HTTP/1.1 500 Internal Server Error\r\n" +
		"\r\n" +
		`{"status":500,"error":"` + message + `"}` + "\r\n"
	r := bufio.NewReader(strings.NewReader(raw))

	resp, err := http.ReadResponse(r, nil)
	if err != nil {
		t.Fatal(err)
	}
	err = checkResponse(resp)
	if err == nil {
		t.Fatalf("expected error; got: %v", err)
	}

	// Check for correct error message
	expected := fmt.Sprintf("elastic: Error %d (%s): %s", resp.StatusCode, http.StatusText(resp.StatusCode), message)
	got := err.Error()
	if got != expected {
		t.Fatalf("expected %q; got: %q", expected, got)
	}

	// Check that error is of type *elastic.Error, which contains additional information
	e, ok := err.(*Error)
	if !ok {
		t.Fatal("expected error to be of type *elastic.Error")
	}
	if e.Status != resp.StatusCode {
		t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
	}
	if e.Message != message {
		t.Fatalf("expected error message %q; got: %q", message, e.Message)
	}
}
+
+func TestResponseErrorHTML(t *testing.T) {
+ raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
+ "\r\n" +
+ `
+
413 Request Entity Too Large
+
+413 Request Entity Too Large
+
nginx/1.6.2
+
+` + "\r\n"
+ r := bufio.NewReader(strings.NewReader(raw))
+
+ resp, err := http.ReadResponse(r, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = checkResponse(resp)
+ if err == nil {
+ t.Fatalf("expected error; got: %v", err)
+ }
+
+ // Check for correct error message
+ expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
+ got := err.Error()
+ if got != expected {
+ t.Fatalf("expected %q; got: %q", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/example_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/example_test.go
new file mode 100644
index 00000000..a84c4be4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/example_test.go
@@ -0,0 +1,547 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "time"
+
+ elastic "gopkg.in/olivere/elastic.v2"
+)
+
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+func Example() {
+ errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
+
+ // Obtain a client. You can provide your own HTTP client here.
+ client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Trace request and response details like this
+ //client.SetTracer(log.New(os.Stdout, "", 0))
+
+ // Ping the Elasticsearch server to get e.g. the version number
+ info, code, err := client.Ping().Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
+
+ // Getting the ES version number is quite common, so there's a shortcut
+ esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Elasticsearch version %s", esversion)
+
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := client.IndexExists("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !exists {
+ // Create a new index.
+ createIndex, err := client.CreateIndex("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !createIndex.Acknowledged {
+ // Not acknowledged
+ }
+ }
+
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ put1, err := client.Index().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet1).
+ Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
+
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ put2, err := client.Index().
+ Index("twitter").
+ Type("tweet").
+ Id("2").
+ BodyString(tweet2).
+ Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
+
+ // Get tweet with specified ID
+ get1, err := client.Get().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if get1.Found {
+ fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ }
+
+ // Flush to make sure the documents got written.
+ _, err = client.Flush().Index("twitter").Do()
+ if err != nil {
+ panic(err)
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Each is a convenience function that iterates over hits in a search result.
+ // It makes sure you don't need to check for nil values in the response.
+ // However, it ignores errors in serialization. If you want full control
+ // over iterating the hits, see below.
+ var ttyp Tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ // TotalHits is another convenience function that works even when something goes wrong.
+ fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+ // Here's how you iterate through results with full control over each step.
+ if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+
+ // Update a tweet by the update API of Elasticsearch.
+ // We just increment the number of retweets.
+ update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ Script("ctx._source.retweets += num").
+ ScriptParams(map[string]interface{}{"num": 1}).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
+
+ // ...
+
+ // Delete an index.
+ deleteIndex, err := client.DeleteIndex("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !deleteIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleClient_NewClient_default() {
+ // Obtain a client to the Elasticsearch instance on http://localhost:9200.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ fmt.Printf("connection failed: %v\n", err)
+ } else {
+ fmt.Println("connected")
+ }
+ _ = client
+ // Output:
+ // connected
+}
+
+func ExampleClient_NewClient_cluster() {
+ // Obtain a client for an Elasticsearch cluster of two nodes,
+ // running on 10.0.1.1 and 10.0.1.2.
+ client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = client
+}
+
+func ExampleClient_NewClient_manyOptions() {
+ // Obtain a client for an Elasticsearch cluster of two nodes,
+ // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
+ // Set the healthcheck interval to 10s. When requests fail,
+ // retry 5 times. Print error messages to os.Stderr and informational
+ // messages to os.Stdout.
+ client, err := elastic.NewClient(
+ elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
+ elastic.SetSniff(false),
+ elastic.SetHealthcheckInterval(10*time.Second),
+ elastic.SetMaxRetries(5),
+ elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
+ elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = client
+}
+
+func ExampleIndexExistsService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Use the IndexExists service to check if the index "twitter" exists.
+ exists, err := client.IndexExists("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if exists {
+ // ...
+ }
+}
+
+func ExampleCreateIndexService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Create a new index.
+ createIndex, err := client.CreateIndex("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !createIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleDeleteIndexService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Delete an index.
+ deleteIndex, err := client.DeleteIndex("twitter").Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !deleteIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleSearchService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Number of hits
+ if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+}
+
+func ExampleAggregations() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
+ timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+ histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+ timeline = timeline.SubAggregation("history", histogram)
+
+ // Search with a term query
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(elastic.NewMatchAllQuery()). // return all results, but ...
+ SearchType("count"). // ... do not return hits, just the count
+ Aggregation("timeline", timeline). // add our aggregation to the query
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Access "timeline" aggregate in search result.
+ agg, found := searchResult.Aggregations.Terms("timeline")
+ if !found {
+ log.Fatalf("we should have a terms aggregation called %q", "timeline")
+ }
+ for _, userBucket := range agg.Buckets {
+ // Every bucket should have the user field as key.
+ user := userBucket.Key
+
+ // The sub-aggregation history should have the number of tweets per year.
+ histogram, found := userBucket.DateHistogram("history")
+ if found {
+ for _, year := range histogram.Buckets {
+ fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+ }
+ }
+ }
+}
+
+func ExampleSearchResult() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Do a search
+ searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
+ if err != nil {
+ panic(err)
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Each is a utility function that iterates over hits in a search result.
+ // It makes sure you don't need to check for nil values in the response.
+ // However, it ignores errors in serialization. If you want full control
+ // over iterating the hits, see below.
+ var ttyp Tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+ // Here's how you iterate hits with full control.
+ if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+}
+
+func ExamplePutTemplateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Create search template
+ tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+ // Create template
+ resp, err := client.PutTemplate().
+ Id("my-search-template"). // Name of the template
+ BodyString(tmpl). // Search template itself
+ Do() // Execute
+ if err != nil {
+ panic(err)
+ }
+ if resp.Created {
+ fmt.Println("search template created")
+ }
+}
+
+func ExampleGetTemplateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get template stored under "my-search-template"
+ resp, err := client.GetTemplate().Id("my-search-template").Do()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("search template is: %q\n", resp.Template)
+}
+
+func ExampleDeleteTemplateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Delete template
+ resp, err := client.DeleteTemplate().Id("my-search-template").Do()
+ if err != nil {
+ panic(err)
+ }
+ if resp != nil && resp.Found {
+ fmt.Println("template deleted")
+ }
+}
+
+func ExampleClusterHealthService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get cluster health
+ res, err := client.ClusterHealth().Index("twitter").Do()
+ if err != nil {
+ panic(err)
+ }
+ if res == nil {
+ panic(err)
+ }
+ fmt.Printf("Cluster status is %q\n", res.Status)
+}
+
+func ExampleClusterHealthService_WaitForGreen() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Wait for status green
+ res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do()
+ if err != nil {
+ panic(err)
+ }
+ if res.TimedOut {
+ fmt.Printf("time out waiting for cluster status %q\n", "green")
+ } else {
+ fmt.Printf("cluster status is %q\n", res.Status)
+ }
+}
+
+func ExampleClusterStateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get cluster state
+ res, err := client.ClusterState().Metric("version").Do()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go
new file mode 100644
index 00000000..534ad5d4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go
@@ -0,0 +1,176 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ExistsService checks if a document exists.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
+// for details.
+type ExistsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ parent string
+ preference string
+ realtime *bool
+ refresh *bool
+ routing string
+}
+
+// NewExistsService creates a new ExistsService.
+func NewExistsService(client *Client) *ExistsService {
+ return &ExistsService{
+ client: client,
+ }
+}
+
+// Id is the document ID.
+func (s *ExistsService) Id(id string) *ExistsService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExistsService) Index(index string) *ExistsService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document (use `_all` to fetch the first
+// document matching the ID across all types).
+func (s *ExistsService) Type(typ string) *ExistsService {
+ s.typ = typ
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExistsService) Parent(parent string) *ExistsService {
+ s.parent = parent
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *ExistsService) Preference(preference string) *ExistsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *ExistsService) Realtime(realtime bool) *ExistsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *ExistsService) Refresh(refresh bool) *ExistsService {
+ s.refresh = &refresh
+ return s
+}
+
+// Routing is the specific routing value.
+func (s *ExistsService) Routing(routing string) *ExistsService {
+ s.routing = routing
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ExistsService) Pretty(pretty bool) *ExistsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExistsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExistsService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExistsService) Do() (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+
+ // Evaluate operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists_test.go
new file mode 100644
index 00000000..80573a7f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists_test.go
@@ -0,0 +1,19 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestExists(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Fatal("expected document to exist")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go
new file mode 100644
index 00000000..b6b96484
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go
@@ -0,0 +1,329 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ q string
+ routing string
+ lenient *bool
+ analyzer string
+ df string
+ fields []string
+ lowercaseExpandedTerms *bool
+ xSourceInclude []string
+ analyzeWildcard *bool
+ parent string
+ preference string
+ xSource []string
+ defaultOperator string
+ xSourceExclude []string
+ source string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+ return &ExplainService{
+ client: client,
+ xSource: make([]string, 0),
+ xSourceExclude: make([]string, 0),
+ fields: make([]string, 0),
+ xSourceInclude: make([]string, 0),
+ }
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+ s.typ = typ
+ return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+ s.source = source
+ return s
+}
+
+// XSourceExclude is a list of fields to exclude from the returned _source field.
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
+ s.xSourceExclude = make([]string, 0)
+ s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+ return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *ExplainService) Lenient(lenient bool) *ExplainService {
+ s.lenient = &lenient
+ return s
+}
+
+// Query in the Lucene query string syntax.
+func (s *ExplainService) Q(q string) *ExplainService {
+ s.q = q
+ return s
+}
+
+// Routing sets a specific routing value.
+func (s *ExplainService) Routing(routing string) *ExplainService {
+ s.routing = routing
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcards and prefix queries
+// in the query string query should be analyzed (default: false).
+func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// Analyzer is the analyzer for the query string query.
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
+ s.analyzer = analyzer
+ return s
+}
+
+// Df is the default field for query string query (default: _all).
+func (s *ExplainService) Df(df string) *ExplainService {
+ s.df = df
+ return s
+}
+
+// Fields is a list of fields to return in the response.
+func (s *ExplainService) Fields(fields ...string) *ExplainService {
+ s.fields = make([]string, 0)
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// XSourceInclude is a list of fields to extract and return from the _source field.
+func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
+ s.xSourceInclude = make([]string, 0)
+ s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+ return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExplainService) Parent(parent string) *ExplainService {
+ s.parent = parent
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExplainService) Preference(preference string) *ExplainService {
+ s.preference = preference
+ return s
+}
+
+// XSource is true or false to return the _source field or not, or a list of fields to return.
+func (s *ExplainService) XSource(xSource ...string) *ExplainService {
+ s.xSource = make([]string, 0)
+ s.xSource = append(s.xSource, xSource...)
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ExplainService) Pretty(pretty bool) *ExplainService {
+ s.pretty = pretty
+ return s
+}
+
+// Query sets a query definition using the Query DSL.
+func (s *ExplainService) Query(query Query) *ExplainService {
+ body := make(map[string]interface{})
+ body["query"] = query.Source()
+ s.bodyJson = body
+ return s
+}
+
+// BodyJson sets the query definition using the Query DSL.
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the query definition using the Query DSL as a string.
+func (s *ExplainService) BodyString(body string) *ExplainService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExplainService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(s.xSource) > 0 {
+ params.Set("_source", strings.Join(s.xSource, ","))
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.source != "" {
+ params.Set("source", s.source)
+ }
+ if len(s.xSourceExclude) > 0 {
+ params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if len(s.xSourceInclude) > 0 {
+ params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExplainService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExplainService) Do() (*ExplainResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ExplainResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ExplainResponse is the response of ExplainService.Do.
+type ExplainResponse struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Matched bool `json:"matched"`
+ Explanation map[string]interface{} `json:"explanation"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain_test.go
new file mode 100644
index 00000000..e799d6c5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestExplain(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("tweet").
+ Id("1").
+ BodyJson(&tweet1).
+ Refresh(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // Explain
+ query := NewTermQuery("user", "olivere")
+ expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expl == nil {
+ t.Fatal("expected to return an explanation")
+ }
+ if !expl.Matched {
+ t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go
new file mode 100644
index 00000000..6c9b91b8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "strings"
+)
+
+type FetchSourceContext struct {
+ fetchSource bool
+ transformSource bool
+ includes []string
+ excludes []string
+}
+
+func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
+ return &FetchSourceContext{
+ fetchSource: fetchSource,
+ includes: make([]string, 0),
+ excludes: make([]string, 0),
+ }
+}
+
+func (fsc *FetchSourceContext) FetchSource() bool {
+ return fsc.fetchSource
+}
+
+func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
+ fsc.fetchSource = fetchSource
+}
+
+func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
+ fsc.includes = append(fsc.includes, includes...)
+ return fsc
+}
+
+func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
+ fsc.excludes = append(fsc.excludes, excludes...)
+ return fsc
+}
+
+func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
+ fsc.transformSource = transformSource
+ return fsc
+}
+
+func (fsc *FetchSourceContext) Source() interface{} {
+ if !fsc.fetchSource {
+ return false
+ }
+ return map[string]interface{}{
+ "includes": fsc.includes,
+ "excludes": fsc.excludes,
+ }
+}
+
+// Query returns the parameters in a form suitable for a URL query string.
+func (fsc *FetchSourceContext) Query() url.Values {
+ params := url.Values{}
+ if !fsc.fetchSource {
+ params.Add("_source", "false")
+ return params
+ }
+ if len(fsc.includes) > 0 {
+ params.Add("_source_include", strings.Join(fsc.includes, ","))
+ }
+ if len(fsc.excludes) > 0 {
+ params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
+ }
+ return params
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context_test.go
new file mode 100644
index 00000000..f329fee8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFetchSourceContextNoFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(false)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `false`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `false`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(true)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"excludes":[],"includes":[]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"excludes":["c"],"includes":["a","b"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryDefaults(t *testing.T) {
+ builder := NewFetchSourceContext(true)
+ values := builder.Query()
+ got := values.Encode()
+ expected := ""
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(false)
+ values := builder.Query()
+ got := values.Encode()
+ expected := "_source=false"
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+ values := builder.Query()
+ got := values.Encode()
+ expected := "_source_exclude=c&_source_include=a%2Cb"
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go
new file mode 100644
index 00000000..ba1f0126
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go
@@ -0,0 +1,9 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Filter interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go
new file mode 100644
index 00000000..c7233710
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go
@@ -0,0 +1,167 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// Flush allows to flush one or more indices. The flush process of an index
+// basically frees memory from the index by flushing data to the index
+// storage and clearing the internal transaction log.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type FlushService struct {
+ client *Client
+
+ indices []string
+ force *bool
+ full *bool
+ waitIfOngoing *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+func NewFlushService(client *Client) *FlushService {
+ builder := &FlushService{
+ client: client,
+ }
+ return builder
+}
+
+func (s *FlushService) Index(index string) *FlushService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *FlushService) Indices(indices ...string) *FlushService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Force specifies whether to force a flush even if it is not necessary.
+func (s *FlushService) Force(force bool) *FlushService {
+ s.force = &force
+ return s
+}
+
+// Full, when set to true, creates a new index writer for the index and
+// refreshes all settings related to the index.
+func (s *FlushService) Full(full bool) *FlushService {
+ s.full = &full
+ return s
+}
+
+// WaitIfOngoing will block until the flush can be executed (if set to true)
+// if another flush operation is already executing. The default is false
+// and will cause an exception to be thrown on the shard level if another
+// flush operation is already running. [1.4.0.Beta1]
+func (s *FlushService) WaitIfOngoing(wait bool) *FlushService {
+ s.waitIfOngoing = &wait
+ return s
+}
+
+// IgnoreUnavailable specifies whether concrete indices should be ignored
+// when unavailable (e.g. missing or closed).
+func (s *FlushService) IgnoreUnavailable(ignoreUnavailable bool) *FlushService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices specifies whether to ignore if a wildcard expression
+// yields no indices. This includes the _all index or when no indices
+// have been specified.
+func (s *FlushService) AllowNoIndices(allowNoIndices bool) *FlushService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards specifies whether to expand wildcards to concrete indices
+// that are open, closed, or both. Use one of "open", "closed", "none", or "all".
+func (s *FlushService) ExpandWildcards(expandWildcards string) *FlushService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Do executes the service.
+func (s *FlushService) Do() (*FlushResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ if len(s.indices) > 0 {
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",") + "/"
+ }
+ path += "_flush"
+
+ // Parameters
+ params := make(url.Values)
+ if s.force != nil {
+ params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.full != nil {
+ params.Set("full", fmt.Sprintf("%v", *s.full))
+ }
+ if s.waitIfOngoing != nil {
+ params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(FlushResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a flush request.
+
+type shardsInfo struct {
+ Total int `json:"total"`
+ Successful int `json:"successful"`
+ Failed int `json:"failed"`
+}
+
+type FlushResult struct {
+ Shards shardsInfo `json:"_shards"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush_test.go
new file mode 100644
index 00000000..515ff3a7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush_test.go
@@ -0,0 +1,22 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestFlush(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Flush all indices
+ res, err := client.Flush().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected res to be != nil; got: %v", res)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go
new file mode 100644
index 00000000..4f559557
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// GeoPoint is a geographic position described via latitude and longitude.
+type GeoPoint struct {
+ Lat, Lon float64
+}
+
+// Source returns the object to be serialized in Elasticsearch DSL.
+func (pt *GeoPoint) Source() map[string]float64 {
+ return map[string]float64{
+ "lat": pt.Lat,
+ "lon": pt.Lon,
+ }
+}
+
+// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
+ return &GeoPoint{Lat: lat, Lon: lon}
+}
+
+// GeoPointFromString initializes a new GeoPoint by a string that is
+// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
+func GeoPointFromString(latLon string) (*GeoPoint, error) {
+ latlon := strings.SplitN(latLon, ",", 2)
+ if len(latlon) != 2 {
+ return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
+ }
+ lat, err := strconv.ParseFloat(latlon[0], 64)
+ if err != nil {
+ return nil, err
+ }
+ lon, err := strconv.ParseFloat(latlon[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ return &GeoPoint{Lat: lat, Lon: lon}, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point_test.go
new file mode 100644
index 00000000..ebc28c2e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point_test.go
@@ -0,0 +1,24 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoPointSource(t *testing.T) {
+ pt := GeoPoint{Lat: 40, Lon: -70}
+
+ data, err := json.Marshal(pt.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"lat":40,"lon":-70}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go
new file mode 100644
index 00000000..94cde57a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go
@@ -0,0 +1,223 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type GetService struct {
+ client *Client
+ index string
+ typ string
+ id string
+ routing string
+ preference string
+ fields []string
+ refresh *bool
+ realtime *bool
+ fsc *FetchSourceContext
+ versionType string
+ version *int64
+ ignoreErrorsOnGeneratedFields *bool
+}
+
+func NewGetService(client *Client) *GetService {
+ builder := &GetService{
+ client: client,
+ typ: "_all",
+ }
+ return builder
+}
+
+func (b *GetService) String() string {
+ return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
+ b.index,
+ b.typ,
+ b.id,
+ b.routing)
+}
+
+func (b *GetService) Index(index string) *GetService {
+ b.index = index
+ return b
+}
+
+func (b *GetService) Type(typ string) *GetService {
+ b.typ = typ
+ return b
+}
+
+func (b *GetService) Id(id string) *GetService {
+ b.id = id
+ return b
+}
+
+func (b *GetService) Parent(parent string) *GetService {
+ if b.routing == "" {
+ b.routing = parent
+ }
+ return b
+}
+
+func (b *GetService) Routing(routing string) *GetService {
+ b.routing = routing
+ return b
+}
+
+func (b *GetService) Preference(preference string) *GetService {
+ b.preference = preference
+ return b
+}
+
+func (b *GetService) Fields(fields ...string) *GetService {
+ if b.fields == nil {
+ b.fields = make([]string, 0)
+ }
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+ if s.fsc == nil {
+ s.fsc = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fsc.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+ s.fsc = fetchSourceContext
+ return s
+}
+
+func (b *GetService) Refresh(refresh bool) *GetService {
+ b.refresh = &refresh
+ return b
+}
+
+func (b *GetService) Realtime(realtime bool) *GetService {
+ b.realtime = &realtime
+ return b
+}
+
+func (b *GetService) VersionType(versionType string) *GetService {
+ b.versionType = versionType
+ return b
+}
+
+func (b *GetService) Version(version int64) *GetService {
+ b.version = &version
+ return b
+}
+
+func (b *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
+ b.ignoreErrorsOnGeneratedFields = &ignore
+ return b
+}
+
+// Validate checks if the operation is valid.
+func (s *GetService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+func (b *GetService) Do() (*GetResult, error) {
+ // Check pre-conditions
+ if err := b.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "index": b.index,
+ "type": b.typ,
+ "id": b.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ params := make(url.Values)
+ if b.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+ }
+ if len(b.fields) > 0 {
+ params.Add("fields", strings.Join(b.fields, ","))
+ }
+ if b.routing != "" {
+ params.Add("routing", b.routing)
+ }
+ if b.preference != "" {
+ params.Add("preference", b.preference)
+ }
+ if b.refresh != nil {
+ params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+ if b.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+ }
+ if b.ignoreErrorsOnGeneratedFields != nil {
+ params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *b.ignoreErrorsOnGeneratedFields))
+ }
+ if len(b.fields) > 0 {
+ params.Add("_fields", strings.Join(b.fields, ","))
+ }
+ if b.version != nil {
+ params.Add("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Add("version_type", b.versionType)
+ }
+ if b.fsc != nil {
+ for k, values := range b.fsc.Query() {
+ params.Add(k, strings.Join(values, ","))
+ }
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(GetResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a get request.
+
+type GetResult struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int64 `json:"_version,omitempty"`
+ Source *json.RawMessage `json:"_source,omitempty"`
+ Found bool `json:"found,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Error string `json:"error,omitempty"` // used only in MultiGet
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go
new file mode 100644
index 00000000..13ad343a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go
@@ -0,0 +1,172 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// GetMappingService retrieves the mapping definitions for an index or
+// index/type. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-mapping.html.
+type GetMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewGetMappingService creates a new GetMappingService.
+func NewGetMappingService(client *Client) *GetMappingService {
+ return &GetMappingService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *GetMappingService) Index(index ...string) *GetMappingService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types.
+func (s *GetMappingService) Type(typ ...string) *GetMappingService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *GetMappingService) AllowNoIndices(allowNoIndices bool) *GetMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed, or both.
+func (s *GetMappingService) ExpandWildcards(expandWildcards string) *GetMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information, do not retrieve
+// the state from master node (default: false).
+func (s *GetMappingService) Local(local bool) *GetMappingService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *GetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *GetMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *GetMappingService) Pretty(pretty bool) *GetMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetMappingService) buildURL() (string, url.Values, error) {
+ var index, typ []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.typ) > 0 {
+ typ = s.typ
+ } else {
+ typ = []string{"_all"}
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(index, ","),
+ "type": strings.Join(typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetMappingService) Validate() error {
+ return nil
+}
+
+// Do executes the operation. When successful, it returns a json.RawMessage.
+// If you specify an index that does not exist, Elasticsearch returns HTTP status 404.
+// If you specify a type that does not exist, Elasticsearch returns
+// an empty map.
+func (s *GetMappingService) Do() (map[string]interface{}, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]interface{}
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping_test.go
new file mode 100644
index 00000000..1cdbd0b2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestGetMappingURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all/_mapping/_all",
+ },
+ {
+ []string{},
+ []string{"tweet"},
+ "/_all/_mapping/tweet",
+ },
+ {
+ []string{"twitter"},
+ []string{"tweet"},
+ "/twitter/_mapping/tweet",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"tweet", "user"},
+ "/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go
new file mode 100644
index 00000000..ef9d5611
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go
@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// GetTemplateService reads a search template.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type GetTemplateService struct {
+ client *Client
+ pretty bool
+ id string
+ version interface{}
+ versionType string
+}
+
+// NewGetTemplateService creates a new GetTemplateService.
+func NewGetTemplateService(client *Client) *GetTemplateService {
+ return &GetTemplateService{
+ client: client,
+ }
+}
+
+// Id is the template ID.
+func (s *GetTemplateService) Id(id string) *GetTemplateService {
+ s.id = id
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
+ s.version = version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
+ s.versionType = versionType
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetTemplateService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation and returns the template.
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(GetTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type GetTemplateResponse struct {
+ Template string `json:"template"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template_test.go
new file mode 100644
index 00000000..00aea689
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestGetPutDeleteTemplate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // This is a search template, not an index template!
+ tmpl := `{
+ "template": {
+ "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } },
+ "size" : "{{my_size}}"
+ },
+ "params":{
+ "my_field" : "user",
+ "my_value" : "olivere",
+ "my_size" : 5
+ }
+}`
+ putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if putres == nil {
+ t.Fatalf("expected response; got: %v", putres)
+ }
+ if !putres.Created {
+ t.Fatalf("expected template to be created; got: %v", putres.Created)
+ }
+
+ // Always delete template
+ defer client.DeleteTemplate().Id("elastic-template").Do()
+
+ // Get template
+ getres, err := client.GetTemplate().Id("elastic-template").Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if getres == nil {
+ t.Fatalf("expected response; got: %v", getres)
+ }
+ if getres.Template == "" {
+ t.Errorf("expected template %q; got: %q", tmpl, getres.Template)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_test.go
new file mode 100644
index 00000000..64f54449
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_test.go
@@ -0,0 +1,168 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1
+ res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Found != true {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source == nil {
+ t.Errorf("expected Source != nil; got %v", res.Source)
+ }
+
+ // Get non existent document 99
+ res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Found != false {
+ t.Errorf("expected Found = false; got %v", res.Found)
+ }
+ if res.Source != nil {
+ t.Errorf("expected Source == nil; got %v", res.Source)
+ }
+}
+
+func TestGetWithSourceFiltering(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1, without source
+ res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Found != true {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source != nil {
+ t.Errorf("expected Source == nil; got %v", res.Source)
+ }
+
+ // Get document 1, exclude Message field
+ fsc := NewFetchSourceContext(true).Exclude("message")
+ res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Found != true {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source == nil {
+ t.Errorf("expected Source != nil; got %v", res.Source)
+ }
+ var tw tweet
+ err = json.Unmarshal(*res.Source, &tw)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tw.User != "olivere" {
+ t.Errorf("expected user %q; got: %q", "olivere", tw.User)
+ }
+ if tw.Message != "" {
+ t.Errorf("expected message %q; got: %q", "", tw.Message)
+ }
+}
+
+func TestGetWithFields(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").Timestamp("12345").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1, specifying fields
+ res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Fields("message", "_timestamp").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Found != true {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+
+ timestamp, ok := res.Fields["_timestamp"].(float64)
+ if !ok {
+ t.Fatalf("Cannot retrieve \"_timestamp\" field from document")
+ }
+ if timestamp != 12345 {
+ t.Fatalf("Expected timestamp %v; got %v", 12345, timestamp)
+ }
+
+ messageField, ok := res.Fields["message"]
+ if !ok {
+ t.Fatalf("Cannot retrieve \"message\" field from document")
+ }
+
+ // Depending on the version of elasticsearch the message field will be returned
+ // as a string or a slice of strings. This test works in both cases.
+
+ messageString, ok := messageField.(string)
+ if !ok {
+ messageArray, ok := messageField.([]interface{})
+ if ok {
+ messageString, ok = messageArray[0].(string)
+ }
+ if !ok {
+ t.Fatalf("\"message\" field should be a string or a slice of strings")
+ }
+ }
+
+ if messageString != tweet1.Message {
+ t.Errorf("Expected message %s; got %s", tweet1.Message, messageString)
+ }
+}
+
+func TestGetFailsWithMissingParams(t *testing.T) {
+ // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
+ client := setupTestClientAndCreateIndex(t)
+ if _, err := client.Get().Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Index(testIndexName).Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Type("tweet").Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Id("1").Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ /*
+ if _, err := client.Get().Index(testIndexName).Id("1").Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ */
+ if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go
new file mode 100644
index 00000000..dab8c45b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go
@@ -0,0 +1,496 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Highlight allows highlighting search results on one or more fields.
// For details, see:
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
type Highlight struct {
	// Pointer and slice/map fields distinguish "not set" (nil or empty)
	// from an explicit zero value; unset fields are omitted by Source.
	fields                []*HighlighterField
	tagsSchema            *string
	highlightFilter       *bool
	fragmentSize          *int
	numOfFragments        *int
	preTags               []string
	postTags              []string
	order                 *string
	encoder               *string
	requireFieldMatch     *bool
	boundaryMaxScan       *int
	boundaryChars         []rune
	highlighterType       *string
	fragmenter            *string
	highlightQuery        Query
	noMatchSize           *int
	phraseLimit           *int
	options               map[string]interface{}
	forceSource           *bool
	// useExplicitFieldOrder, when true, serializes fields as an array so
	// their order is preserved; see Source.
	useExplicitFieldOrder bool
}
+
+func NewHighlight() *Highlight {
+ hl := &Highlight{
+ fields: make([]*HighlighterField, 0),
+ preTags: make([]string, 0),
+ postTags: make([]string, 0),
+ boundaryChars: make([]rune, 0),
+ options: make(map[string]interface{}),
+ }
+ return hl
+}
+
// Fields adds one or more pre-built highlighter fields.
func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
	hl.fields = append(hl.fields, fields...)
	return hl
}

// Field adds a field (with default settings) to be highlighted.
func (hl *Highlight) Field(name string) *Highlight {
	field := NewHighlighterField(name)
	hl.fields = append(hl.fields, field)
	return hl
}

// TagsSchema sets a predefined tag schema to use for highlighting.
func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
	hl.tagsSchema = &schemaName
	return hl
}

// HighlightFilter sets the "highlight_filter" option.
func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
	hl.highlightFilter = &highlightFilter
	return hl
}

// FragmentSize sets the size of the highlighted fragments.
func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
	hl.fragmentSize = &fragmentSize
	return hl
}

// NumOfFragments sets the maximum number of fragments to return.
func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
	hl.numOfFragments = &numOfFragments
	return hl
}

// Encoder sets the encoder used for the highlighted output.
func (hl *Highlight) Encoder(encoder string) *Highlight {
	hl.encoder = &encoder
	return hl
}

// PreTags replaces the tags inserted before highlighted terms.
func (hl *Highlight) PreTags(preTags ...string) *Highlight {
	hl.preTags = make([]string, 0)
	hl.preTags = append(hl.preTags, preTags...)
	return hl
}

// PostTags replaces the tags inserted after highlighted terms.
func (hl *Highlight) PostTags(postTags ...string) *Highlight {
	hl.postTags = make([]string, 0)
	hl.postTags = append(hl.postTags, postTags...)
	return hl
}

// Order sets the order in which fragments are returned.
func (hl *Highlight) Order(order string) *Highlight {
	hl.order = &order
	return hl
}

// RequireFieldMatch sets whether a field is only highlighted when the
// query matched that specific field.
func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
	hl.requireFieldMatch = &requireFieldMatch
	return hl
}

// BoundaryMaxScan sets how far to scan for boundary characters.
func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
	hl.boundaryMaxScan = &boundaryMaxScan
	return hl
}

// BoundaryChars replaces the set of boundary characters.
func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
	hl.boundaryChars = make([]rune, 0)
	hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
	return hl
}

// HighlighterType sets the type of highlighter to use.
func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
	hl.highlighterType = &highlighterType
	return hl
}

// Fragmenter sets how text should be broken up into fragments.
func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
	hl.fragmenter = &fragmenter
	return hl
}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+ hl.highlightQuery = highlightQuery
+ return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+ hl.noMatchSize = &noMatchSize
+ return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+ hl.options = options
+ return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+ hl.forceSource = &forceSource
+ return hl
+}
+
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+ hl.useExplicitFieldOrder = useExplicitFieldOrder
+ return hl
+}
+
+// Creates the query source for the bool query.
+func (hl *Highlight) Source() interface{} {
+ // Returns the map inside of "highlight":
+ // "highlight":{
+ // ... this ...
+ // }
+ source := make(map[string]interface{})
+ if hl.tagsSchema != nil {
+ source["tags_schema"] = *hl.tagsSchema
+ }
+ if hl.preTags != nil && len(hl.preTags) > 0 {
+ source["pre_tags"] = hl.preTags
+ }
+ if hl.postTags != nil && len(hl.postTags) > 0 {
+ source["post_tags"] = hl.postTags
+ }
+ if hl.order != nil {
+ source["order"] = *hl.order
+ }
+ if hl.highlightFilter != nil {
+ source["highlight_filter"] = *hl.highlightFilter
+ }
+ if hl.fragmentSize != nil {
+ source["fragment_size"] = *hl.fragmentSize
+ }
+ if hl.numOfFragments != nil {
+ source["number_of_fragments"] = *hl.numOfFragments
+ }
+ if hl.encoder != nil {
+ source["encoder"] = *hl.encoder
+ }
+ if hl.requireFieldMatch != nil {
+ source["require_field_match"] = *hl.requireFieldMatch
+ }
+ if hl.boundaryMaxScan != nil {
+ source["boundary_max_scan"] = *hl.boundaryMaxScan
+ }
+ if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
+ source["boundary_chars"] = hl.boundaryChars
+ }
+ if hl.highlighterType != nil {
+ source["type"] = *hl.highlighterType
+ }
+ if hl.fragmenter != nil {
+ source["fragmenter"] = *hl.fragmenter
+ }
+ if hl.highlightQuery != nil {
+ source["highlight_query"] = hl.highlightQuery.Source()
+ }
+ if hl.noMatchSize != nil {
+ source["no_match_size"] = *hl.noMatchSize
+ }
+ if hl.phraseLimit != nil {
+ source["phrase_limit"] = *hl.phraseLimit
+ }
+ if hl.options != nil && len(hl.options) > 0 {
+ source["options"] = hl.options
+ }
+ if hl.forceSource != nil {
+ source["force_source"] = *hl.forceSource
+ }
+
+ if hl.fields != nil && len(hl.fields) > 0 {
+ if hl.useExplicitFieldOrder {
+ // Use a slice for the fields
+ fields := make([]map[string]interface{}, 0)
+ for _, field := range hl.fields {
+ fmap := make(map[string]interface{})
+ fmap[field.Name] = field.Source()
+ fields = append(fields, fmap)
+ }
+ source["fields"] = fields
+ } else {
+ // Use a map for the fields
+ fields := make(map[string]interface{}, 0)
+ for _, field := range hl.fields {
+ fields[field.Name] = field.Source()
+ }
+ source["fields"] = fields
+ }
+ }
+
+ return source
+
+ /*
+ highlightS := make(map[string]interface{})
+
+ if hl.tagsSchema != "" {
+ highlightS["tags_schema"] = hl.tagsSchema
+ }
+ if len(hl.preTags) > 0 {
+ highlightS["pre_tags"] = hl.preTags
+ }
+ if len(hl.postTags) > 0 {
+ highlightS["post_tags"] = hl.postTags
+ }
+ if hl.order != "" {
+ highlightS["order"] = hl.order
+ }
+ if hl.encoder != "" {
+ highlightS["encoder"] = hl.encoder
+ }
+ if hl.requireFieldMatch != nil {
+ highlightS["require_field_match"] = *hl.requireFieldMatch
+ }
+ if hl.highlighterType != "" {
+ highlightS["type"] = hl.highlighterType
+ }
+ if hl.fragmenter != "" {
+ highlightS["fragmenter"] = hl.fragmenter
+ }
+ if hl.highlightQuery != nil {
+ highlightS["highlight_query"] = hl.highlightQuery.Source()
+ }
+ if hl.noMatchSize != nil {
+ highlightS["no_match_size"] = *hl.noMatchSize
+ }
+ if len(hl.options) > 0 {
+ highlightS["options"] = hl.options
+ }
+ if hl.forceSource != nil {
+ highlightS["force_source"] = *hl.forceSource
+ }
+ if len(hl.fields) > 0 {
+ fieldsS := make(map[string]interface{})
+ for _, field := range hl.fields {
+ fieldsS[field.Name] = field.Source()
+ }
+ highlightS["fields"] = fieldsS
+ }
+
+ return highlightS
+ */
+}
+
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+ Name string
+
+ preTags []string
+ postTags []string
+ fragmentSize int
+ fragmentOffset int
+ numOfFragments int
+ highlightFilter *bool
+ order *string
+ requireFieldMatch *bool
+ boundaryMaxScan int
+ boundaryChars []rune
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ matchedFields []string
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+
+ /*
+ Name string
+ preTags []string
+ postTags []string
+ fragmentSize int
+ numOfFragments int
+ fragmentOffset int
+ highlightFilter *bool
+ order string
+ requireFieldMatch *bool
+ boundaryMaxScan int
+ boundaryChars []rune
+ highlighterType string
+ fragmenter string
+ highlightQuery Query
+ noMatchSize *int
+ matchedFields []string
+ options map[string]interface{}
+ forceSource *bool
+ */
+}
+
// NewHighlighterField creates a highlighter for the field with the
// given name. Numeric settings start at -1, meaning "not set".
func NewHighlighterField(name string) *HighlighterField {
	return &HighlighterField{
		Name:            name,
		preTags:         make([]string, 0),
		postTags:        make([]string, 0),
		fragmentSize:    -1,
		fragmentOffset:  -1,
		numOfFragments:  -1,
		boundaryMaxScan: -1,
		boundaryChars:   make([]rune, 0),
		matchedFields:   make([]string, 0),
		options:         make(map[string]interface{}),
	}
}

// PreTags replaces the tags inserted before highlighted terms.
func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
	f.preTags = make([]string, 0)
	f.preTags = append(f.preTags, preTags...)
	return f
}

// PostTags replaces the tags inserted after highlighted terms.
func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
	f.postTags = make([]string, 0)
	f.postTags = append(f.postTags, postTags...)
	return f
}

// FragmentSize sets the size of the highlighted fragments.
func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
	f.fragmentSize = fragmentSize
	return f
}

// FragmentOffset sets the margin from which highlighting starts.
func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
	f.fragmentOffset = fragmentOffset
	return f
}

// NumOfFragments sets the maximum number of fragments to return.
func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
	f.numOfFragments = numOfFragments
	return f
}

// HighlightFilter sets the "highlight_filter" option.
func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
	f.highlightFilter = &highlightFilter
	return f
}

// Order sets the order in which fragments are returned.
func (f *HighlighterField) Order(order string) *HighlighterField {
	f.order = &order
	return f
}

// RequireFieldMatch sets whether this field is only highlighted when the
// query matched it specifically.
func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
	f.requireFieldMatch = &requireFieldMatch
	return f
}

// BoundaryMaxScan sets how far to scan for boundary characters.
func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
	f.boundaryMaxScan = boundaryMaxScan
	return f
}

// BoundaryChars replaces the set of boundary characters.
func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
	f.boundaryChars = make([]rune, 0)
	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
	return f
}

// HighlighterType sets the type of highlighter to use for this field.
func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
	f.highlighterType = &highlighterType
	return f
}

// Fragmenter sets how text should be broken up into fragments.
func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
	f.fragmenter = &fragmenter
	return f
}

// HighlightQuery sets the query used for highlighting this field.
func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
	f.highlightQuery = highlightQuery
	return f
}

// NoMatchSize sets the snippet length to return when there is no match.
func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
	f.noMatchSize = &noMatchSize
	return f
}

// Options sets additional, highlighter-specific options for this field.
func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
	f.options = options
	return f
}

// MatchedFields replaces the list of fields whose matches are combined
// when highlighting this field.
func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
	f.matchedFields = make([]string, 0)
	f.matchedFields = append(f.matchedFields, matchedFields...)
	return f
}

// PhraseLimit sets the "phrase_limit" option for this field.
func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
	f.phraseLimit = &phraseLimit
	return f
}

// ForceSource sets the "force_source" option for this field.
func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
	f.forceSource = &forceSource
	return f
}
+
+func (f *HighlighterField) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if f.preTags != nil && len(f.preTags) > 0 {
+ source["pre_tags"] = f.preTags
+ }
+ if f.postTags != nil && len(f.postTags) > 0 {
+ source["post_tags"] = f.postTags
+ }
+ if f.fragmentSize != -1 {
+ source["fragment_size"] = f.fragmentSize
+ }
+ if f.numOfFragments != -1 {
+ source["number_of_fragments"] = f.numOfFragments
+ }
+ if f.fragmentOffset != -1 {
+ source["fragment_offset"] = f.fragmentOffset
+ }
+ if f.highlightFilter != nil {
+ source["highlight_filter"] = *f.highlightFilter
+ }
+ if f.order != nil {
+ source["order"] = *f.order
+ }
+ if f.requireFieldMatch != nil {
+ source["require_field_match"] = *f.requireFieldMatch
+ }
+ if f.boundaryMaxScan != -1 {
+ source["boundary_max_scan"] = f.boundaryMaxScan
+ }
+ if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
+ source["boundary_chars"] = f.boundaryChars
+ }
+ if f.highlighterType != nil {
+ source["type"] = *f.highlighterType
+ }
+ if f.fragmenter != nil {
+ source["fragmenter"] = *f.fragmenter
+ }
+ if f.highlightQuery != nil {
+ source["highlight_query"] = f.highlightQuery.Source()
+ }
+ if f.noMatchSize != nil {
+ source["no_match_size"] = *f.noMatchSize
+ }
+ if f.matchedFields != nil && len(f.matchedFields) > 0 {
+ source["matched_fields"] = f.matchedFields
+ }
+ if f.phraseLimit != nil {
+ source["phrase_limit"] = *f.phraseLimit
+ }
+ if f.options != nil && len(f.options) > 0 {
+ source["options"] = f.options
+ }
+ if f.forceSource != nil {
+ source["force_source"] = *f.forceSource
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight_test.go
new file mode 100644
index 00000000..9538172d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight_test.go
@@ -0,0 +1,168 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
// TestHighlighterField checks that a field without options serializes
// to an empty JSON object.
func TestHighlighterField(t *testing.T) {
	field := NewHighlighterField("grade")
	data, err := json.Marshal(field.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlighterFieldWithOptions checks serialization of per-field
// fragment options.
func TestHighlighterFieldWithOptions(t *testing.T) {
	field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
	data, err := json.Marshal(field.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fragment_size":2,"number_of_fragments":1}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithStringField checks adding a field by name.
func TestHighlightWithStringField(t *testing.T) {
	builder := NewHighlight().Field("grade")
	data, err := json.Marshal(builder.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithFields checks adding a pre-built highlighter field.
func TestHighlightWithFields(t *testing.T) {
	gradeField := NewHighlighterField("grade")
	builder := NewHighlight().Fields(gradeField)
	data, err := json.Marshal(builder.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithMultipleFields checks that multiple fields serialize
// into one "fields" object.
func TestHighlightWithMultipleFields(t *testing.T) {
	gradeField := NewHighlighterField("grade")
	colorField := NewHighlighterField("color")
	builder := NewHighlight().Fields(gradeField, colorField)
	data, err := json.Marshal(builder.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"color":{},"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlighterWithExplicitFieldOrder checks that UseExplicitFieldOrder
// serializes fields as an array, preserving insertion order.
func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
	gradeField := NewHighlighterField("grade").FragmentSize(2)
	colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
	builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
	data, err := json.Marshal(builder.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}
+
// TestHighlightWithTermQuery is an integration test: it indexes three
// documents, runs a prefix query with highlighting enabled, and checks
// the highlighted snippet of the single matching document.
func TestHighlightWithTermQuery(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Specify highlighter; custom em tags mark the highlighted term.
	hl := NewHighlight()
	hl = hl.Fields(NewHighlighterField("message"))
	hl = hl.PreTags("").PostTags("")

	// The prefix query should match exactly one document (tweet1).
	query := NewPrefixQuery("message", "golang")
	searchResult, err := client.Search().
		Index(testIndexName).
		Highlight(hl).
		Query(&query).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if searchResult.Hits == nil {
		t.Fatalf("expected SearchResult.Hits != nil; got nil")
	}
	if searchResult.Hits.TotalHits != 1 {
		t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
	}
	if len(searchResult.Hits.Hits) != 1 {
		t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
	}

	hit := searchResult.Hits.Hits[0]
	var tw tweet
	if err := json.Unmarshal(*hit.Source, &tw); err != nil {
		t.Fatal(err)
	}
	if hit.Highlight == nil || len(hit.Highlight) == 0 {
		t.Fatal("expected hit to have a highlight; got nil")
	}
	// The "message" field should carry exactly one highlighted fragment
	// with the query term wrapped in the configured tags.
	if hl, found := hit.Highlight["message"]; found {
		if len(hl) != 1 {
			t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
		}
		expected := "Welcome to Golang and Elasticsearch."
		if hl[0] != expected {
			t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
		}
	} else {
		t.Fatal("expected to have a highlight on field \"message\"; got none")
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go
new file mode 100644
index 00000000..4262ecba
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go
@@ -0,0 +1,217 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// IndexResult is the result of indexing a document in Elasticsearch.
type IndexResult struct {
	Index   string `json:"_index"`   // index the document was stored in
	Type    string `json:"_type"`    // document type
	Id      string `json:"_id"`      // document id (possibly auto-generated)
	Version int    `json:"_version"` // document version after this operation
	Created bool   `json:"created"`  // true if the document was created, false if updated
}

// IndexService adds documents to Elasticsearch.
// Only explicitly set options are sent as query parameters by Do.
type IndexService struct {
	client      *Client
	index       string
	_type       string
	id          string
	routing     string
	parent      string
	opType      string
	refresh     *bool
	version     *int64
	versionType string
	timestamp   string
	ttl         string
	timeout     string
	bodyString  string
	bodyJson    interface{}
	pretty      bool
}

// NewIndexService creates a new IndexService.
func NewIndexService(client *Client) *IndexService {
	builder := &IndexService{
		client: client,
	}
	return builder
}

// Index sets the name of the index to store the document in.
func (b *IndexService) Index(name string) *IndexService {
	b.index = name
	return b
}

// Type sets the document type.
func (b *IndexService) Type(_type string) *IndexService {
	b._type = _type
	return b
}

// Id sets an explicit document id; if unset, Elasticsearch generates one.
func (b *IndexService) Id(id string) *IndexService {
	b.id = id
	return b
}

// Routing sets a specific routing value.
func (b *IndexService) Routing(routing string) *IndexService {
	b.routing = routing
	return b
}

// Parent sets the id of the parent document.
func (b *IndexService) Parent(parent string) *IndexService {
	b.parent = parent
	return b
}

// OpType is either "create" or "index" (the default).
func (b *IndexService) OpType(opType string) *IndexService {
	b.opType = opType
	return b
}

// Refresh, when true, refreshes the shard after the operation so the
// document is immediately searchable.
func (b *IndexService) Refresh(refresh bool) *IndexService {
	b.refresh = &refresh
	return b
}

// Version sets an explicit version for concurrency control.
func (b *IndexService) Version(version int64) *IndexService {
	b.version = &version
	return b
}

// VersionType is either "internal" (default), "external",
// "external_gt", "external_gte", or "force".
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
// for details.
func (b *IndexService) VersionType(versionType string) *IndexService {
	b.versionType = versionType
	return b
}

// Timestamp sets an explicit timestamp for the document.
func (b *IndexService) Timestamp(timestamp string) *IndexService {
	b.timestamp = timestamp
	return b
}

// TTL sets the document's time-to-live.
func (b *IndexService) TTL(ttl string) *IndexService {
	b.ttl = ttl
	return b
}

// Timeout sets an explicit operation timeout.
func (b *IndexService) Timeout(timeout string) *IndexService {
	b.timeout = timeout
	return b
}

// BodyString sets the document as a raw string; BodyJson takes
// precedence if both are set.
func (b *IndexService) BodyString(body string) *IndexService {
	b.bodyString = body
	return b
}

// BodyJson sets the document as a JSON-serializable value.
func (b *IndexService) BodyJson(json interface{}) *IndexService {
	b.bodyJson = json
	return b
}

// Pretty asks Elasticsearch to indent the JSON response.
func (b *IndexService) Pretty(pretty bool) *IndexService {
	b.pretty = pretty
	return b
}
+
+func (b *IndexService) Do() (*IndexResult, error) {
+ // Build url
+ var path, method string
+ if b.id != "" {
+ // Create document with manual id
+ method = "PUT"
+ path = "/{index}/{type}/{id}"
+ } else {
+ // Automatic ID generation
+ // See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+ method = "POST"
+ path = "/{index}/{type}/"
+ }
+ path, err := uritemplates.Expand(path, map[string]string{
+ "index": b.index,
+ "type": b._type,
+ "id": b.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.routing != "" {
+ params.Set("routing", b.routing)
+ }
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ }
+ if b.opType != "" {
+ params.Set("op_type", b.opType)
+ }
+ if b.refresh != nil && *b.refresh {
+ params.Set("refresh", "true")
+ }
+ if b.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Set("version_type", b.versionType)
+ }
+ if b.timestamp != "" {
+ params.Set("timestamp", b.timestamp)
+ }
+ if b.ttl != "" {
+ params.Set("ttl", b.ttl)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+
+ /*
+ routing string
+ parent string
+ opType string
+ refresh *bool
+ version *int64
+ versionType string
+ timestamp string
+ ttl string
+ */
+
+ // Body
+ var body interface{}
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ } else {
+ body = b.bodyString
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest(method, path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(IndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go
new file mode 100644
index 00000000..7b0481cb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go
@@ -0,0 +1,145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// CloseIndexService closes an index.
// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
type CloseIndexService struct {
	client *Client
	pretty bool
	index  string
	// Pointer fields distinguish "not set" (nil) from an explicit false.
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	timeout           string
	masterTimeout     string
}

// NewCloseIndexService creates a new CloseIndexService.
func NewCloseIndexService(client *Client) *CloseIndexService {
	return &CloseIndexService{client: client}
}

// Index is the name of the index.
func (s *CloseIndexService) Index(index string) *CloseIndexService {
	s.index = index
	return s
}

// Timeout is an explicit operation timeout.
func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
	s.expandWildcards = expandWildcards
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *CloseIndexService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *CloseIndexService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
// Do executes the operation: it validates the builder, POSTs to
// /{index}/_close, and decodes the acknowledgement response.
func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("POST", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(CloseIndexResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// CloseIndexResponse is the response of CloseIndexService.Do.
type CloseIndexResponse struct {
	Acknowledged bool `json:"acknowledged"` // true if the cluster acknowledged the close
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go
new file mode 100644
index 00000000..fcf4ada7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go
@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// IndexExistsService checks whether an index exists.
type IndexExistsService struct {
	client *Client // client used to perform the HTTP request
	index  string  // name of the index to check
}

// NewIndexExistsService creates a new IndexExistsService.
func NewIndexExistsService(client *Client) *IndexExistsService {
	builder := &IndexExistsService{
		client: client,
	}
	return builder
}

// Index sets the name of the index to check.
func (b *IndexExistsService) Index(index string) *IndexExistsService {
	b.index = index
	return b
}
+
+func (b *IndexExistsService) Do() (bool, error) {
+ // Build url
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("HEAD", path, nil, nil)
+ if err != nil {
+ return false, err
+ }
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go
new file mode 100644
index 00000000..89aecb60
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go
@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// Blank assignments keep the imports above referenced so the file
// compiles even if not all of them are used by every code path.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// IndicesGetService retrieves information about one or more indices.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-index.html.
type IndicesGetService struct {
	client  *Client
	pretty  bool
	index   []string // indices to query; defaults to _all in buildURL
	feature []string // optional features, e.g. _settings, _mappings
	// Pointer fields distinguish "not set" (nil) from an explicit false.
	expandWildcards   string
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
}

// NewIndicesGetService creates a new IndicesGetService.
func NewIndicesGetService(client *Client) *IndicesGetService {
	return &IndicesGetService{
		client:  client,
		index:   make([]string, 0),
		feature: make([]string, 0),
	}
}

// Index is a list of index names. Use _all to retrieve information about
// all indices of a cluster.
func (s *IndicesGetService) Index(index ...string) *IndicesGetService {
	s.index = append(s.index, index...)
	return s
}

// Feature is a list of features (e.g. _settings,_mappings,_warmers, and _aliases).
func (s *IndicesGetService) Feature(feature ...string) *IndicesGetService {
	s.feature = append(s.feature, feature...)
	return s
}

// ExpandWildcards indicates whether wildcard expressions should
// get expanded to open or closed indices (default: open).
func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
	s.expandWildcards = expandWildcards
	return s
}

// Local indicates whether to return local information (do not retrieve
// the state from master node (default: false)).
func (s *IndicesGetService) Local(local bool) *IndicesGetService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard expression
// resolves to no concrete indices (default: false).
func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
	s.pretty = pretty
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.feature) > 0 {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+ "index": strings.Join(index, ","),
+ "feature": strings.Join(s.feature, ","),
+ })
+ } else {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}", map[string]string{
+ "index": strings.Join(index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
// Do executes the operation: it validates the builder, performs the GET
// request, and returns the per-index responses keyed by index name.
func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]*IndicesGetResponse
	if err := json.Unmarshal(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetResponse is part of the response of IndicesGetService.Do.
type IndicesGetResponse struct {
	Aliases  map[string]interface{} `json:"aliases"`
	Mappings map[string]interface{} `json:"mappings"`
	Settings map[string]interface{} `json:"settings"`
	Warmers  map[string]interface{} `json:"warmers"`
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go
new file mode 100644
index 00000000..f498e810
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go
@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ // NOTE(review): these blank assignments only keep the imports above
+ // referenced even where this file does not use them all; presumably
+ // the file follows a generator template — confirm before removing.
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// IndicesGetSettingsService retrieves the settings of one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
+type IndicesGetSettingsService struct {
+ client *Client
+ pretty bool
+ // Pointer-typed options distinguish "not set" (nil) from an explicit
+ // true/false, so only user-chosen values become query parameters.
+ index []string
+ name []string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+ return &IndicesGetSettingsService{
+ client: client,
+ index: make([]string, 0),
+ name: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform the operation on all indices.
+func (s *IndicesGetSettingsService) Index(index ...string) *IndicesGetSettingsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Name are the names of the settings that should be included.
+// When set, the path becomes /{index}/_settings/{name}, filtering the reply.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+ // Fall back to "_all" when no index was specified.
+ indexList := "_all"
+ if len(s.index) > 0 {
+ indexList = strings.Join(s.index, ",")
+ }
+
+ // The path carries an optional settings-name filter segment.
+ var path string
+ var err error
+ if len(s.name) > 0 {
+ path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+ "index": indexList,
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+ "index": indexList,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Translate only explicitly-set options into query string parameters.
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// There are no required fields: buildURL falls back to "_all" when no
+// index was given, so this always succeeds.
+func (s *IndicesGetSettingsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+// The result is a map keyed by index name.
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetSettingsResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+ Settings map[string]interface{} `json:"settings"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings_test.go
new file mode 100644
index 00000000..f53512d5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings_test.go
@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+// TestIndexGetSettingsURL checks the paths produced by buildURL.
+// Expected paths are URL-escaped by uritemplates ("*" -> %2A, "," -> %2C).
+func TestIndexGetSettingsURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Names []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all/_settings",
+ },
+ {
+ []string{},
+ []string{"index.merge.*"},
+ "/_all/_settings/index.merge.%2A",
+ },
+ {
+ []string{"twitter-*"},
+ []string{"index.merge.*", "_settings"},
+ "/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"index.merge.*", "_settings"},
+ "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+// TestIndexGetSettingsService exercises IndicesGetSettingsService.Do
+// against a live node and verifies settings come back for the test index.
+func TestIndexGetSettingsService(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // NOTE(review): lexicographic string comparison; fine for the 1.x
+ // versions on CI but would misorder e.g. "1.10.0" — confirm if newer
+ // versions are ever tested.
+ if esversion < "1.4.0" {
+ t.Skip("Index Get Settings API is available since 1.4")
+ return
+ }
+
+ res, err := client.IndexGetSettings().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected result; got: %v", res)
+ }
+ // The response is a map keyed by index name.
+ info, found := res[testIndexName]
+ if !found {
+ t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ }
+ if info == nil {
+ t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ }
+ if info.Settings == nil {
+ t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_test.go
new file mode 100644
index 00000000..3883925d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_test.go
@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+// TestIndexGetURL checks the paths produced by IndicesGetService.buildURL.
+// Expected paths are URL-escaped by uritemplates ("," -> %2C).
+func TestIndexGetURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Features []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all",
+ },
+ {
+ []string{},
+ []string{"_mappings"},
+ "/_all/_mappings",
+ },
+ {
+ []string{"twitter"},
+ []string{"_mappings", "_settings"},
+ "/twitter/_mappings%2C_settings",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"_mappings", "_settings"},
+ "/store-1%2Cstore-2/_mappings%2C_settings",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexGet().Index(test.Indices...).Feature(test.Features...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+// TestIndexGetService exercises IndicesGetService.Do against a live node.
+func TestIndexGetService(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // NOTE(review): lexicographic version comparison — see note in the
+ // settings test; adequate for the 1.x versions exercised on CI.
+ if esversion < "1.4.0" {
+ t.Skip("Index Get API is available since 1.4")
+ return
+ }
+
+ res, err := client.IndexGet().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected result; got: %v", res)
+ }
+ info, found := res[testIndexName]
+ if !found {
+ t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ }
+ if info == nil {
+ t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ }
+ if info.Mappings == nil {
+ t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
+ }
+ if info.Settings == nil {
+ t.Errorf("expected settings to be != nil; got: %v", info.Settings)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go
new file mode 100644
index 00000000..e93e50e7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// OpenIndexService opens an index.
+// Options are set via the chainable setter methods; Do sends the request.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type OpenIndexService struct {
+ client *Client
+ pretty bool
+ index string
+ expandWildcards string
+ timeout string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+}
+
+// NewOpenIndexService creates a new OpenIndexService.
+func NewOpenIndexService(client *Client) *OpenIndexService {
+ return &OpenIndexService{client: client}
+}
+
+// Index is the name of the index to open.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+// Added for consistency with the sibling services: the struct already
+// declares the pretty field, but it was previously impossible to set.
+func (s *OpenIndexService) Pretty(pretty bool) *OpenIndexService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *OpenIndexService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters. Previously the pretty flag was silently
+ // dropped here, unlike in every other service of this package.
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Index is the only required field.
+func (s *OpenIndexService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation by POSTing to /{index}/_open.
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(OpenIndexResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// OpenIndexResponse is the response of OpenIndexService.Do.
+type OpenIndexResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_test.go
new file mode 100644
index 00000000..187eab1a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_test.go
@@ -0,0 +1,552 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "testing"
+ "time"
+)
+
+const (
+ // Names of the two indices the test suite creates and tears down.
+ testIndexName = "elastic-test"
+ testIndexName2 = "elastic-test2"
+ // testMapping is the index body (settings + mappings) used by the
+ // setup helpers below; it declares tweet/comment types with a
+ // parent/child relation and a completion suggester field.
+ testMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "_default_": {
+ "_timestamp": {
+ "enabled": true,
+ "store": "yes"
+ },
+ "_ttl": {
+ "enabled": true,
+ "store": "yes"
+ }
+ },
+ "tweet":{
+ "properties":{
+ "tags":{
+ "type":"string"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion",
+ "payloads":true
+ }
+ }
+ },
+ "comment":{
+ "_parent": {
+ "type": "tweet"
+ }
+ }
+ }
+}
+`
+)
+
+// tweet is the document type used throughout the tests.
+type tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *SuggestField `json:"suggest_field,omitempty"`
+}
+
+func (t tweet) String() string {
+ return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets)
+}
+
+// comment is a child document type of tweet (see testMapping's _parent).
+type comment struct {
+ User string `json:"user"`
+ Comment string `json:"comment"`
+ Created time.Time `json:"created,omitempty"`
+}
+
+func (c comment) String() string {
+ return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment)
+}
+
+// isTravis reports whether the tests run on Travis CI.
+func isTravis() bool {
+ return os.Getenv("TRAVIS") != ""
+}
+
+// travisGoVersion returns the Go version Travis CI is testing with.
+func travisGoVersion() string {
+ return os.Getenv("TRAVIS_GO_VERSION")
+}
+
+// logger is the subset of testing.T methods the setup helpers need,
+// so they can be shared by tests and benchmarks.
+type logger interface {
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+}
+
+// setupTestClient creates a client and removes any leftover test indices.
+func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
+ var err error
+
+ client, err = NewClient(options...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Best-effort cleanup: errors (e.g. index does not exist yet) are
+ // deliberately ignored.
+ client.DeleteIndex(testIndexName).Do()
+ client.DeleteIndex(testIndexName2).Do()
+
+ return client
+}
+
+// setupTestClientAndCreateIndex additionally creates both test indices
+// with the shared testMapping body.
+func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClient(t, options...)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+
+ // Create second index
+ createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex2 == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex2)
+ }
+
+ return client
+}
+
+// setupTestClientAndCreateIndexAndLog is like setupTestClientAndCreateIndex
+// but additionally enables trace logging to stdout.
+func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client {
+ // Bug fix: the caller-supplied options were previously dropped;
+ // forward them together with the trace-log option.
+ return setupTestClientAndCreateIndex(t, append(options, SetTraceLog(log.New(os.Stdout, "", 0)))...)
+}
+
+// setupTestClientAndCreateIndexAndAddDocs seeds the test index with three
+// tweets (one routed explicitly) and one child comment, then flushes so
+// the documents are searchable.
+func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClientAndCreateIndex(t, options...)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ comment1 := comment{User: "nico", Comment: "You bet."}
+
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The comment is a child of tweet 3 (see testMapping's _parent).
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return client
+}
+
+// TestIndexLifecycle creates, checks, and deletes an index end-to-end.
+func TestIndexLifecycle(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+
+ // Check if index exists
+ indexExists, err := client.IndexExists(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !indexExists {
+ t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+
+ // Check if index exists
+ indexExists, err = client.IndexExists(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexExists {
+ t.Fatalf("index %s should not exist, but does\n", testIndexName)
+ }
+}
+
+// TestIndexExistScenarios checks IndexExists before and after creation.
+func TestIndexExistScenarios(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Should return false if index does not exist
+ indexExists, err := client.IndexExists(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexExists {
+ t.Fatalf("expected index exists to return %v, got %v", false, indexExists)
+ }
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Ack %v; got %v", true, createIndex.Acknowledged)
+ }
+
+ // Should return true now that the index exists
+ indexExists, err = client.IndexExists(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !indexExists {
+ t.Fatalf("expected index exists to return %v, got %v", true, indexExists)
+ }
+}
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndexOpenAndClose(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+ defer func() {
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+ }()
+
+ waitForYellow := func() {
+ // Wait for status yellow
+ res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res != nil && res.TimedOut {
+ t.Fatalf("cluster time out waiting for status %q", "yellow")
+ }
+ }
+
+ // Wait for cluster
+ waitForYellow()
+
+ // Close index
+ cresp, err := client.CloseIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !cresp.Acknowledged {
+ t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+ }
+
+ // Wait for cluster
+ waitForYellow()
+
+ // Open index again
+ oresp, err := client.OpenIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !oresp.Acknowledged {
+ t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+ }
+}
+*/
+
+// TestDocumentLifecycle indexes, checks, fetches, decodes, and deletes a
+// single document with an explicit ID.
+func TestDocumentLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("tweet").
+ Id("1").
+ BodyJson(&tweet1).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // Exists
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Get document
+ getResult, err := client.Get().
+ Index(testIndexName).
+ Type("tweet").
+ Id("1").
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if getResult.Index != testIndexName {
+ t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+ }
+ if getResult.Type != "tweet" {
+ t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+ }
+ if getResult.Id != "1" {
+ t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
+ }
+ if getResult.Source == nil {
+ t.Errorf("expected GetResult.Source to be != nil; got nil")
+ }
+
+ // Decode the Source field
+ var tweetGot tweet
+ err = json.Unmarshal(*getResult.Source, &tweetGot)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tweetGot.User != tweet1.User {
+ t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+ }
+ if tweetGot.Message != tweet1.Message {
+ t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+ }
+
+ // Delete document again
+ deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", deleteResult)
+ }
+
+ // Exists
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+}
+
+// TestDocumentLifecycleWithAutomaticIDGeneration is the same lifecycle as
+// above, but lets Elasticsearch generate the document ID.
+func TestDocumentLifecycleWithAutomaticIDGeneration(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("tweet").
+ BodyJson(&tweet1).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+ if indexResult.Id == "" {
+ t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
+ }
+ id := indexResult.Id
+
+ // Exists
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Get document
+ getResult, err := client.Get().
+ Index(testIndexName).
+ Type("tweet").
+ Id(id).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if getResult.Index != testIndexName {
+ t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+ }
+ if getResult.Type != "tweet" {
+ t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+ }
+ if getResult.Id != id {
+ t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+ }
+ if getResult.Source == nil {
+ t.Errorf("expected GetResult.Source to be != nil; got nil")
+ }
+
+ // Decode the Source field
+ var tweetGot tweet
+ err = json.Unmarshal(*getResult.Source, &tweetGot)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tweetGot.User != tweet1.User {
+ t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+ }
+ if tweetGot.Message != tweet1.Message {
+ t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+ }
+
+ // Delete document again
+ deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", deleteResult)
+ }
+
+ // Exists
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+}
+
+// TestIndexCreateExistsOpenCloseDelete walks the full index state machine:
+// create -> exists -> flush -> close -> open -> flush -> delete.
+// Currently skipped; see the note below.
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+ // TODO: Find out how to make these test robust
+ t.Skip("test fails regularly with 409 (Conflict): " +
+ "IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+ "primary not allocated post api... skipping")
+
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Fatalf("expected response; got: %v", createIndex)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
+ }
+
+ // Exists
+ indexExists, err := client.IndexExists(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !indexExists {
+ t.Fatalf("expected index exists=%v; got %v", true, indexExists)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Close index
+ closeIndex, err := client.CloseIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if closeIndex == nil {
+ t.Fatalf("expected response; got: %v", closeIndex)
+ }
+ if !closeIndex.Acknowledged {
+ t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
+ }
+
+ // Open index
+ openIndex, err := client.OpenIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if openIndex == nil {
+ t.Fatalf("expected response; got: %v", openIndex)
+ }
+ if !openIndex.Acknowledged {
+ t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteIndex == nil {
+ t.Fatalf("expected response; got: %v", deleteIndex)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go
new file mode 100644
index 00000000..faaeb3a7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesDeleteTemplateService deletes index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ timeout string
+ masterTimeout string
+}
+
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+ return &IndicesDeleteTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template. It is the only required field.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+ s.name = name
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation: DELETE /_template/{name}.
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesDeleteTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go
new file mode 100644
index 00000000..e96e9a1a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ local *bool
+}
+
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+ return &IndicesExistsTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template.
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+ s.name = name
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTemplateService) Do() (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template_test.go
new file mode 100644
index 00000000..32fb82ad
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndexExistsTemplate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tmpl := `{
+ "template":"elastic-test*",
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "tweet":{
+ "properties":{
+ "tags":{
+ "type":"string"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion",
+ "payloads":true
+ }
+ }
+ }
+ }
+}`
+ putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if putres == nil {
+ t.Fatalf("expected response; got: %v", putres)
+ }
+ if !putres.Acknowledged {
+ t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
+ }
+
+ // Always delete template
+ defer client.IndexDeleteTemplate("elastic-template").Do()
+
+ // Check if template exists
+ exists, err := client.IndexTemplateExists("elastic-template").Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if !exists {
+ t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
+ }
+
+ // Get template
+ getres, err := client.IndexGetTemplate("elastic-template").Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if getres == nil {
+ t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go
new file mode 100644
index 00000000..257a2f09
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go
@@ -0,0 +1,155 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-types-exists.html.
+type IndicesExistsTypeService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ allowNoIndices *bool
+ expandWildcards string
+ local *bool
+ ignoreUnavailable *bool
+}
+
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+ return &IndicesExistsTypeService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(index ...string) *IndicesExistsTypeService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(typ ...string) *IndicesExistsTypeService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local specifies whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+ if err := s.Validate(); err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+ "type": strings.Join(s.typ, ","),
+ "index": strings.Join(s.index, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTypeService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTypeService) Do() (bool, error) {
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type_test.go
new file mode 100644
index 00000000..b37d42f9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type_test.go
@@ -0,0 +1,121 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestTypeExistsBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ ExpectValidateFailure bool
+ }{
+ {
+ []string{},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{},
+ []string{"type1"},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/type1",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1"},
+ "/index1%2Cindex2/type1",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/type1%2Ctype2",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
+ if err == nil && test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to fail", i+1)
+ continue
+ }
+ if err != nil && !test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to succeed", i+1)
+ continue
+ }
+ if !test.ExpectValidateFailure {
+ path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Fatalf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+ }
+}
+
+func TestTypeExists(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index with tweet type
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+
+ // Check if type exists
+ exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+
+ // Check if type exists
+ exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go
new file mode 100644
index 00000000..1462ee39
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+ client *Client
+ pretty bool
+ name []string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+ return &IndicesGetTemplateService{
+ client: client,
+ name: make([]string, 0),
+ }
+}
+
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// FlatSettings determines whether settings are returned in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.name) > 0 {
+ path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ path = "/_template"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetTemplateResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+ Order int `json:"order,omitempty"`
+ Template string `json:"template,omitempty"`
+ Settings map[string]interface{} `json:"settings,omitempty"`
+ Mappings map[string]interface{} `json:"mappings,omitempty"`
+ Aliases map[string]interface{} `json:"aliases,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template_test.go
new file mode 100644
index 00000000..693cde5e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndexGetTemplateURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Names []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_template",
+ },
+ {
+ []string{"index1"},
+ "/_template/index1",
+ },
+ {
+ []string{"index1", "index2"},
+ "/_template/index1%2Cindex2",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go
new file mode 100644
index 00000000..7a97240c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesPutTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ order interface{}
+ create *bool
+ timeout string
+ masterTimeout string
+ flatSettings *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+ return &IndicesPutTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the index template.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+ s.name = name
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+ s.order = order
+ return s
+}
+
+// Create indicates whether the index template should only be added if
+// new or can also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+ s.create = &create
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the template definition, specified as a JSON-serializable object.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the template definition, specified as a raw JSON string.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.order != nil {
+ params.Set("order", fmt.Sprintf("%v", s.order))
+ }
+ if s.create != nil {
+ params.Set("create", fmt.Sprintf("%v", *s.create))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesPutTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go
new file mode 100644
index 00000000..5f033786
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go
@@ -0,0 +1,385 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+type IndicesStatsService struct {
+ client *Client
+ pretty bool
+ metric []string
+ index []string
+ level string
+ types []string
+ completionFields []string
+ fielddataFields []string
+ fields []string
+ groups []string
+ human *bool
+}
+
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+ return &IndicesStatsService{
+ client: client,
+ index: make([]string, 0),
+ metric: make([]string, 0),
+ completionFields: make([]string, 0),
+ fielddataFields: make([]string, 0),
+ fields: make([]string, 0),
+ groups: make([]string, 0),
+ types: make([]string, 0),
+ }
+}
+
+// Metric limits the information returned the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(index ...string) *IndicesStatsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Level returns stats aggregated at cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+ s.level = level
+ return s
+}
+
+// Types is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Types(types ...string) *IndicesStatsService {
+ s.types = append(s.types, types...)
+ return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+ s.completionFields = append(s.completionFields, completionFields...)
+ return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+ return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+ s.groups = append(s.groups, groups...)
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ if len(s.index) > 0 && len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else if len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else {
+ path = "/_stats"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(s.groups) > 0 {
+ params.Set("groups", strings.Join(s.groups, ","))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if len(s.types) > 0 {
+ params.Set("types", strings.Join(s.types, ","))
+ }
+ if len(s.completionFields) > 0 {
+ params.Set("completion_fields", strings.Join(s.completionFields, ","))
+ }
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesStatsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesStatsResponse is the response of IndicesStatsService.Do.
+type IndicesStatsResponse struct {
+ // Shards provides information returned from shards.
+ Shards shardsInfo `json:"_shards"`
+
+ // All provides summary stats about all indices.
+ All *IndexStats `json:"_all,omitempty"`
+
+ // Indices provides a map into the stats of an index. The key of the
+ // map is the index name.
+ Indices map[string]*IndexStats `json:"indices,omitempty"`
+}
+
+// IndexStats is index stats for a specific index.
+type IndexStats struct {
+ Primaries *IndexStatsDetails `json:"primaries,omitempty"`
+ Total *IndexStatsDetails `json:"total,omitempty"`
+}
+
+type IndexStatsDetails struct {
+ Docs *IndexStatsDocs `json:"docs,omitempty"`
+ Store *IndexStatsStore `json:"store,omitempty"`
+ Indexing *IndexStatsIndexing `json:"indexing,omitempty"`
+ Get *IndexStatsGet `json:"get,omitempty"`
+ Search *IndexStatsSearch `json:"search,omitempty"`
+ Merges *IndexStatsMerges `json:"merges,omitempty"`
+ Refresh *IndexStatsRefresh `json:"refresh,omitempty"`
+ Flush *IndexStatsFlush `json:"flush,omitempty"`
+ Warmer *IndexStatsWarmer `json:"warmer,omitempty"`
+ FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
+ IdCache *IndexStatsIdCache `json:"id_cache,omitempty"`
+ Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"`
+ Percolate *IndexStatsPercolate `json:"percolate,omitempty"`
+ Completion *IndexStatsCompletion `json:"completion,omitempty"`
+ Segments *IndexStatsSegments `json:"segments,omitempty"`
+ Translog *IndexStatsTranslog `json:"translog,omitempty"`
+ Suggest *IndexStatsSuggest `json:"suggest,omitempty"`
+ QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"`
+}
+
+// IndexStatsDocs holds the "docs" section of an index stats response:
+// the number of live and deleted documents.
+type IndexStatsDocs struct {
+ Count int64 `json:"count,omitempty"`
+ Deleted int64 `json:"deleted,omitempty"`
+}
+
+// IndexStatsStore holds the "store" section: on-disk size and store
+// throttling, both as human-readable strings and as raw numbers.
+type IndexStatsStore struct {
+ Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 0s
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+}
+
+// IndexStatsIndexing holds the "indexing" section: totals, timings, and
+// throttling state for index and delete operations.
+type IndexStatsIndexing struct {
+ IndexTotal int64 `json:"index_total,omitempty"`
+ IndexTime string `json:"index_time,omitempty"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
+ IndexCurrent int64 `json:"index_current,omitempty"`
+ DeleteTotal int64 `json:"delete_total,omitempty"`
+ DeleteTime string `json:"delete_time,omitempty"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
+ DeleteCurrent int64 `json:"delete_current,omitempty"`
+ NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
+ IsThrottled bool `json:"is_throttled,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"`
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+}
+
+// IndexStatsGet holds the "get" section: counts and timings for document
+// GETs, split into exists/missing outcomes.
+type IndexStatsGet struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ ExistsTotal int64 `json:"exists_total,omitempty"`
+ ExistsTime string `json:"exists_time,omitempty"`
+ ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"`
+ MissingTotal int64 `json:"missing_total,omitempty"`
+ MissingTime string `json:"missing_time,omitempty"`
+ MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+// IndexStatsSearch holds the "search" section: open search contexts plus
+// query- and fetch-phase counters and timings.
+type IndexStatsSearch struct {
+ OpenContexts int64 `json:"open_contexts,omitempty"`
+ QueryTotal int64 `json:"query_total,omitempty"`
+ QueryTime string `json:"query_time,omitempty"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"`
+ QueryCurrent int64 `json:"query_current,omitempty"`
+ FetchTotal int64 `json:"fetch_total,omitempty"`
+ FetchTime string `json:"fetch_time,omitempty"`
+ FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"`
+ FetchCurrent int64 `json:"fetch_current,omitempty"`
+}
+
+// IndexStatsMerges holds the "merges" section: currently running and
+// cumulative segment merges, with document and byte counts.
+type IndexStatsMerges struct {
+ Current int64 `json:"current,omitempty"`
+ CurrentDocs int64 `json:"current_docs,omitempty"`
+ CurrentSize string `json:"current_size,omitempty"`
+ CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+ TotalDocs int64 `json:"total_docs,omitempty"`
+ TotalSize string `json:"total_size,omitempty"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"`
+}
+
+// IndexStatsRefresh holds the "refresh" section: refresh counts and timings.
+type IndexStatsRefresh struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsFlush holds the "flush" section: flush counts and timings.
+type IndexStatsFlush struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsWarmer holds the "warmer" section: index warmer counts and timings.
+type IndexStatsWarmer struct {
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsFilterCache holds the "filter_cache" section: memory usage
+// and evictions of the filter cache.
+type IndexStatsFilterCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+// IndexStatsIdCache holds the "id_cache" section: memory used by the
+// parent/child id cache.
+type IndexStatsIdCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+}
+
+// IndexStatsFielddata holds the "fielddata" section: memory usage and
+// evictions of field data.
+type IndexStatsFielddata struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+// IndexStatsPercolate holds the "percolate" section: percolation counts,
+// timings, memory usage, and registered queries.
+type IndexStatsPercolate struct {
+ Total int64 `json:"total,omitempty"`
+ // NOTE(review): tag "get_time" looks inconsistent with the sibling
+ // "time_in_millis" — verify the human-readable field name that
+ // Elasticsearch actually emits for percolate stats.
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Queries int64 `json:"queries,omitempty"`
+}
+
+// IndexStatsCompletion holds the "completion" (suggester) section.
+type IndexStatsCompletion struct {
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+// IndexStatsSegments holds the "segments" section: segment counts and the
+// memory used by various in-memory segment structures.
+type IndexStatsSegments struct {
+ Count int64 `json:"count,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ IndexWriterMemory string `json:"index_writer_memory,omitempty"`
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"`
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"`
+ VersionMapMemory string `json:"version_map_memory,omitempty"`
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"`
+ // NOTE(review): tag "fixed_bit_set" is inconsistent with
+ // "fixed_bit_set_memory_in_bytes" below — verify against the field name
+ // Elasticsearch emits (presumably "fixed_bit_set_memory").
+ FixedBitSetMemory string `json:"fixed_bit_set,omitempty"`
+ FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"`
+}
+
+// IndexStatsTranslog holds the "translog" section: transaction log
+// operation count and size.
+type IndexStatsTranslog struct {
+ Operations int64 `json:"operations,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+// IndexStatsSuggest holds the "suggest" section: suggest request counts
+// and timings.
+type IndexStatsSuggest struct {
+ Total int64 `json:"total,omitempty"`
+ Time string `json:"time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+// IndexStatsQueryCache holds the "query_cache" section: memory usage,
+// evictions, and hit/miss counters of the query cache.
+type IndexStatsQueryCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+ HitCount int64 `json:"hit_count,omitempty"`
+ MissCount int64 `json:"miss_count,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats_test.go
new file mode 100644
index 00000000..2a72858d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats_test.go
@@ -0,0 +1,85 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+// TestIndexStatsBuildURL checks the URL path IndexStats builds for various
+// index/metric combinations; multiple values are comma-joined and the comma
+// is URL-escaped as %2C.
+func TestIndexStatsBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Metrics []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_stats",
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "/index1/_stats",
+ },
+ {
+ []string{},
+ []string{"metric1"},
+ "/_stats/metric1",
+ },
+ {
+ []string{"index1"},
+ []string{"metric1"},
+ "/index1/_stats/metric1",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"metric1"},
+ "/index1%2Cindex2/_stats/metric1",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"metric1", "metric2"},
+ "/index1%2Cindex2/_stats/metric1%2Cmetric2",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL()
+ if err != nil {
+ t.Fatalf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+// TestIndexStats runs IndexStats against a live test index with documents
+// and verifies that the per-index total document stats are populated.
+func TestIndexStats(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ stats, err := client.IndexStats(testIndexName).Do()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if stats == nil {
+ t.Fatalf("expected response; got: %v", stats)
+ }
+ stat, found := stats.Indices[testIndexName]
+ if !found {
+ t.Fatalf("expected stats about index %q; got: %v", testIndexName, found)
+ }
+ if stat.Total == nil {
+ t.Fatalf("expected total to be != nil; got: %v", stat.Total)
+ }
+ if stat.Total.Docs == nil {
+ t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs)
+ }
+ if stat.Total.Docs.Count == 0 {
+ t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go
new file mode 100644
index 00000000..0dcf693b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go
@@ -0,0 +1,156 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// InnerHit implements a simple join for parent/child, nested, and even
+// top-level documents in Elasticsearch.
+// It is an experimental feature for Elasticsearch versions 1.5 (or greater).
+// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html
+// for documentation.
+//
+// See the tests for SearchSource, HasChildFilter, HasChildQuery,
+// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery
+// for usage examples.
+type InnerHit struct {
+ source *SearchSource // search options serialized into the inner hit body
+ path string // nested path; serialized by SearchSource, not here (see Source)
+ typ string // parent/child type; serialized by SearchSource, not here (see Source)
+
+ name string // optional name under which the inner hits are returned
+}
+
+// NewInnerHit creates a new InnerHit.
+func NewInnerHit() *InnerHit {
+ return &InnerHit{source: NewSearchSource()}
+}
+
+// Path sets the nested path for the inner hit.
+func (hit *InnerHit) Path(path string) *InnerHit {
+ hit.path = path
+ return hit
+}
+
+// Type sets the parent/child type for the inner hit.
+func (hit *InnerHit) Type(typ string) *InnerHit {
+ hit.typ = typ
+ return hit
+}
+
+// Query sets the query to run on the inner hits.
+func (hit *InnerHit) Query(query Query) *InnerHit {
+ hit.source.Query(query)
+ return hit
+}
+
+// From sets the offset of the first inner hit to return.
+func (hit *InnerHit) From(from int) *InnerHit {
+ hit.source.From(from)
+ return hit
+}
+
+// Size sets the maximum number of inner hits to return.
+func (hit *InnerHit) Size(size int) *InnerHit {
+ hit.source.Size(size)
+ return hit
+}
+
+// TrackScores indicates whether scores are tracked for the inner hits.
+func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit {
+ hit.source.TrackScores(trackScores)
+ return hit
+}
+
+// Explain indicates whether to include an explanation with each inner hit.
+func (hit *InnerHit) Explain(explain bool) *InnerHit {
+ hit.source.Explain(explain)
+ return hit
+}
+
+// Version indicates whether to return document versions with the inner hits.
+func (hit *InnerHit) Version(version bool) *InnerHit {
+ hit.source.Version(version)
+ return hit
+}
+
+// Field adds a single field to load for the inner hits.
+func (hit *InnerHit) Field(fieldName string) *InnerHit {
+ hit.source.Field(fieldName)
+ return hit
+}
+
+// Fields adds one or more fields to load for the inner hits.
+func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit {
+ hit.source.Fields(fieldNames...)
+ return hit
+}
+
+// NoFields indicates that no fields should be loaded for the inner hits.
+func (hit *InnerHit) NoFields() *InnerHit {
+ hit.source.NoFields()
+ return hit
+}
+
+// FetchSource indicates whether the _source field should be returned.
+func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit {
+ hit.source.FetchSource(fetchSource)
+ return hit
+}
+
+// FetchSourceContext sets detailed options for _source filtering.
+func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit {
+ hit.source.FetchSourceContext(fetchSourceContext)
+ return hit
+}
+
+// FieldDataFields adds one or more field data fields to the inner hits.
+func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit {
+ hit.source.FieldDataFields(fieldDataFields...)
+ return hit
+}
+
+// FieldDataField adds a single field data field to the inner hits.
+func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit {
+ hit.source.FieldDataField(fieldDataField)
+ return hit
+}
+
+// ScriptFields adds one or more script fields to the inner hits.
+func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit {
+ hit.source.ScriptFields(scriptFields...)
+ return hit
+}
+
+// ScriptField adds a single script field to the inner hits.
+func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit {
+ hit.source.ScriptField(scriptField)
+ return hit
+}
+
+// Sort adds a sort on the given field, ascending or descending.
+func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit {
+ hit.source.Sort(field, ascending)
+ return hit
+}
+
+// SortWithInfo adds a sort described by a SortInfo.
+func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit {
+ hit.source.SortWithInfo(info)
+ return hit
+}
+
+// SortBy adds one or more sorters.
+func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit {
+ hit.source.SortBy(sorter...)
+ return hit
+}
+
+// Highlight sets the highlighter for the inner hits.
+func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit {
+ hit.source.Highlight(highlight)
+ return hit
+}
+
+// Highlighter returns the highlighter of the underlying search source.
+func (hit *InnerHit) Highlighter() *Highlight {
+ return hit.source.Highlighter()
+}
+
+// Name sets an optional name under which the inner hits appear in the
+// search response.
+func (hit *InnerHit) Name(name string) *InnerHit {
+ hit.name = name
+ return hit
+}
+
+// Source returns the inner hit request as a JSON-serializable map.
+// It returns nil if the underlying search source does not serialize
+// into a map.
+func (hit *InnerHit) Source() interface{} {
+ source, ok := hit.source.Source().(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ // Notice that hit.typ and hit.path are not exported here.
+ // They are only used with SearchSource and serialized there.
+
+ if hit.name != "" {
+ source["name"] = hit.name
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit_test.go
new file mode 100644
index 00000000..dfd77ec9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestInnerHitEmpty verifies that a fresh InnerHit serializes to an empty
+// JSON object.
+func TestInnerHitEmpty(t *testing.T) {
+ hit := NewInnerHit()
+ data, err := json.Marshal(hit.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestInnerHitWithName verifies that a name set via Name() appears in the
+// serialized inner hit.
+func TestInnerHitWithName(t *testing.T) {
+ hit := NewInnerHit().Name("comments")
+ data, err := json.Marshal(hit.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"name":"comments"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go
new file mode 100644
index 00000000..5ab946ce
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go
@@ -0,0 +1,194 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+
+// MultiGetService retrieves multiple documents in a single roundtrip,
+// using the Elasticsearch Multi Get API (_mget).
+type MultiGetService struct {
+ client *Client
+ preference string // node/shard routing preference
+ realtime *bool // realtime GET flag; nil means server default
+ refresh *bool // refresh-before-get flag; nil means server default
+ items []*MultiGetItem // documents to retrieve
+}
+
+// NewMultiGetService creates a new MultiGetService.
+func NewMultiGetService(client *Client) *MultiGetService {
+ builder := &MultiGetService{
+ client: client,
+ items: make([]*MultiGetItem, 0),
+ }
+ return builder
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on.
+func (b *MultiGetService) Preference(preference string) *MultiGetService {
+ b.preference = preference
+ return b
+}
+
+// Refresh indicates whether to refresh the relevant shards before the
+// get operation.
+func (b *MultiGetService) Refresh(refresh bool) *MultiGetService {
+ b.refresh = &refresh
+ return b
+}
+
+// Realtime indicates whether to perform the get in realtime mode.
+func (b *MultiGetService) Realtime(realtime bool) *MultiGetService {
+ b.realtime = &realtime
+ return b
+}
+
+// Add adds one or more documents to retrieve.
+func (b *MultiGetService) Add(items ...*MultiGetItem) *MultiGetService {
+ b.items = append(b.items, items...)
+ return b
+}
+
+// Source returns the request body: {"docs": [ ...items... ]}.
+func (b *MultiGetService) Source() interface{} {
+ source := make(map[string]interface{})
+ items := make([]interface{}, len(b.items))
+ for i, item := range b.items {
+ items[i] = item.Source()
+ }
+ source["docs"] = items
+ return source
+}
+
+// Do executes the multi-get request and returns the decoded result.
+func (b *MultiGetService) Do() (*MultiGetResult, error) {
+ // Build url
+ path := "/_mget"
+
+ params := make(url.Values)
+ if b.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+ }
+ if b.preference != "" {
+ params.Add("preference", b.preference)
+ }
+ if b.refresh != nil {
+ params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+
+ // Set body
+ body := b.Source()
+
+ // Get response
+ res, err := b.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MultiGetResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Multi Get Item --
+
+// MultiGetItem is a single document to retrieve via the MultiGetService.
+type MultiGetItem struct {
+ index string
+ typ string
+ id string
+ routing string
+ fields []string
+ version *int64 // see org.elasticsearch.common.lucene.uid.Versions
+ versionType string // see org.elasticsearch.index.VersionType
+ fsc *FetchSourceContext
+}
+
+// NewMultiGetItem creates a new MultiGetItem.
+func NewMultiGetItem() *MultiGetItem {
+ return &MultiGetItem{}
+}
+
+// Index sets the index to get the document from.
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+ item.index = index
+ return item
+}
+
+// Type sets the document type.
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+ item.typ = typ
+ return item
+}
+
+// Id sets the document id.
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+ item.id = id
+ return item
+}
+
+// Routing sets a routing value for the get request.
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+ item.routing = routing
+ return item
+}
+
+// Fields adds one or more stored fields to load for this document.
+func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
+ if item.fields == nil {
+ item.fields = make([]string, 0)
+ }
+ item.fields = append(item.fields, fields...)
+ return item
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default in Elasticsearch is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+ item.version = &version
+ return item
+}
+
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+ item.versionType = versionType
+ return item
+}
+
+// FetchSource sets detailed options for _source filtering.
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+ item.fsc = fetchSourceContext
+ return item
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiGet search.
+func (item *MultiGetItem) Source() interface{} {
+ source := make(map[string]interface{})
+
+ source["_id"] = item.id
+
+ if item.index != "" {
+ source["_index"] = item.index
+ }
+ if item.typ != "" {
+ source["_type"] = item.typ
+ }
+ if item.fsc != nil {
+ source["_source"] = item.fsc.Source()
+ }
+ if item.fields != nil {
+ source["fields"] = item.fields
+ }
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ }
+ if item.version != nil {
+ // The version is serialized as a string, not a number.
+ source["version"] = fmt.Sprintf("%d", *item.version)
+ }
+ if item.versionType != "" {
+ source["version_type"] = item.versionType
+ }
+
+ return source
+}
+
+// -- Result of a Multi Get request.
+
+// MultiGetResult is the outcome of a multi-get request: one GetResult
+// per requested document, in request order.
+type MultiGetResult struct {
+ Docs []*GetResult `json:"docs,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get_test.go
new file mode 100644
index 00000000..64b47221
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get_test.go
@@ -0,0 +1,95 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestMultiGet indexes three documents, retrieves two of them via the
+// Multi Get API, and verifies their sources round-trip correctly.
+func TestMultiGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add some documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // Get documents 1 and 3
+ res, err := client.MultiGet().
+ Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")).
+ Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result to be != nil; got nil")
+ }
+ if res.Docs == nil {
+ t.Fatal("expected result docs to be != nil; got nil")
+ }
+ if len(res.Docs) != 2 {
+ t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
+ }
+
+ item := res.Docs[0]
+ if item.Error != "" {
+ t.Errorf("expected no error on item 0; got %q", item.Error)
+ }
+ if item.Source == nil {
+ t.Errorf("expected Source != nil; got %v", item.Source)
+ }
+ var doc tweet
+ if err := json.Unmarshal(*item.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal item Source; got %v", err)
+ }
+ if doc.Message != tweet1.Message {
+ t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
+ }
+
+ item = res.Docs[1]
+ if item.Error != "" {
+ t.Errorf("expected no error on item 1; got %q", item.Error)
+ }
+ if item.Source == nil {
+ t.Errorf("expected Source != nil; got %v", item.Source)
+ }
+ if err := json.Unmarshal(*item.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal item Source; got %v", err)
+ }
+ if doc.Message != tweet3.Message {
+ t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go
new file mode 100644
index 00000000..f42d5e5f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go
@@ -0,0 +1,101 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// MultiSearch executes one or more searches in one roundtrip.
+// See http://www.elasticsearch.org/guide/reference/api/multi-search/
+type MultiSearchService struct {
+ client *Client
+ requests []*SearchRequest // searches to execute in one _msearch call
+ indices []string // default indices applied to requests without indices
+ pretty bool
+ // NOTE(review): routing and preference are declared but never
+ // serialized into the request in Do() — confirm whether they should
+ // be added as URL parameters or removed.
+ routing string
+ preference string
+}
+
+// NewMultiSearchService creates a new MultiSearchService.
+func NewMultiSearchService(client *Client) *MultiSearchService {
+ builder := &MultiSearchService{
+ client: client,
+ requests: make([]*SearchRequest, 0),
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+// Add adds one or more search requests to be executed.
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+ s.requests = append(s.requests, requests...)
+ return s
+}
+
+// Index adds a default index used for requests that specify none.
+func (s *MultiSearchService) Index(index string) *MultiSearchService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices adds default indices used for requests that specify none.
+func (s *MultiSearchService) Indices(indices ...string) *MultiSearchService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Pretty indicates whether to ask Elasticsearch for pretty-printed output.
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+ s.pretty = pretty
+ return s
+}
+
+// Do executes all added search requests in a single _msearch call and
+// returns one SearchResult per request, in order.
+func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+ // Build url
+ path := "/_msearch"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Set body: each request contributes a header line and a body line
+ // (newline-delimited JSON, as required by the Multi Search API).
+ lines := make([]string, 0)
+ for _, sr := range s.requests {
+ // Set default indices if not specified in the request
+ if !sr.HasIndices() && len(s.indices) > 0 {
+ sr = sr.Indices(s.indices...)
+ }
+
+ header, err := json.Marshal(sr.header())
+ if err != nil {
+ return nil, err
+ }
+ body, err := json.Marshal(sr.body())
+ if err != nil {
+ return nil, err
+ }
+ lines = append(lines, string(header))
+ lines = append(lines, string(body))
+ }
+ body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
+
+ // Get response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MultiSearchResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// MultiSearchResult holds one SearchResult per request, in request order.
+type MultiSearchResult struct {
+ Responses []*SearchResult `json:"responses,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search_test.go
new file mode 100644
index 00000000..1741890c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search_test.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+// TestMultiSearch sends two different search requests in one _msearch
+// roundtrip and verifies both responses independently.
+func TestMultiSearch(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Tags: []string{"golang", "elasticsearch"},
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Spawn two search queries with one roundtrip
+ q1 := NewMatchAllQuery()
+ q2 := NewTermQuery("tags", "golang")
+
+ sreq1 := NewSearchRequest().Indices(testIndexName, testIndexName2).
+ Source(NewSearchSource().Query(q1).Size(10))
+ sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet").
+ Source(NewSearchSource().Query(q2))
+
+ searchResult, err := client.MultiSearch().
+ Add(sreq1, sreq2).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Responses == nil {
+ t.Fatal("expected responses != nil; got nil")
+ }
+ if len(searchResult.Responses) != 2 {
+ t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
+ }
+
+ // First response: match-all should see all three documents.
+ sres := searchResult.Responses[0]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Second response: term query on "golang" should match two documents.
+ sres = searchResult.Responses[1]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 2 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 2 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+// TestMultiSearchWithOneRequest verifies that a request without indices
+// picks up the default index set on the MultiSearchService itself.
+func TestMultiSearchWithOneRequest(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Tags: []string{"golang", "elasticsearch"},
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Spawn two search queries with one roundtrip
+ query := NewMatchAllQuery()
+ source := NewSearchSource().Query(query).Size(10)
+ sreq := NewSearchRequest().Source(source)
+
+ searchResult, err := client.MultiSearch().
+ Index(testIndexName).
+ Add(sreq).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Responses == nil {
+ t.Fatal("expected responses != nil; got nil")
+ }
+ if len(searchResult.Responses) != 1 {
+ t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses))
+ }
+
+ sres := searchResult.Responses[0]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go
new file mode 100644
index 00000000..e0f601ec
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go
@@ -0,0 +1,311 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+ "time"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// NodesInfoService allows to retrieve one or more or all of the
+// cluster nodes information.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+ client *Client // client used to perform the HTTP request
+ pretty bool // if true, ask the server to indent the returned JSON
+ nodeId []string // node IDs/names to query; defaults to ["_all"]
+ metric []string // metrics to return; defaults to ["_all"]
+ flatSettings *bool // optional flat_settings parameter
+ human *bool // optional human parameter (human-readable time/byte values)
+}
+
+// NewNodesInfoService creates a new NodesInfoService that, by default,
+// targets all nodes ("_all") and returns all metrics ("_all").
+func NewNodesInfoService(client *Client) *NodesInfoService {
+ s := &NodesInfoService{client: client}
+ s.nodeId = []string{"_all"}
+ s.metric = []string{"_all"}
+ return s
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+ // Copy into a fresh slice so the caller's backing array is not shared.
+ s.nodeId = append([]string{}, nodeId...)
+ return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+ // Copy into a fresh slice so the caller's backing array is not shared.
+ s.metric = append([]string{}, metric...)
+ return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+ v := flatSettings
+ s.flatSettings = &v
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+ v := human
+ s.human = &v
+ return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the endpoint path and query parameters for the operation.
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+ // Node IDs and metrics are comma-joined into the URI template.
+ path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Optional query string parameters; only send what was explicitly set.
+ params := url.Values{}
+ if v := s.flatSettings; v != nil {
+ params.Set("flat_settings", fmt.Sprintf("%t", *v))
+ }
+ if v := s.human; v != nil {
+ params.Set("human", fmt.Sprintf("%t", *v))
+ }
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// There are no required fields (node IDs and metrics default to "_all"),
+// so validation always succeeds.
+func (s *NodesInfoService) Validate() error {
+ return nil
+}
+
+// Do executes the nodes-info request and decodes the server response.
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+ // Pre-conditions first.
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Perform the GET request; the nodes info API takes no body.
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Decode the JSON body into the typed response.
+ ret := &NodesInfoResponse{}
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+ // ClusterName is the name of the cluster.
+ ClusterName string `json:"cluster_name"`
+ // Nodes maps a node ID to the information reported for that node.
+ Nodes map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+ // Name of the node, e.g. "Mister Fear"
+ Name string `json:"name"`
+ // TransportAddress, e.g. "inet[/127.0.0.1:9300]"
+ TransportAddress string `json:"transport_address"`
+ // Host is the host name, e.g. "macbookair"
+ Host string `json:"host"`
+ // IP is the IP address, e.g. "192.168.1.2"
+ IP string `json:"ip"`
+ // Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+ Version string `json:"version"`
+ // Build is the Elasticsearch build, e.g. "36a29a7"
+ Build string `json:"build"`
+ // HTTPAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPAddress string `json:"http_address"`
+ // HTTPSAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPSAddress string `json:"https_address"`
+
+ // Settings of the node, e.g. paths and pidfile.
+ Settings map[string]interface{} `json:"settings"`
+
+ // OS information, e.g. CPU and memory.
+ OS *NodesInfoNodeOS `json:"os"`
+
+ // Process information, e.g. max file descriptors.
+ Process *NodesInfoNodeProcess `json:"process"`
+
+ // JVM information, e.g. VM version (typed NodesInfoNodeJVM so its fields decode).
+ JVM *NodesInfoNodeJVM `json:"jvm"`
+
+ // ThreadPool information.
+ ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+ // Network information.
+ Network *NodesInfoNodeNetwork `json:"network"`
+
+ // Transport information, e.g. bound and publish addresses.
+ Transport *NodesInfoNodeTransport `json:"transport"`
+
+ // HTTP information.
+ HTTP *NodesInfoNodeHTTP `json:"http"`
+
+ // Plugins information.
+ Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ AvailableProcessors int `json:"available_processors"` // e.g. 4
+
+ // CPU information
+ CPU struct {
+ Vendor string `json:"vendor"` // e.g. Intel
+ Model string `json:"model"` // e.g. iMac15,1
+ MHz int `json:"mhz"` // e.g. 3500
+ TotalCores int `json:"total_cores"` // e.g. 4
+ TotalSockets int `json:"total_sockets"` // e.g. 4
+ CoresPerSocket int `json:"cores_per_socket"` // e.g. 16
+ CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256
+ } `json:"cpu"`
+
+ // Mem information
+ Mem struct {
+ Total string `json:"total"` // e.g. 16gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184
+ } `json:"mem"`
+
+ // Swap information
+ Swap struct {
+ Total string `json:"total"` // e.g. 1gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824
+ } `json:"swap"`
+}
+
+type NodesInfoNodeProcess struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ ID int `json:"id"` // process id, e.g. 87079
+ MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768
+ Mlockall bool `json:"mlockall"` // e.g. false
+}
+
+type NodesInfoNodeJVM struct {
+ PID int `json:"pid"` // process id, e.g. 87079
+ Version string `json:"version"` // e.g. "1.8.0_25"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.25-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+
+ // Mem information
+ Mem struct {
+ HeapInit string `json:"heap_init"` // e.g. 1gb
+ HeapInitInBytes int `json:"heap_init_in_bytes"`
+ HeapMax string `json:"heap_max"` // e.g. 4gb
+ HeapMaxInBytes int `json:"heap_max_in_bytes"`
+ NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb
+ NonHeapInitInBytes int `json:"non_heap_init_in_bytes"`
+ NonHeapMax string `json:"non_heap_max"` // e.g. 0b
+ NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"`
+ DirectMax string `json:"direct_max"` // e.g. 4gb
+ DirectMaxInBytes int `json:"direct_max_in_bytes"`
+ } `json:"mem"`
+
+ GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
+ MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"]
+}
+
+type NodesInfoNodeThreadPool struct {
+ Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"`
+ Bench *NodesInfoNodeThreadPoolSection `json:"bench"`
+ Listener *NodesInfoNodeThreadPoolSection `json:"listener"`
+ Index *NodesInfoNodeThreadPoolSection `json:"index"`
+ Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"`
+ Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"`
+ Generic *NodesInfoNodeThreadPoolSection `json:"generic"`
+ Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"`
+ Search *NodesInfoNodeThreadPoolSection `json:"search"`
+ Flush *NodesInfoNodeThreadPoolSection `json:"flush"`
+ Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"`
+ Management *NodesInfoNodeThreadPoolSection `json:"management"`
+ Get *NodesInfoNodeThreadPoolSection `json:"get"`
+ Merge *NodesInfoNodeThreadPoolSection `json:"merge"`
+ Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"`
+ Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"`
+}
+
+type NodesInfoNodeThreadPoolSection struct {
+ Type string `json:"type"` // e.g. fixed
+ Min int `json:"min"` // e.g. 4
+ Max int `json:"max"` // e.g. 4
+ KeepAlive string `json:"keep_alive"` // e.g. "5m"
+ QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
+}
+
+type NodesInfoNodeNetwork struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ PrimaryInterface struct {
+ Address string `json:"address"` // e.g. 192.168.1.2
+ Name string `json:"name"` // e.g. en0
+ MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
+ } `json:"primary_interface"`
+}
+
+type NodesInfoNodeTransport struct {
+ BoundAddress string `json:"bound_address"` // e.g. inet[/127.0.0.1:9300]
+ PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+}
+
+type NodesInfoNodeHTTP struct {
+ BoundAddress string `json:"bound_address"` // e.g. inet[/127.0.0.1:9300]
+ PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+ MaxContentLength string `json:"max_content_length"` // e.g. "100mb"
+ MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"`
+}
+
+type NodesInfoNodePlugin struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Site bool `json:"site"`
+ JVM bool `json:"jvm"`
+ URL string `json:"url"` // e.g. /_plugin/dummy/
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info_test.go
new file mode 100644
index 00000000..0402b270
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestNodesInfo(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := client.NodesInfo().Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if info == nil {
+ t.Fatal("expected nodes info")
+ }
+
+ if info.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", info.ClusterName)
+ }
+ if len(info.Nodes) == 0 {
+ t.Errorf("expected some nodes; got: %d", len(info.Nodes))
+ }
+ for id, node := range info.Nodes {
+ if id == "" {
+ t.Errorf("expected node id; got: %q", id)
+ }
+ if node == nil {
+ t.Fatalf("expected node info; got: %v", node)
+ }
+ if node.IP == "" {
+ t.Errorf("expected node IP; got: %q", node.IP)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go
new file mode 100644
index 00000000..16488d4e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go
@@ -0,0 +1,135 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// OptimizeService optimizes one or more indices via the _optimize endpoint.
+type OptimizeService struct {
+ client *Client // client used to perform the HTTP request
+ indices []string // indices to optimize; may be empty
+ maxNumSegments *int // optional max_num_segments parameter
+ onlyExpungeDeletes *bool // optional only_expunge_deletes parameter
+ flush *bool // optional flush parameter
+ waitForMerge *bool // optional wait_for_merge parameter
+ force *bool // optional force parameter
+ pretty bool // if true, ask the server to indent the returned JSON
+}
+
+// NewOptimizeService creates a new OptimizeService for the given client.
+func NewOptimizeService(client *Client) *OptimizeService {
+ return &OptimizeService{
+ client: client,
+ indices: make([]string, 0),
+ }
+}
+
+// Index adds a single index to the list of indices to optimize.
+func (s *OptimizeService) Index(index string) *OptimizeService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices adds the given indices to the list of indices to optimize.
+func (s *OptimizeService) Indices(indices ...string) *OptimizeService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// MaxNumSegments sets the max_num_segments query parameter.
+func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
+ s.maxNumSegments = &maxNumSegments
+ return s
+}
+
+// OnlyExpungeDeletes sets the only_expunge_deletes query parameter.
+func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
+ s.onlyExpungeDeletes = &onlyExpungeDeletes
+ return s
+}
+
+// Flush sets the flush query parameter.
+func (s *OptimizeService) Flush(flush bool) *OptimizeService {
+ s.flush = &flush
+ return s
+}
+
+// WaitForMerge sets the wait_for_merge query parameter.
+func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
+ s.waitForMerge = &waitForMerge
+ return s
+}
+
+// Force sets the force query parameter.
+func (s *OptimizeService) Force(force bool) *OptimizeService {
+ s.force = &force
+ return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
+ s.pretty = pretty
+ return s
+}
+
+// Do executes the optimize request and decodes the server response.
+func (s *OptimizeService) Do() (*OptimizeResult, error) {
+ // Expand each index name through the URI template so special
+ // characters are escaped, then join them into the path.
+ parts := make([]string, 0, len(s.indices))
+ for _, index := range s.indices {
+ expanded, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ parts = append(parts, expanded)
+ }
+ path := "/" + strings.Join(parts, ",") + "/_optimize"
+
+ // Only send parameters that were explicitly set.
+ params := make(url.Values)
+ if v := s.maxNumSegments; v != nil {
+ params.Set("max_num_segments", fmt.Sprintf("%d", *v))
+ }
+ if v := s.onlyExpungeDeletes; v != nil {
+ params.Set("only_expunge_deletes", fmt.Sprintf("%v", *v))
+ }
+ if v := s.flush; v != nil {
+ params.Set("flush", fmt.Sprintf("%v", *v))
+ }
+ if v := s.waitForMerge; v != nil {
+ params.Set("wait_for_merge", fmt.Sprintf("%v", *v))
+ }
+ if v := s.force; v != nil {
+ params.Set("force", fmt.Sprintf("%v", *v))
+ }
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+
+ // Perform the POST request; the optimize API takes no body.
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Decode the JSON body into the typed result.
+ ret := &OptimizeResult{}
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of an optimize request.
+
+// OptimizeResult is the result of an optimize request; it reports
+// the shards the operation was executed on.
+type OptimizeResult struct {
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize_test.go
new file mode 100644
index 00000000..c47de3a9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestOptimize(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add some documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Optimize documents
+ res, err := client.Optimize(testIndexName, testIndexName2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result; got nil")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go
new file mode 100644
index 00000000..69d04dfc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go
@@ -0,0 +1,310 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html.
+type PercolateService struct {
+ client *Client // client used to perform the HTTP request
+ pretty bool // if true, ask the server to indent the returned JSON
+ index string // index of the document being percolated (required)
+ typ string // type of the document being percolated (required)
+ id string // optional id of an existing document to percolate
+ version interface{} // explicit version number for concurrency control
+ versionType string // specific version type
+ routing []string // specific routing values
+ preference string // node/shard preference
+ ignoreUnavailable *bool // ignore unavailable concrete indices
+ percolateIndex string // index to percolate into; defaults to index
+ percolatePreference string // shard preference for the percolate request
+ percolateRouting string // routing value for the existing document
+ source string // URL-encoded request definition
+ allowNoIndices *bool // allow wildcard expressions with no matches
+ expandWildcards string // expand wildcards to open/closed indices
+ percolateFormat string // return matching query IDs as an array
+ percolateType string // type to percolate into; defaults to typ
+ bodyJson interface{} // request body as JSON value; wins over bodyString
+ bodyString string // request body as raw string
+}
+
+// NewPercolateService creates a new PercolateService.
+func NewPercolateService(client *Client) *PercolateService {
+ s := &PercolateService{client: client}
+ s.routing = make([]string, 0)
+ return s
+}
+
+// Index is the name of the index of the document being percolated.
+func (s *PercolateService) Index(index string) *PercolateService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document being percolated.
+func (s *PercolateService) Type(typ string) *PercolateService {
+ s.typ = typ
+ return s
+}
+
+// Id is to substitute the document in the request body with a
+// document that is known by the specified id. On top of the id,
+// the index and type parameter will be used to retrieve
+// the document from within the cluster.
+func (s *PercolateService) Id(id string) *PercolateService {
+ s.id = id
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions
+// to concrete indices that are open, closed or both.
+func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// PercolateFormat indicates whether to return an array of matching
+// query IDs instead of objects.
+func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService {
+ s.percolateFormat = percolateFormat
+ return s
+}
+
+// PercolateType is the type to percolate document into. Defaults to type.
+func (s *PercolateService) PercolateType(percolateType string) *PercolateService {
+ s.percolateType = percolateType
+ return s
+}
+
+// PercolateRouting is the routing value to use when percolating
+// the existing document.
+func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService {
+ s.percolateRouting = percolateRouting
+ return s
+}
+
+// Source is the URL-encoded request definition.
+func (s *PercolateService) Source(source string) *PercolateService {
+ s.source = source
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// PercolateIndex is the index to percolate the document into. Defaults to index.
+func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService {
+ s.percolateIndex = percolateIndex
+ return s
+}
+
+// PercolatePreference defines which shard to prefer when executing
+// the percolate request.
+func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService {
+ s.percolatePreference = percolatePreference
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PercolateService) Version(version interface{}) *PercolateService {
+ s.version = version
+ return s
+}
+
+// VersionType is the specific version type.
+func (s *PercolateService) VersionType(versionType string) *PercolateService {
+ s.versionType = versionType
+ return s
+}
+
+// Routing is a list of specific routing values.
+func (s *PercolateService) Routing(routing []string) *PercolateService {
+ s.routing = routing
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *PercolateService) Preference(preference string) *PercolateService {
+ s.preference = preference
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *PercolateService) Pretty(pretty bool) *PercolateService {
+ s.pretty = pretty
+ return s
+}
+
+// Doc wraps the given document into the "doc" key of the body.
+func (s *PercolateService) Doc(doc interface{}) *PercolateService {
+ return s.BodyJson(map[string]interface{}{"doc": doc})
+}
+
+// BodyJson is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyJson(body interface{}) *PercolateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyString(body string) *PercolateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PercolateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var path string
+ var err error
+ if s.id == "" {
+ path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ "id": s.id,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ if len(s.routing) > 0 {
+ params.Set("routing", strings.Join(s.routing, ","))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.percolateIndex != "" {
+ params.Set("percolate_index", s.percolateIndex)
+ }
+ if s.percolatePreference != "" {
+ params.Set("percolate_preference", s.percolatePreference)
+ }
+ if s.percolateRouting != "" {
+ params.Set("percolate_routing", s.percolateRouting)
+ }
+ if s.source != "" {
+ params.Set("source", s.source)
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.percolateFormat != "" {
+ params.Set("percolate_format", s.percolateFormat)
+ }
+ if s.percolateType != "" {
+ params.Set("percolate_type", s.percolateType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Index and Type are required; everything else is optional.
+func (s *PercolateService) Validate() error {
+ var missing []string
+ if s.index == "" {
+ missing = append(missing, "Index")
+ }
+ if s.typ == "" {
+ missing = append(missing, "Type")
+ }
+ if len(missing) == 0 {
+ return nil
+ }
+ return fmt.Errorf("missing required fields: %v", missing)
+}
+
+// Do executes the percolate request and decodes the server response.
+func (s *PercolateService) Do() (*PercolateResponse, error) {
+ // Pre-conditions first.
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // The request body is either the JSON value or the raw string;
+ // bodyJson wins when both are set.
+ var body interface{} = s.bodyString
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ }
+
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Decode the JSON body into the typed response.
+ ret := &PercolateResponse{}
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// PercolateResponse is the response of PercolateService.Do.
+type PercolateResponse struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ Total int64 `json:"total"` // total matches
+ Matches []*PercolateMatch `json:"matches,omitempty"`
+ Facets SearchFacets `json:"facets,omitempty"` // results from facets
+ Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations
+}
+
+// PercolateMatch returns a single match in a PercolateResponse.
+type PercolateMatch struct {
+ Index string `json:"_index,omitempty"`
+ Id string `json:"_id"`
+ Score float64 `json:"_score,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate_test.go
new file mode 100644
index 00000000..cb4863d8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate_test.go
@@ -0,0 +1,88 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestPercolate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Register a query in the ".percolator" type.
+ search := NewSearchSource().Query(NewMatchQuery("message", "Golang"))
+ _, err = client.Index().
+ Index(testIndexName).Type(".percolator").Id("1").
+ BodyJson(search.Source()).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Percolate should return our registered query
+ newTweet := tweet{User: "olivere", Message: "Golang is fun."}
+ res, err := client.Percolate().
+ Index(testIndexName).Type("tweet").
+ Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}).
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if res.Total != 1 {
+ t.Fatalf("expected 1 result; got: %d", res.Total)
+ }
+ if res.Matches == nil {
+ t.Fatalf("expected Matches; got: %v", res.Matches)
+ }
+ matches := res.Matches
+ if matches == nil {
+ t.Fatalf("expected matches as map; got: %v", matches)
+ }
+ if len(matches) != 1 {
+ t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+ }
+ if matches[0].Id != "1" {
+ t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+ }
+
+ // Percolating an existing document should return our registered query
+ res, err = client.Percolate().
+ Index(testIndexName).Type("tweet").
+ Id("1").
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if res.Total != 1 {
+ t.Fatalf("expected 1 result; got: %d", res.Total)
+ }
+ if res.Matches == nil {
+ t.Fatalf("expected Matches; got: %v", res.Matches)
+ }
+ matches = res.Matches
+ if matches == nil {
+ t.Fatalf("expected matches as map; got: %v", matches)
+ }
+ if len(matches) != 1 {
+ t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+ }
+ if matches[0].Id != "1" {
+ t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go
new file mode 100644
index 00000000..84a2438d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go
@@ -0,0 +1,117 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
+// PingService checks if an Elasticsearch server on a given URL is alive.
+// When asked for, it can also return various information about the
+// Elasticsearch server, e.g. the Elasticsearch version number.
+//
+// Ping simply starts a HTTP GET request to the URL of the server.
+// If the server responds with HTTP Status code 200 OK, the server is alive.
+type PingService struct {
+ client *Client // client whose HTTP transport performs the request
+ url string // base URL to ping; defaults to DefaultURL
+ timeout string // optional "timeout" query parameter
+ httpHeadOnly bool // if true, issue HEAD and skip decoding the body
+ pretty bool // if true, ask the server to indent the returned JSON
+}
+
+// PingResult is the result returned from querying the Elasticsearch server.
+type PingResult struct {
+ Status int `json:"status"`
+ Name string `json:"name"`
+ ClusterName string `json:"cluster_name"`
+ Version struct {
+ Number string `json:"number"`
+ BuildHash string `json:"build_hash"`
+ BuildTimestamp string `json:"build_timestamp"`
+ BuildSnapshot bool `json:"build_snapshot"`
+ LuceneVersion string `json:"lucene_version"`
+ } `json:"version"`
+ TagLine string `json:"tagline"`
+}
+
+// NewPingService creates a new PingService that pings DefaultURL
+// with a plain GET request.
+func NewPingService(client *Client) *PingService {
+ s := &PingService{client: client}
+ s.url = DefaultURL
+ return s
+}
+
+// URL sets the URL of the Elasticsearch server to ping.
+func (s *PingService) URL(url string) *PingService {
+ s.url = url
+ return s
+}
+
+// Timeout sets the "timeout" query parameter sent with the ping.
+func (s *PingService) Timeout(timeout string) *PingService {
+ s.timeout = timeout
+ return s
+}
+
+// HttpHeadOnly makes the service only return the status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+ s.httpHeadOnly = httpHeadOnly
+ return s
+}
+
+// Pretty asks the server to indent the returned JSON.
+func (s *PingService) Pretty(pretty bool) *PingService {
+ s.pretty = pretty
+ return s
+}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+ url_ := s.url + "/"
+
+ params := make(url.Values)
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(params) > 0 {
+ url_ += "?" + params.Encode()
+ }
+
+ var method string
+ if s.httpHeadOnly {
+ method = "HEAD"
+ } else {
+ method = "GET"
+ }
+
+ // Notice: This service must NOT use PerformRequest!
+ req, err := NewRequest(method, url_)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ res, err := s.client.c.Do((*http.Request)(req))
+ if err != nil {
+ return nil, 0, err
+ }
+ defer res.Body.Close()
+
+ var ret *PingResult
+ if !s.httpHeadOnly {
+ ret = new(PingResult)
+ if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+ return nil, res.StatusCode, err
+ }
+ }
+
+ return ret, res.StatusCode, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping_test.go
new file mode 100644
index 00000000..ba76dcf8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/http"
+	"testing"
+)
+
+// TestPingGet verifies that a GET ping returns HTTP 200 and a populated
+// PingResult (status, node name, version number).
+func TestPingGet(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.Ping().Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if code != http.StatusOK {
+		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+	}
+	if res == nil {
+		t.Fatalf("expected to return result, got: %v", res)
+	}
+	if res.Status != http.StatusOK {
+		t.Errorf("expected Status = %d; got %d", http.StatusOK, res.Status)
+	}
+	if res.Name == "" {
+		t.Errorf("expected Name != \"\"; got %q", res.Name)
+	}
+	if res.Version.Number == "" {
+		t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
+	}
+}
+
+// TestPingHead verifies that a HEAD-only ping returns HTTP 200 and
+// a nil PingResult.
+func TestPingHead(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.Ping().HttpHeadOnly(true).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if code != http.StatusOK {
+		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+	}
+	if res != nil {
+		t.Errorf("expected not to return result, got: %v", res)
+	}
+}
+
+// TestPingHeadFailure pings an unreachable port and expects an error,
+// a non-200 status code, and a nil result.
+func TestPingHeadFailure(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.Ping().
+		URL("http://127.0.0.1:9299").
+		HttpHeadOnly(true).
+		Do()
+	if err == nil {
+		t.Error("expected error, got nil")
+	}
+	if code == http.StatusOK {
+		t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
+	}
+	if res != nil {
+		t.Errorf("expected not to return result, got: %v", res)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go
new file mode 100644
index 00000000..0491e509
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// Blank-identifier references keep all imports "used" regardless of
+// which ones the code below currently needs.
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// PutMappingService allows to register specific mapping definition
+// for a specific type.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-put-mapping.html.
+type PutMappingService struct {
+	client            *Client
+	pretty            bool
+	typ               string
+	index             []string
+	masterTimeout     string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	ignoreConflicts   *bool
+	timeout           string
+	bodyJson          map[string]interface{}
+	bodyString        string
+}
+
+// NewPutMappingService creates a new PutMappingService.
+func NewPutMappingService(client *Client) *PutMappingService {
+	return &PutMappingService{
+		client: client,
+		index:  make([]string, 0),
+	}
+}
+
+// Index is a list of index names the mapping should be added to
+// (supports wildcards); use `_all` or omit to add the mapping on all indices.
+func (s *PutMappingService) Index(index ...string) *PutMappingService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is the name of the document type.
+func (s *PutMappingService) Type(typ string) *PutMappingService {
+	s.typ = typ
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *PutMappingService) Timeout(timeout string) *PutMappingService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *PutMappingService) MasterTimeout(masterTimeout string) *PutMappingService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *PutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *PutMappingService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *PutMappingService) AllowNoIndices(allowNoIndices bool) *PutMappingService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *PutMappingService) ExpandWildcards(expandWildcards string) *PutMappingService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// IgnoreConflicts specifies whether to ignore conflicts while updating
+// the mapping (default: false).
+func (s *PutMappingService) IgnoreConflicts(ignoreConflicts bool) *PutMappingService {
+	s.ignoreConflicts = &ignoreConflicts
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *PutMappingService) Pretty(pretty bool) *PutMappingService {
+	s.pretty = pretty
+	return s
+}
+
+// BodyJson contains the mapping definition.
+func (s *PutMappingService) BodyJson(mapping map[string]interface{}) *PutMappingService {
+	s.bodyJson = mapping
+	return s
+}
+
+// BodyString is the mapping definition serialized as a string.
+func (s *PutMappingService) BodyString(mapping string) *PutMappingService {
+	s.bodyString = mapping
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PutMappingService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+
+	// Build URL: Typ MUST be specified and is verified in Validate.
+	// With indices: /{index}/_mapping/{type}; without: /_mapping/{type}.
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+			"index": strings.Join(s.index, ","),
+			"type":  s.typ,
+		})
+	} else {
+		path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
+			"type": s.typ,
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.ignoreConflicts != nil {
+		params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts))
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Type and one of BodyJson/BodyString are required.
+func (s *PutMappingService) Validate() error {
+	var invalid []string
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutMappingService) Do() (*PutMappingResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body; BodyJson takes precedence over BodyString.
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PutMappingResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// PutMappingResponse is the response of PutMappingService.Do.
+type PutMappingResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping_test.go
new file mode 100644
index 00000000..d6245c2b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping_test.go
@@ -0,0 +1,94 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestPutMappingURL checks buildURL output for no indices, a wildcard
+// index, and multiple indices (note URL-escaping of '*' and ',').
+func TestPutMappingURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Type     string
+		Expected string
+	}{
+		{
+			[]string{},
+			"tweet",
+			"/_mapping/tweet",
+		},
+		{
+			[]string{"*"},
+			"tweet",
+			"/%2A/_mapping/tweet",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			"tweet",
+			"/store-1%2Cstore-2/_mapping/tweet",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+// TestMappingLifecycle exercises put, get, and delete of a mapping
+// end-to-end against a live cluster.
+func TestMappingLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	mapping := `{
+		"tweetdoc":{
+			"properties":{
+				"message":{
+					"type":"string",
+					"store":true
+				}
+			}
+		}
+	}`
+
+	putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do()
+	if err != nil {
+		t.Fatalf("expected put mapping to succeed; got: %v", err)
+	}
+	if putresp == nil {
+		t.Fatalf("expected put mapping response; got: %v", putresp)
+	}
+	if !putresp.Acknowledged {
+		t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
+	}
+
+	getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do()
+	if err != nil {
+		t.Fatalf("expected get mapping to succeed; got: %v", err)
+	}
+	if getresp == nil {
+		t.Fatalf("expected get mapping response; got: %v", getresp)
+	}
+	props, ok := getresp[testIndexName2]
+	if !ok {
+		t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
+	}
+
+	delresp, err := client.DeleteMapping().Index(testIndexName2).Type("tweetdoc").Do()
+	if err != nil {
+		t.Fatalf("expected delete mapping to succeed; got: %v", err)
+	}
+	if delresp == nil {
+		t.Fatalf("expected delete mapping response; got: %v", delresp)
+	}
+	if !delresp.Acknowledged {
+		t.Fatalf("expected delete mapping ack; got: %v", delresp.Acknowledged)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go
new file mode 100644
index 00000000..4a4a84b0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	opType      string
+	version     *int
+	versionType string
+	bodyJson    interface{}
+	bodyString  string
+}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+	return &PutTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+	s.id = id
+	return s
+}
+
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+	s.opType = opType
+	return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+	s.version = &version
+	return s
+}
+
+// VersionType is a specific version type.
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PutTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if s.opType != "" {
+		params.Set("op_type", s.opType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Id and one of BodyJson/BodyString are required.
+func (s *PutTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body; BodyJson takes precedence over BodyString.
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PutTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// PutTemplateResponse is the response of PutTemplateService.Do.
+type PutTemplateResponse struct {
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+	Created bool   `json:"created"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go
new file mode 100644
index 00000000..0c9e6706
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go
@@ -0,0 +1,14 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query represents the generic query interface.
+// A query's only purpose is to return the
+// source of the query as a JSON-serializable
+// object. Returning a map[string]interface{}
+// will do.
+type Query interface {
+	Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go
new file mode 100644
index 00000000..1f0ded80
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// RefreshService explicitly refreshes one or more indices via a POST
+// to /{indices}/_refresh (or /_refresh when no index is given).
+type RefreshService struct {
+	client  *Client
+	indices []string
+	force   *bool
+	pretty  bool
+}
+
+// NewRefreshService creates a new RefreshService with no indices selected.
+func NewRefreshService(client *Client) *RefreshService {
+	builder := &RefreshService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+// Index adds a single index to refresh.
+func (s *RefreshService) Index(index string) *RefreshService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds a list of indices to refresh.
+func (s *RefreshService) Indices(indices ...string) *RefreshService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Force sets the "force" query string parameter.
+func (s *RefreshService) Force(force bool) *RefreshService {
+	s.force = &force
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *RefreshService) Pretty(pretty bool) *RefreshService {
+	s.pretty = pretty
+	return s
+}
+
+// Do executes the refresh and returns the shard information.
+func (s *RefreshService) Do() (*RefreshResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part: each index name is URI-escaped, then joined by commas.
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	path += "/_refresh"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(RefreshResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a refresh request.
+
+// RefreshResult is the outcome of RefreshService.Do.
+type RefreshResult struct {
+	Shards shardsInfo `json:"_shards,omitempty"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh_test.go
new file mode 100644
index 00000000..885e6336
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestRefresh indexes a few documents, flushes, and checks that a
+// refresh across two indices returns a non-nil result.
+func TestRefresh(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add some documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Refresh indices
+	res, err := client.Refresh(testIndexName, testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatal("expected result; got nil")
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go
new file mode 100644
index 00000000..e36c6563
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go
@@ -0,0 +1,258 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"errors"
+)
+
+// Reindexer simplifies the process of reindexing an index. You typically
+// reindex a source index to a target index. However, you can also specify
+// a query that filters out documents from the source index before bulk
+// indexing them into the target index. The caller may also specify a
+// different client for the target, e.g. when copying indices from one
+// Elasticsearch cluster to another.
+//
+// Internally, the Reindex users a scan and scroll operation on the source
+// index and bulk indexing to push data into the target index.
+//
+// By default the reindexer fetches the _source, _parent, and _routing
+// attributes from the source index, using the provided CopyToTargetIndex
+// will copy those attributes into the destinationIndex.
+// This behaviour can be overridden by setting the ScanFields and providing a
+// custom ReindexerFunc.
+//
+// The caller is responsible for setting up and/or clearing the target index
+// before starting the reindex process.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+type Reindexer struct {
+	sourceClient, targetClient *Client
+	sourceIndex                string
+	query                      Query
+	scanFields                 []string
+	bulkSize                   int
+	scroll                     string
+	reindexerFunc              ReindexerFunc
+	progress                   ReindexerProgressFunc
+	statsOnly                  bool
+}
+
+// A ReindexerFunc receives each hit from the sourceIndex.
+// It can choose to add any number of BulkableRequests to the bulkService.
+type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error
+
+// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's
+// _source, _parent, and _routing attributes into the targetIndex
+func CopyToTargetIndex(targetIndex string) ReindexerFunc {
+	return func(hit *SearchHit, bulkService *BulkService) error {
+		// TODO(oe) Do we need to deserialize here?
+		source := make(map[string]interface{})
+		if err := json.Unmarshal(*hit.Source, &source); err != nil {
+			return err
+		}
+		// Parent and routing are only propagated when present in the hit's fields.
+		req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source)
+		if parent, ok := hit.Fields["_parent"].(string); ok {
+			req.Parent(parent)
+		}
+		if routing, ok := hit.Fields["_routing"].(string); ok {
+			req.Routing(routing)
+		}
+		bulkService.Add(req)
+		return nil
+	}
+}
+
+// ReindexerProgressFunc is a callback that can be used with Reindexer
+// to report progress while reindexing data.
+type ReindexerProgressFunc func(current, total int64)
+
+// ReindexerResponse is returned from the Do func in a Reindexer.
+// By default, it returns the number of succeeded and failed bulk operations.
+// To return details about all failed items, set StatsOnly to false in
+// Reindexer.
+type ReindexerResponse struct {
+	Success int64
+	Failed  int64
+	Errors  []*BulkResponseItem
+}
+
+// NewReindexer returns a new Reindexer.
+func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer {
+	return &Reindexer{
+		sourceClient:  client,
+		sourceIndex:   source,
+		reindexerFunc: reindexerFunc,
+		statsOnly:     true,
+	}
+}
+
+// TargetClient specifies a different client for the target. This is
+// necessary when the target index is in a different Elasticsearch cluster.
+// By default, the source and target clients are the same.
+func (ix *Reindexer) TargetClient(c *Client) *Reindexer {
+	ix.targetClient = c
+	return ix
+}
+
+// Query specifies the query to apply to the source. It filters out those
+// documents to be indexed into target. A nil query does not filter out any
+// documents.
+func (ix *Reindexer) Query(q Query) *Reindexer {
+	ix.query = q
+	return ix
+}
+
+// ScanFields specifies the fields the scan query should load.
+// The default fields are _source, _parent, _routing.
+func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer {
+	ix.scanFields = scanFields
+	return ix
+}
+
+// BulkSize returns the number of documents to send to Elasticsearch per chunk.
+// The default is 500.
+func (ix *Reindexer) BulkSize(size int) *Reindexer {
+	ix.bulkSize = size
+	return ix
+}
+
+// Scroll specifies for how long the scroll operation on the source index
+// should be maintained. The default is 5m.
+func (ix *Reindexer) Scroll(timeout string) *Reindexer {
+	ix.scroll = timeout
+	return ix
+}
+
+// Progress indicates a callback that will be called while indexing.
+func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer {
+	ix.progress = f
+	return ix
+}
+
+// StatsOnly indicates whether the Do method should return details e.g. about
+// the documents that failed while indexing. It is true by default, i.e. only
+// the number of documents that succeeded/failed are returned. Set to false
+// if you want all the details.
+func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
+	ix.statsOnly = statsOnly
+	return ix
+}
+
+// Do starts the reindexing process. It applies defaults for any options
+// not set by the caller, scans the source index with scan & scroll, and
+// bulk-indexes the hits into the target via the configured ReindexerFunc.
+// It returns a ReindexerResponse with success/failure counts (and, when
+// StatsOnly(false) was set, the failed items).
+func (ix *Reindexer) Do() (*ReindexerResponse, error) {
+	if ix.sourceClient == nil {
+		return nil, errors.New("no source client")
+	}
+	if ix.sourceIndex == "" {
+		return nil, errors.New("no source index")
+	}
+	if ix.targetClient == nil {
+		ix.targetClient = ix.sourceClient
+	}
+	if ix.scanFields == nil {
+		ix.scanFields = []string{"_source", "_parent", "_routing"}
+	}
+	if ix.bulkSize <= 0 {
+		ix.bulkSize = 500
+	}
+	if ix.scroll == "" {
+		ix.scroll = "5m"
+	}
+
+	// Count total to report progress (if necessary)
+	var err error
+	var current, total int64
+	if ix.progress != nil {
+		total, err = ix.count()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Prepare scan and scroll to iterate through the source index
+	scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...)
+	if ix.query != nil {
+		scanner = scanner.Query(ix.query)
+	}
+	cursor, err := scanner.Do()
+	if err != nil {
+		// Bail out early: without this check a failed scan left cursor
+		// nil and cursor.Next below panicked with a nil dereference.
+		return nil, err
+	}
+
+	bulk := ix.targetClient.Bulk()
+
+	ret := &ReindexerResponse{
+		Errors: make([]*BulkResponseItem, 0),
+	}
+
+	// Main loop iterates through the source index and bulk indexes into target.
+	for {
+		docs, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			return ret, err
+		}
+
+		if docs.TotalHits() > 0 {
+			for _, hit := range docs.Hits.Hits {
+				if ix.progress != nil {
+					current++
+					ix.progress(current, total)
+				}
+
+				err := ix.reindexerFunc(hit, bulk)
+				if err != nil {
+					return ret, err
+				}
+
+				// Flush whenever the buffered actions reach the bulk size.
+				if bulk.NumberOfActions() >= ix.bulkSize {
+					bulk, err = ix.commit(bulk, ret)
+					if err != nil {
+						return ret, err
+					}
+				}
+			}
+		}
+	}
+
+	// Final flush of any remaining buffered actions.
+	if bulk.NumberOfActions() > 0 {
+		bulk, err = ix.commit(bulk, ret)
+		if err != nil {
+			return ret, err
+		}
+	}
+
+	return ret, nil
+}
+
+// count returns the number of documents in the source index.
+// The query is taken into account, if specified.
+func (ix *Reindexer) count() (int64, error) {
+	service := ix.sourceClient.Count(ix.sourceIndex)
+	if ix.query != nil {
+		service = service.Query(ix.query)
+	}
+	return service.Do()
+}
+
+// commit commits a bulk, updates the stats, and returns a fresh bulk service.
+func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
+	bres, err := bulk.Do()
+	if err != nil {
+		return nil, err
+	}
+	ret.Success += int64(len(bres.Succeeded()))
+	failed := bres.Failed()
+	ret.Failed += int64(len(failed))
+	// Failed item details are only collected when StatsOnly(false) was set.
+	if !ix.statsOnly {
+		ret.Errors = append(ret.Errors, failed...)
+	}
+	bulk = ix.targetClient.Bulk()
+	return bulk, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer_test.go
new file mode 100644
index 00000000..26efe487
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer_test.go
@@ -0,0 +1,288 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestReindexer copies all documents from the source index to the
+// target index and verifies the counts match.
+func TestReindexer(t *testing.T) {
+
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	sourceCount, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if sourceCount <= 0 {
+		t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+	}
+
+	targetCount, err := client.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != 0 {
+		t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+	}
+
+	r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2))
+	ret, err := r.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ret == nil {
+		t.Fatalf("expected result != %v; got: %v", nil, ret)
+	}
+	if ret.Success != sourceCount {
+		t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+	}
+	if ret.Failed != 0 {
+		t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+	}
+	if len(ret.Errors) != 0 {
+		t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+	}
+
+	if _, err := client.Flush().Index(testIndexName2).Do(); err != nil {
+		t.Fatal(err)
+	}
+
+	targetCount, err = client.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != sourceCount {
+		t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount)
+	}
+}
+
+// TestReindexerWithQuery reindexes only the documents matching a term
+// query and verifies the target count equals the filtered source count.
+func TestReindexerWithQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	q := NewTermQuery("user", "olivere")
+
+	sourceCount, err := client.Count(testIndexName).Query(q).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if sourceCount <= 0 {
+		t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+	}
+
+	targetCount, err := client.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != 0 {
+		t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+	}
+
+	r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2))
+	r = r.Query(q)
+	ret, err := r.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ret == nil {
+		t.Fatalf("expected result != %v; got: %v", nil, ret)
+	}
+	if ret.Success != sourceCount {
+		t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+	}
+	if ret.Failed != 0 {
+		t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+	}
+	if len(ret.Errors) != 0 {
+		t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+	}
+
+	if _, err := client.Flush().Index(testIndexName2).Do(); err != nil {
+		t.Fatal(err)
+	}
+
+	targetCount, err = client.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != sourceCount {
+		t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount)
+	}
+}
+
+// TestReindexerProgress checks the progress callback is invoked once
+// per document and always reports the correct total.
+func TestReindexerProgress(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	sourceCount, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if sourceCount <= 0 {
+		t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+	}
+
+	var calls int64
+	totalsOk := true
+	progress := func(current, total int64) {
+		calls += 1
+		totalsOk = totalsOk && total == sourceCount
+	}
+
+	r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2))
+	r = r.Progress(progress)
+	ret, err := r.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ret == nil {
+		t.Fatalf("expected result != %v; got: %v", nil, ret)
+	}
+	if ret.Success != sourceCount {
+		t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+	}
+	if ret.Failed != 0 {
+		t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+	}
+	if len(ret.Errors) != 0 {
+		t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+	}
+
+	if calls != sourceCount {
+		t.Errorf("expected progress to be called %d times; got: %d", sourceCount, calls)
+	}
+	if !totalsOk {
+		t.Errorf("expected totals in progress to be %d", sourceCount)
+	}
+}
+
+// TestReindexerWithTargetClient reindexes using a separate client for
+// the target (same cluster here, but exercises the TargetClient path).
+func TestReindexerWithTargetClient(t *testing.T) {
+	sourceClient := setupTestClientAndCreateIndexAndAddDocs(t)
+	targetClient, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sourceCount, err := sourceClient.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if sourceCount <= 0 {
+		t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+	}
+
+	targetCount, err := targetClient.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != 0 {
+		t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+	}
+
+	r := NewReindexer(sourceClient, testIndexName, CopyToTargetIndex(testIndexName2))
+	r = r.TargetClient(targetClient)
+	ret, err := r.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ret == nil {
+		t.Fatalf("expected result != %v; got: %v", nil, ret)
+	}
+	if ret.Success != sourceCount {
+		t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+	}
+	if ret.Failed != 0 {
+		t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+	}
+	if len(ret.Errors) != 0 {
+		t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+	}
+
+	if _, err := targetClient.Flush().Index(testIndexName2).Do(); err != nil {
+		t.Fatal(err)
+	}
+
+	targetCount, err = targetClient.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != sourceCount {
+		t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount)
+	}
+}
+
+// TestReindexerPreservingTTL shows how a caller can take control of the
+// copying process by providing ScanFields and a custom ReindexerFunc.
+func TestReindexerPreservingTTL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").TTL("999999").Version(10).VersionType("external").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sourceCount, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if sourceCount <= 0 {
+		t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+	}
+
+	targetCount, err := client.Count(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if targetCount != 0 {
+		t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+	}
+
+	// Carries over the source item's ttl to the reindexed item
+	copyWithTTL := func(hit *SearchHit, bulkService *BulkService) error {
+		source := make(map[string]interface{})
+		if err := json.Unmarshal(*hit.Source, &source); err != nil {
+			return err
+		}
+		req := NewBulkIndexRequest().Index(testIndexName2).Type(hit.Type).Id(hit.Id).Doc(source)
+		if ttl, ok := hit.Fields["_ttl"].(float64); ok {
+			req.Ttl(int64(ttl))
+		}
+		bulkService.Add(req)
+		return nil
+	}
+
+	r := NewReindexer(client, testIndexName, copyWithTTL).ScanFields("_source", "_ttl")
+
+	ret, err := r.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ret == nil {
+		t.Fatalf("expected result != %v; got: %v", nil, ret)
+	}
+	if ret.Success != sourceCount {
+		t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+	}
+	if ret.Failed != 0 {
+		t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+	}
+	if len(ret.Errors) != 0 {
+		t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+	}
+
+	getResult, err := client.Get().Index(testIndexName2).Id("1").Fields("_source", "_ttl").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, ok := getResult.Fields["_ttl"].(float64)
+	if !ok {
+		t.Errorf("Cannot retrieve TTL from reindexed document")
+	}
+
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go
new file mode 100644
index 00000000..eb5a3b13
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "runtime"
+ "strings"
+)
+
+// Elasticsearch-specific HTTP request
+type Request http.Request
+
+func NewRequest(method, url string) (*Request, error) {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+ req.Header.Add("Accept", "application/json")
+ return (*Request)(req), nil
+}
+
+func (r *Request) SetBodyJson(data interface{}) error {
+ body, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ r.SetBody(bytes.NewReader(body))
+ r.Header.Set("Content-Type", "application/json")
+ return nil
+}
+
+func (r *Request) SetBodyString(body string) error {
+ return r.SetBody(strings.NewReader(body))
+}
+
+func (r *Request) SetBody(body io.Reader) error {
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ r.Body = rc
+ if body != nil {
+ switch v := body.(type) {
+ case *strings.Reader:
+ r.ContentLength = int64(v.Len())
+ case *bytes.Buffer:
+ r.ContentLength = int64(v.Len())
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go
new file mode 100644
index 00000000..bd57ab7b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Rescore struct {
+ rescorer Rescorer
+ windowSize *int
+ defaultRescoreWindowSize *int
+}
+
+func NewRescore() *Rescore {
+ return &Rescore{}
+}
+
+func (r *Rescore) WindowSize(windowSize int) *Rescore {
+ r.windowSize = &windowSize
+ return r
+}
+
+func (r *Rescore) IsEmpty() bool {
+ return r.rescorer == nil
+}
+
+func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
+ r.rescorer = rescorer
+ return r
+}
+
+func (r *Rescore) Source() interface{} {
+ source := make(map[string]interface{})
+ if r.windowSize != nil {
+ source["window_size"] = *r.windowSize
+ } else if r.defaultRescoreWindowSize != nil {
+ source["window_size"] = *r.defaultRescoreWindowSize
+ }
+ source[r.rescorer.Name()] = r.rescorer.Source()
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go
new file mode 100644
index 00000000..cbb82185
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Rescorer interface {
+ Name() string
+ Source() interface{}
+}
+
+// -- Query Rescorer --
+
+type QueryRescorer struct {
+ query Query
+ rescoreQueryWeight *float64
+ queryWeight *float64
+ scoreMode string
+}
+
+func NewQueryRescorer(query Query) *QueryRescorer {
+ return &QueryRescorer{
+ query: query,
+ }
+}
+
+func (r *QueryRescorer) Name() string {
+ return "query"
+}
+
+func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
+ r.rescoreQueryWeight = &rescoreQueryWeight
+ return r
+}
+
+func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
+ r.queryWeight = &queryWeight
+ return r
+}
+
+func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
+ r.scoreMode = scoreMode
+ return r
+}
+
+func (r *QueryRescorer) Source() interface{} {
+ source := make(map[string]interface{})
+ source["rescore_query"] = r.query.Source()
+ if r.queryWeight != nil {
+ source["query_weight"] = *r.queryWeight
+ }
+ if r.rescoreQueryWeight != nil {
+ source["rescore_query_weight"] = *r.rescoreQueryWeight
+ }
+ if r.scoreMode != "" {
+ source["score_mode"] = r.scoreMode
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go
new file mode 100644
index 00000000..9426c23a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go
@@ -0,0 +1,43 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+)
+
+// Response represents a response from Elasticsearch.
+type Response struct {
+ // StatusCode is the HTTP status code, e.g. 200.
+ StatusCode int
+ // Header is the HTTP header from the HTTP response.
+ // Keys in the map are canonicalized (see http.CanonicalHeaderKey).
+ Header http.Header
+ // Body is the deserialized response body.
+ Body json.RawMessage
+}
+
+// newResponse creates a new response from the HTTP response.
+func (c *Client) newResponse(res *http.Response) (*Response, error) {
+ r := &Response{
+ StatusCode: res.StatusCode,
+ Header: res.Header,
+ }
+ if res.Body != nil {
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // HEAD requests return a body but no content
+ if len(slurp) > 0 {
+ if err := c.decoder.Decode(slurp, &r.Body); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return r, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go
new file mode 100644
index 00000000..74b8f38e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go
@@ -0,0 +1,313 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+const (
+ defaultKeepAlive = "5m"
+)
+
+var (
+ // End of stream (or scan)
+ EOS = errors.New("EOS")
+
+ // No ScrollId
+ ErrNoScrollId = errors.New("no scrollId")
+)
+
+// ScanService manages a cursor through documents in Elasticsearch.
+type ScanService struct {
+ client *Client
+ indices []string
+ types []string
+ keepAlive string
+ fields []string
+ query Query
+ sorts []SortInfo
+ size *int
+ pretty bool
+}
+
+func NewScanService(client *Client) *ScanService {
+ builder := &ScanService{
+ client: client,
+ query: NewMatchAllQuery(),
+ }
+ return builder
+}
+
+func (s *ScanService) Index(index string) *ScanService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *ScanService) Indices(indices ...string) *ScanService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *ScanService) Type(typ string) *ScanService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+func (s *ScanService) Types(types ...string) *ScanService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScanService) Scroll(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// Fields specifies the fields the scan query should load.
+// By default fields is nil so _source is loaded
+func (s *ScanService) Fields(fields ...string) *ScanService {
+ s.fields = fields
+ return s
+}
+
+func (s *ScanService) Query(query Query) *ScanService {
+ s.query = query
+ return s
+}
+
+// Sort the results by the given field, in the given order.
+// Use the alternative SortWithInfo to use a struct to define the sorting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *ScanService) Sort(field string, ascending bool) *ScanService {
+ s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
+ return s
+}
+
+// SortWithInfo defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *ScanService) SortWithInfo(info SortInfo) *ScanService {
+ s.sorts = append(s.sorts, info)
+ return s
+}
+
+func (s *ScanService) Pretty(pretty bool) *ScanService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *ScanService) Size(size int) *ScanService {
+ s.size = &size
+ return s
+}
+
+func (s *ScanService) Do() (*ScanCursor, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ if len(s.sorts) == 0 {
+ params.Set("search_type", "scan")
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if s.fields != nil {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+
+ // Set body
+ body := make(map[string]interface{})
+ if s.query != nil {
+ body["query"] = s.query.Source()
+ }
+ if len(s.sorts) > 0 {
+ sortarr := make([]interface{}, 0)
+ for _, sort := range s.sorts {
+ sortarr = append(sortarr, sort.Source())
+ }
+ body["sort"] = sortarr
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)
+
+ return cursor, nil
+}
+
+// ScanCursor represents a single page of results from
+// an Elasticsearch Scan operation.
+type ScanCursor struct {
+ Results *SearchResult
+
+ client *Client
+ keepAlive string
+ pretty bool
+ currentPage int
+}
+
+// NewScanCursor returns a new initialized instance
+// of ScanCursor.
+func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
+ return &ScanCursor{
+ client: client,
+ keepAlive: keepAlive,
+ pretty: pretty,
+ Results: searchResult,
+ }
+}
+
+// TotalHits is a convenience method that returns the number
+// of hits the cursor will iterate through.
+func (c *ScanCursor) TotalHits() int64 {
+ if c.Results.Hits == nil {
+ return 0
+ }
+ return c.Results.Hits.TotalHits
+}
+
+// Next returns the next search result or nil when all
+// documents have been scanned.
+//
+// Usage:
+//
+// for {
+// res, err := cursor.Next()
+// if err == elastic.EOS {
+// // End of stream (or scan)
+// break
+// }
+// if err != nil {
+// // Handle error
+// }
+// // Work with res
+// }
+//
+func (c *ScanCursor) Next() (*SearchResult, error) {
+ if c.currentPage > 0 {
+ if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 {
+ return nil, EOS
+ }
+ }
+ if c.Results.ScrollId == "" {
+ return nil, EOS
+ }
+
+ // Build url
+ path := "/_search/scroll"
+
+ // Parameters
+ params := make(url.Values)
+ if c.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", c.pretty))
+ }
+ if c.keepAlive != "" {
+ params.Set("scroll", c.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+
+ // Set body
+ body := c.Results.ScrollId
+
+ // Get response
+ res, err := c.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ c.Results = &SearchResult{ScrollId: body}
+ if err := json.Unmarshal(res.Body, c.Results); err != nil {
+ return nil, err
+ }
+
+ c.currentPage += 1
+
+ return c.Results, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan_test.go
new file mode 100644
index 00000000..b475b805
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan_test.go
@@ -0,0 +1,415 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestScan(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ cursor, err := client.Scan(testIndexName).Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if cursor.Results == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if cursor.Results.Hits == nil {
+ t.Errorf("expected results.Hits != nil; got nil")
+ }
+ if cursor.Results.Hits.TotalHits != 3 {
+ t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits)
+ }
+ if len(cursor.Results.Hits.Hits) != 0 {
+ t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits))
+ }
+
+ pages := 0
+ numDocs := 0
+
+ for {
+ searchResult, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pages += 1
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ numDocs += 1
+ }
+ }
+
+ if pages <= 0 {
+ t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+ }
+
+ if numDocs != 3 {
+ t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+ }
+}
+
+func TestScanWithSort(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We sort on a numerical field, because sorting on the 'message' string field would
+ // raise the whole question of tokenizing and analyzing.
+ cursor, err := client.Scan(testIndexName).Sort("retweets", true).Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if cursor.Results == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if cursor.Results.Hits == nil {
+ t.Errorf("expected results.Hits != nil; got nil")
+ }
+ if cursor.Results.Hits.TotalHits != 3 {
+ t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits)
+ }
+ if len(cursor.Results.Hits.Hits) != 1 {
+ t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits))
+ }
+
+ if cursor.Results.Hits.Hits[0].Id != "3" {
+ t.Errorf("expected hitID = %v; got %v", "3", cursor.Results.Hits.Hits[0].Id)
+
+ }
+
+ numDocs := 1 // The cursor already gave us a result
+ pages := 0
+
+ for {
+ searchResult, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pages += 1
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ numDocs += 1
+ }
+ }
+
+ if pages <= 0 {
+ t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+ }
+
+ if numDocs != 3 {
+ t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+ }
+}
+
+func TestScanWithQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Return tweets from olivere only
+ termQuery := NewTermQuery("user", "olivere")
+ cursor, err := client.Scan(testIndexName).
+ Size(1).
+ Query(termQuery).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if cursor.Results == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if cursor.Results.Hits == nil {
+ t.Errorf("expected results.Hits != nil; got nil")
+ }
+ if cursor.Results.Hits.TotalHits != 2 {
+ t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits)
+ }
+ if len(cursor.Results.Hits.Hits) != 0 {
+ t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits))
+ }
+
+ pages := 0
+ numDocs := 0
+
+ for {
+ searchResult, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pages += 1
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ numDocs += 1
+ }
+ }
+
+ if pages <= 0 {
+ t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+ }
+
+ if numDocs != 2 {
+ t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs)
+ }
+}
+
+func TestScanAndScrollWithMissingIndex(t *testing.T) {
+ client := setupTestClient(t) // does not create testIndexName
+
+ cursor, err := client.Scan(testIndexName).Scroll("30s").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cursor == nil {
+ t.Fatalf("expected cursor; got: %v", cursor)
+ }
+
+ // First request immediately returns EOS
+ res, err := cursor.Next()
+ if err != EOS {
+ t.Fatal(err)
+ }
+ if res != nil {
+ t.Fatalf("expected results == %v; got: %v", nil, res)
+ }
+}
+
+func TestScanAndScrollWithEmptyIndex(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ if isTravis() {
+ t.Skip("test on Travis fails regularly with " +
+ "Error 503 (Service Unavailable): SearchPhaseExecutionException[Failed to execute phase [init_scan], all shards failed]")
+ }
+
+ _, err := client.Flush().Index(testIndexName).WaitIfOngoing(true).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cursor, err := client.Scan(testIndexName).Scroll("30s").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cursor == nil {
+ t.Fatalf("expected cursor; got: %v", cursor)
+ }
+
+ // First request returns no error, but no hits
+ res, err := cursor.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected results != nil; got: nil")
+ }
+ if res.ScrollId == "" {
+ t.Errorf("expected scrollId in results; got: %q", res.ScrollId)
+ }
+ if res.TotalHits() != 0 {
+ t.Errorf("expected TotalHits() = %d; got %d", 0, res.TotalHits())
+ }
+ if res.Hits == nil {
+ t.Errorf("expected results.Hits != nil; got: nil")
+ }
+ if res.Hits.TotalHits != 0 {
+ t.Errorf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits)
+ }
+ if res.Hits.Hits == nil {
+ t.Errorf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits)
+ }
+ if len(res.Hits.Hits) != 0 {
+ t.Errorf("expected len(results.Hits.Hits) == %d; got: %d", 0, len(res.Hits.Hits))
+ }
+
+ // Subsequent requests return EOS
+ res, err = cursor.Next()
+ if err != EOS {
+ t.Fatal(err)
+ }
+ if res != nil {
+ t.Fatalf("expected results == %v; got: %v", nil, res)
+ }
+
+ res, err = cursor.Next()
+ if err != EOS {
+ t.Fatal(err)
+ }
+ if res != nil {
+ t.Fatalf("expected results == %v; got: %v", nil, res)
+ }
+}
+
+func TestIssue119(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ comment1 := comment{User: "nico", Comment: "You bet."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for {
+ searchResult, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Type == "tweet" {
+ if _, ok := hit.Fields["_parent"].(string); ok {
+ t.Errorf("Type `tweet` cannot have any parent...")
+
+ toPrint, _ := json.MarshalIndent(hit, "", " ")
+ t.Fatal(string(toPrint))
+ }
+ }
+
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go
new file mode 100644
index 00000000..ddc31509
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go
@@ -0,0 +1,219 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ScrollService manages a cursor through documents in Elasticsearch.
+type ScrollService struct {
+ client *Client
+ indices []string
+ types []string
+ keepAlive string
+ query Query
+ size *int
+ pretty bool
+ scrollId string
+}
+
+func NewScrollService(client *Client) *ScrollService {
+ builder := &ScrollService{
+ client: client,
+ query: NewMatchAllQuery(),
+ }
+ return builder
+}
+
+func (s *ScrollService) Index(index string) *ScrollService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *ScrollService) Indices(indices ...string) *ScrollService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *ScrollService) Type(typ string) *ScrollService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+func (s *ScrollService) Types(types ...string) *ScrollService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+func (s *ScrollService) Query(query Query) *ScrollService {
+ s.query = query
+ return s
+}
+
+func (s *ScrollService) Pretty(pretty bool) *ScrollService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *ScrollService) Size(size int) *ScrollService {
+ s.size = &size
+ return s
+}
+
+func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
+ s.scrollId = scrollId
+ return s
+}
+
+func (s *ScrollService) Do() (*SearchResult, error) {
+ if s.scrollId == "" {
+ return s.GetFirstPage()
+ }
+ return s.GetNextPage()
+}
+
+func (s *ScrollService) GetFirstPage() (*SearchResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ params.Set("search_type", "scan")
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+
+ // Set body
+ body := make(map[string]interface{})
+ if s.query != nil {
+ body["query"] = s.query.Source()
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ return searchResult, nil
+}
+
+func (s *ScrollService) GetNextPage() (*SearchResult, error) {
+ if s.scrollId == "" {
+ return nil, EOS
+ }
+
+ // Build url
+ path := "/_search/scroll"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, s.scrollId)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ // Determine last page
+ if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 {
+ return nil, EOS
+ }
+
+ return searchResult, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll_test.go
new file mode 100644
index 00000000..4a5c4811
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll_test.go
@@ -0,0 +1,106 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestScroll(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ res, err := client.Scroll(testIndexName).Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if res.Hits == nil {
+ t.Errorf("expected results.Hits != nil; got nil")
+ }
+ if res.Hits.TotalHits != 3 {
+ t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, res.Hits.TotalHits)
+ }
+ if len(res.Hits.Hits) != 0 {
+ t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(res.Hits.Hits))
+ }
+ if res.ScrollId == "" {
+ t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+ }
+
+ pages := 0
+ numDocs := 0
+ scrollId := res.ScrollId
+
+ for {
+ searchResult, err := client.Scroll(testIndexName).
+ Size(1).
+ ScrollId(scrollId).
+ Do()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pages += 1
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ numDocs += 1
+ }
+
+ scrollId = searchResult.ScrollId
+ if scrollId == "" {
+ t.Errorf("expected scrollId in results; got %q", scrollId)
+ }
+ }
+
+ if pages <= 0 {
+ t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+ }
+
+ if numDocs != 3 {
+ t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go
new file mode 100644
index 00000000..173d20e9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go
@@ -0,0 +1,523 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
+type SearchService struct {
+ client *Client
+ searchSource *SearchSource
+ source interface{}
+ pretty bool
+ searchType string
+ indices []string
+ queryHint string
+ routing string
+ preference string
+ types []string
+}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+// You typically do not create the service yourself manually, but access
+// it via client.Search().
+func NewSearchService(client *Client) *SearchService {
+ builder := &SearchService{
+ client: client,
+ searchSource: NewSearchSource(),
+ }
+ return builder
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+ s.searchSource = searchSource
+ if s.searchSource == nil {
+ s.searchSource = NewSearchSource()
+ }
+ return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic.
+func (s *SearchService) Source(source interface{}) *SearchService {
+ s.source = source
+ return s
+}
+
+// Index sets the name of the index to use for search.
+func (s *SearchService) Index(index string) *SearchService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices sets the names of the indices to use for search.
+func (s *SearchService) Indices(indices ...string) *SearchService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type restricts the search for the given type.
+func (s *SearchService) Type(typ string) *SearchService {
+ if s.types == nil {
+ s.types = []string{typ}
+ } else {
+ s.types = append(s.types, typ)
+ }
+ return s
+}
+
+// Types allows to restrict the search to a list of types.
+func (s *SearchService) Types(types ...string) *SearchService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Pretty enables the caller to indent the JSON output.
+func (s *SearchService) Pretty(pretty bool) *SearchService {
+ s.pretty = pretty
+ return s
+}
+
+// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
+func (s *SearchService) Timeout(timeout string) *SearchService {
+ s.searchSource = s.searchSource.Timeout(timeout)
+ return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
+ s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
+ return s
+}
+
+// SearchType sets the search operation type. Valid values are:
+// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
+// "dfs_query_and_fetch", "count", "scan".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-search-type.html#search-request-search-type
+// for details.
+func (s *SearchService) SearchType(searchType string) *SearchService {
+ s.searchType = searchType
+ return s
+}
+
+// Routing allows for (a comma-separated) list of specific routing values.
+func (s *SearchService) Routing(routing string) *SearchService {
+ s.routing = routing
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: "random").
+func (s *SearchService) Preference(preference string) *SearchService {
+ s.preference = preference
+ return s
+}
+
+func (s *SearchService) QueryHint(queryHint string) *SearchService {
+ s.queryHint = queryHint
+ return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *SearchService) Query(query Query) *SearchService {
+ s.searchSource = s.searchSource.Query(query)
+ return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
+// for details.
+func (s *SearchService) PostFilter(postFilter Filter) *SearchService {
+ s.searchSource = s.searchSource.PostFilter(postFilter)
+ return s
+}
+
+// Highlight sets the highlighting. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for details.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+ s.searchSource = s.searchSource.Highlight(highlight)
+ return s
+}
+
+// GlobalSuggestText sets the global text for suggesters. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html#global-suggest
+// for details.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+ s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+ return s
+}
+
+// Suggester sets the suggester. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html
+// for details.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+ s.searchSource = s.searchSource.Suggester(suggester)
+ return s
+}
+
+// Facet adds a facet to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+// to get an overview of Elasticsearch facets.
+func (s *SearchService) Facet(name string, facet Facet) *SearchService {
+ s.searchSource = s.searchSource.Facet(name, facet)
+ return s
+}
+
+// Aggregation adds an aggregation to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+// for an overview of aggregations in Elasticsearch.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+ s.searchSource = s.searchSource.Aggregation(name, aggregation)
+ return s
+}
+
+// MinScore excludes documents which have a score less than the minimum
+// specified here. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-min-score.html.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+ s.searchSource = s.searchSource.MinScore(minScore)
+ return s
+}
+
+// From defines the offset from the first result you want to fetch.
+// Use it in combination with Size to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) From(from int) *SearchService {
+ s.searchSource = s.searchSource.From(from)
+ return s
+}
+
+// Size defines the maximum number of hits to be returned.
+// Use it in combination with From to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) Size(size int) *SearchService {
+ s.searchSource = s.searchSource.Size(size)
+ return s
+}
+
+// Explain can be enabled to provide an explanation for each hit and how its
+// score was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html
+// for details.
+func (s *SearchService) Explain(explain bool) *SearchService {
+ s.searchSource = s.searchSource.Explain(explain)
+ return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
+func (s *SearchService) Version(version bool) *SearchService {
+ s.searchSource = s.searchSource.Version(version)
+ return s
+}
+
+// Sort the results by the given field, in the given order.
+// Use the alternative SortWithInfo to use a struct to define the sorting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+ s.searchSource = s.searchSource.Sort(field, ascending)
+ return s
+}
+
+// SortWithInfo defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+ s.searchSource = s.searchSource.SortWithInfo(info)
+ return s
+}
+
+// SortBy defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+ s.searchSource = s.searchSource.SortBy(sorter...)
+ return s
+}
+
+// Fields tells Elasticsearch to only load specific fields from a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
+func (s *SearchService) Fields(fields ...string) *SearchService {
+ s.searchSource = s.searchSource.Fields(fields...)
+ return s
+}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do() (*SearchResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // Types part
+ if len(s.types) > 0 {
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ path += "/"
+ path += strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.searchType != "" {
+ params.Set("search_type", s.searchType)
+ }
+
+ // Perform request
+ var body interface{}
+ if s.source != nil {
+ body = s.source
+ } else {
+ body = s.searchSource.Source()
+ }
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return search results
+ ret := new(SearchResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SearchResult is the result of a search in Elasticsearch.
+type SearchResult struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations
+ Hits *SearchHits `json:"hits"` // the actual search hits
+ Suggest SearchSuggest `json:"suggest"` // results from suggesters
+ Facets SearchFacets `json:"facets"` // results from facets
+ Aggregations Aggregations `json:"aggregations"` // results from aggregations
+ TimedOut bool `json:"timed_out"` // true if the search timed out
+ Error string `json:"error,omitempty"` // used in MultiSearch only
+}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+ if r.Hits != nil {
+ return r.Hits.TotalHits
+ }
+ return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Notice that Each will ignore errors in
+// serializing JSON.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+ if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+ return nil
+ }
+ slice := make([]interface{}, 0)
+ for _, hit := range r.Hits.Hits {
+ v := reflect.New(typ).Elem()
+ if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+ slice = append(slice, v.Interface())
+ }
+ }
+ return slice
+}
+
+// SearchHits specifies the list of search hits.
+type SearchHits struct {
+ TotalHits int64 `json:"total"` // total number of hits found
+ MaxScore *float64 `json:"max_score"` // maximum score of all hits
+ Hits []*SearchHit `json:"hits"` // the actual hits returned
+}
+
+// SearchHit is a single hit.
+type SearchHit struct {
+ Score *float64 `json:"_score"` // computed score
+ Index string `json:"_index"` // index name
+ Id string `json:"_id"` // external or internal
+ Type string `json:"_type"` // type
+ Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
+ Sort []interface{} `json:"sort"` // sort information
+ Highlight SearchHitHighlight `json:"highlight"` // highlighter information
+ Source *json.RawMessage `json:"_source"` // stored document source
+ Fields map[string]interface{} `json:"fields"` // returned fields
+ Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed
+ MatchedQueries map[string]interface{} `json:"matched_queries"` // matched queries
+ InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0
+
+ // Shard
+ // HighlightFields
+ // SortValues
+ // MatchedFilters
+}
+
+type SearchHitInnerHits struct {
+ Hits *SearchHits `json:"hits"`
+}
+
+// SearchExplanation explains how the score for a hit was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html.
+type SearchExplanation struct {
+ Value float64 `json:"value"` // e.g. 1.0
+ Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:"
+ Details []SearchExplanation `json:"details,omitempty"` // recursive details
+}
+
+// Suggest
+
+// SearchSuggest is a map of suggestions.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggest map[string][]SearchSuggestion
+
+// SearchSuggestion is a single search suggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestion struct {
+ Text string `json:"text"`
+ Offset int `json:"offset"`
+ Length int `json:"length"`
+ Options []SearchSuggestionOption `json:"options"`
+}
+
+// SearchSuggestionOption is an option of a SearchSuggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestionOption struct {
+ Text string `json:"text"`
+ Score float32 `json:"score"`
+ Freq int `json:"freq"`
+ Payload interface{} `json:"payload"`
+}
+
+// Facets
+
+// SearchFacets is a map of facets.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacets map[string]*SearchFacet
+
+// SearchFacet is a single facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacet struct {
+ Type string `json:"_type"`
+ Missing int `json:"missing"`
+ Total int `json:"total"`
+ Other int `json:"other"`
+ Terms []searchFacetTerm `json:"terms"`
+ Ranges []searchFacetRange `json:"ranges"`
+ Entries []searchFacetEntry `json:"entries"`
+}
+
+// searchFacetTerm is the result of a terms/terms_stats facet.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-facet.html
+// and https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html.
+type searchFacetTerm struct {
+ Term interface{} `json:"term"`
+ Count int `json:"count"`
+
+ // The following fields are returned for terms_stats facets.
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html.
+
+ TotalCount int `json:"total_count"`
+ Min float64 `json:"min"`
+ Max float64 `json:"max"`
+ Total float64 `json:"total"`
+ Mean float64 `json:"mean"`
+}
+
+// searchFacetRange is the result of a range facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html.
+type searchFacetRange struct {
+ From *float64 `json:"from"`
+ FromStr *string `json:"from_str"`
+ To *float64 `json:"to"`
+ ToStr *string `json:"to_str"`
+ Count int `json:"count"`
+ Min *float64 `json:"min"`
+ Max *float64 `json:"max"`
+ TotalCount int `json:"total_count"`
+ Total *float64 `json:"total"`
+ Mean *float64 `json:"mean"`
+}
+
+// searchFacetEntry is a general facet entry.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+type searchFacetEntry struct {
+ // Key for this facet, e.g. in histograms
+ Key interface{} `json:"key"`
+ // Date histograms contain the number of milliseconds as date:
+ // If e.Time = 1293840000000, then: Time.at(1293840000000/1000) => 2011-01-01
+ Time int64 `json:"time"`
+ // Number of hits for this facet
+ Count int `json:"count"`
+ // Min is either a string like "Infinity" or a float64.
+ // This is returned with some DateHistogram facets.
+ Min interface{} `json:"min,omitempty"`
+ // Max is either a string like "-Infinity" or a float64
+ // This is returned with some DateHistogram facets.
+ Max interface{} `json:"max,omitempty"`
+ // Total is the sum of all entries on the recorded Time
+ // This is returned with some DateHistogram facets.
+ Total float64 `json:"total,omitempty"`
+ // TotalCount is the number of entries for Total
+ // This is returned with some DateHistogram facets.
+ TotalCount int `json:"total_count,omitempty"`
+ // Mean is the mean value
+ // This is returned with some DateHistogram facets.
+ Mean float64 `json:"mean,omitempty"`
+}
+
+// Aggregations (see search_aggs.go)
+
+// Highlighting
+
+// SearchHitHighlight is the highlight information of a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for a general discussion of highlighting.
+type SearchHitHighlight map[string][]string
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go
new file mode 100644
index 00000000..fb8b2c83
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go
@@ -0,0 +1,960 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// Aggregations can be seen as a unit-of-work that build
+// analytic information over a set of documents. It is
+// (in many senses) the follow-up of facets in Elasticsearch.
+// For more details about aggregations, visit:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+type Aggregation interface {
+ Source() interface{}
+}
+
+// Aggregations is a list of aggregations that are part of a search result.
+type Aggregations map[string]*json.RawMessage
+
+// Min returns min aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Max returns max aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Stats returns stats aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ExtendedStats returns extended stats aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationExtendedStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Percentiles returns percentiles results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// PercentileRanks returns percentile ranks results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// TopHits returns top-hits aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationTopHitsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Global returns global results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filter returns filter results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filters returns filters results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketFilters)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Missing returns missing results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Nested returns nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html
+func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ReverseNested returns reverse-nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html
+func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Children returns children results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Terms returns terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// SignificantTerms returns significant terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketSignificantTerms)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Range returns range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// KeyedRange returns keyed range aggregation results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html.
+func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyedRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateRange returns date range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// IPv4Range returns IPv4 range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Histogram returns histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateHistogram returns date histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoBounds returns geo-bounds aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationGeoBoundsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoHash returns geo-hash aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html
+func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoDistance returns geo distance aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html
+func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// -- Single value metric --
+
+// AggregationValueMetric is a single-value metric, returned e.g. by a
+// Min or Max aggregation.
+type AggregationValueMetric struct {
+ Aggregations
+
+ Value *float64 //`json:"value"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Stats metric --
+
+// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
+type AggregationStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Extended stats metric --
+
+// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
+type AggregationExtendedStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+ SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
+ Variance *float64 //`json:"variance,omitempty"`
+ StdDeviation *float64 //`json:"std_deviation,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
+func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ if v, ok := aggs["sum_of_squares"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfSquares)
+ }
+ if v, ok := aggs["variance"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Variance)
+ }
+ if v, ok := aggs["std_deviation"]; ok && v != nil {
+ json.Unmarshal(*v, &a.StdDeviation)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Percentiles metric --
+
+// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
+type AggregationPercentilesMetric struct {
+ Aggregations
+
+ Values map[string]float64 // `json:"values"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
+func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["values"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Values)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Top-hits metric --
+
+// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
+type AggregationTopHitsMetric struct {
+ Aggregations
+
+ Hits *SearchHits //`json:"hits"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ a.Aggregations = aggs
+ a.Hits = new(SearchHits)
+ if v, ok := aggs["hits"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Hits)
+ }
+ return nil
+}
+
+// -- Geo-bounds metric --
+
+// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
+type AggregationGeoBoundsMetric struct {
+ Aggregations
+
+ Bounds struct {
+ TopLeft struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"top_left"`
+ BottomRight struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"bottom_right"`
+ } `json:"bounds"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
+func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["bounds"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Bounds)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Single bucket --
+
+// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
+type AggregationSingleBucket struct {
+ Aggregations
+
+ DocCount int64 // `json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
+func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket range items --
+
+// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
+// with a range aggregation.
+type AggregationBucketRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketRangeItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned
+// with a keyed range aggregation.
+type AggregationBucketKeyedRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
+type AggregationBucketRangeItem struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ From *float64 //`json:"from"`
+ FromAsString string //`json:"from_as_string"`
+ To *float64 //`json:"to"`
+ ToAsString string //`json:"to_as_string"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["from"]; ok && v != nil {
+ json.Unmarshal(*v, &a.From)
+ }
+ if v, ok := aggs["from_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.FromAsString)
+ }
+ if v, ok := aggs["to"]; ok && v != nil {
+ json.Unmarshal(*v, &a.To)
+ }
+ if v, ok := aggs["to_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ToAsString)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket key items --
+
+// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
+// with a terms aggregation.
+type AggregationBucketKeyItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
+func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
+type AggregationBucketKeyItem struct {
+ Aggregations
+
+ Key interface{} //`json:"key"`
+ KeyNumber json.Number
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
+func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ if err := dec.Decode(&aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ json.Unmarshal(*v, &a.KeyNumber)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket types for significant terms --
+
+// AggregationBucketSignificantTerms is a bucket aggregation returned
+// with a significant terms aggregation.
+type AggregationBucketSignificantTerms struct {
+ Aggregations
+
+ DocCount int64 //`json:"doc_count"`
+ Buckets []*AggregationBucketSignificantTerm //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
+func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
+type AggregationBucketSignificantTerm struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ BgCount int64 //`json:"bg_count"`
+ Score float64 //`json:"score"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["bg_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.BgCount)
+ }
+ if v, ok := aggs["score"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Score)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket filters --
+
+// AggregationBucketFilters is a multi-bucket aggregation that is returned
+// with a filters aggregation.
+type AggregationBucketFilters struct {
+ Aggregations
+
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+ NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ json.Unmarshal(*v, &a.NamedBuckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket histogram items --
+
+// AggregationBucketHistogramItems is a bucket aggregation that is returned
+// with a date histogram aggregation.
+type AggregationBucketHistogramItems struct {
+ Aggregations
+
+ Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
+func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
+type AggregationBucketHistogramItem struct {
+ Aggregations
+
+ Key int64 //`json:"key"`
+ KeyAsString *string //`json:"key_as_string"`
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
+func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["key_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.KeyAsString)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go
new file mode 100644
index 00000000..7b01ee00
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AvgAggregation is a single-value metrics aggregation that computes
+// the average of numeric values that are extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+type AvgAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewAvgAggregation() AvgAggregation {
+ a := AvgAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a AvgAggregation) Field(field string) AvgAggregation {
+ a.field = field
+ return a
+}
+
+func (a AvgAggregation) Script(script string) AvgAggregation {
+ a.script = script
+ return a
+}
+
+func (a AvgAggregation) ScriptFile(scriptFile string) AvgAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a AvgAggregation) Lang(lang string) AvgAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a AvgAggregation) Format(format string) AvgAggregation {
+ a.format = format
+ return a
+}
+
+func (a AvgAggregation) Param(name string, value interface{}) AvgAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a AvgAggregation) SubAggregation(name string, subAggregation Aggregation) AvgAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a AvgAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "avg_grade" : { "avg" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "avg" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["avg"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg_test.go
new file mode 100644
index 00000000..8ddd8310
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestAvgAggregation(t *testing.T) {
+ agg := NewAvgAggregation().Field("grade")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestAvgAggregationWithFormat(t *testing.T) {
+ agg := NewAvgAggregation().Field("grade").Format("000.0")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg":{"field":"grade","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go
new file mode 100644
index 00000000..5d641346
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CardinalityAggregation is a single-value metrics aggregation that
+// calculates an approximate count of distinct values.
+// Values can be extracted either from specific fields in the document
+// or generated by a script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+type CardinalityAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ precisionThreshold *int64
+ rehash *bool
+}
+
+func NewCardinalityAggregation() CardinalityAggregation {
+ a := CardinalityAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a CardinalityAggregation) Field(field string) CardinalityAggregation {
+ a.field = field
+ return a
+}
+
+func (a CardinalityAggregation) Script(script string) CardinalityAggregation {
+ a.script = script
+ return a
+}
+
+func (a CardinalityAggregation) ScriptFile(scriptFile string) CardinalityAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a CardinalityAggregation) Lang(lang string) CardinalityAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a CardinalityAggregation) Format(format string) CardinalityAggregation {
+ a.format = format
+ return a
+}
+
+func (a CardinalityAggregation) Param(name string, value interface{}) CardinalityAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) CardinalityAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a CardinalityAggregation) PrecisionThreshold(threshold int64) CardinalityAggregation {
+ a.precisionThreshold = &threshold
+ return a
+}
+
+func (a CardinalityAggregation) Rehash(rehash bool) CardinalityAggregation {
+ a.rehash = &rehash
+ return a
+}
+
+func (a CardinalityAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "author_count" : {
+ // "cardinality" : { "field" : "author" }
+ // }
+ // }
+ // }
+ // This method returns only the "cardinality" : { "field" : "author" } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["cardinality"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if a.precisionThreshold != nil {
+ opts["precision_threshold"] = *a.precisionThreshold
+ }
+ if a.rehash != nil {
+ opts["rehash"] = *a.rehash
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality_test.go
new file mode 100644
index 00000000..f2ff3df3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality_test.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCardinalityAggregation(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCardinalityAggregationWithOptions(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCardinalityAggregationWithFormat(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go
new file mode 100644
index 00000000..f9cc918d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ChildrenAggregation is a special single bucket aggregation that enables
+// aggregating from buckets on parent document types to buckets on child documents.
+// It is available from 1.4.0.Beta1 upwards.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+type ChildrenAggregation struct {
+ typ string
+ subAggregations map[string]Aggregation
+}
+
+func NewChildrenAggregation() ChildrenAggregation {
+ a := ChildrenAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ChildrenAggregation) Type(typ string) ChildrenAggregation {
+ a.typ = typ
+ return a
+}
+
+func (a ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) ChildrenAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ChildrenAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "to-answers" : {
+ // "type" : "answer"
+ // }
+ // }
+ // }
+ // This method returns only the { "type" : ... } part.
+
+ source := make(map[string]interface{})
+ source["type"] = a.typ
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children_test.go
new file mode 100644
index 00000000..092d09fb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestChildrenAggregation(t *testing.T) {
+ agg := NewChildrenAggregation().Type("answer")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"type":"answer"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestChildrenAggregationWithSubAggregation(t *testing.T) {
+ subAgg := NewTermsAggregation().Field("owner.display_name").Size(10)
+ agg := NewChildrenAggregation().Type("answer")
+ agg = agg.SubAggregation("top-names", subAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"type":"answer"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go
new file mode 100644
index 00000000..9b593bd5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go
@@ -0,0 +1,303 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DateHistogramAggregation is a multi-bucket aggregation similar to the
+// histogram except it can only be applied on date values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+type DateHistogramAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+
+ interval string
+ order string
+ orderAsc bool
+ minDocCount *int64
+ extendedBoundsMin interface{}
+ extendedBoundsMax interface{}
+ preZone string
+ postZone string
+ preZoneAdjustLargeInterval *bool
+ format string
+ preOffset int64
+ postOffset int64
+ factor *float32
+}
+
+func NewDateHistogramAggregation() DateHistogramAggregation {
+ a := DateHistogramAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a DateHistogramAggregation) Field(field string) DateHistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a DateHistogramAggregation) Script(script string) DateHistogramAggregation {
+ a.script = script
+ return a
+}
+
+func (a DateHistogramAggregation) ScriptFile(scriptFile string) DateHistogramAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a DateHistogramAggregation) Lang(lang string) DateHistogramAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a DateHistogramAggregation) Param(name string, value interface{}) DateHistogramAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) DateHistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (a DateHistogramAggregation) Interval(interval string) DateHistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a DateHistogramAggregation) Order(order string, asc bool) DateHistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByCount(asc bool) DateHistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByCountAsc() DateHistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a DateHistogramAggregation) OrderByCountDesc() DateHistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a DateHistogramAggregation) OrderByKey(asc bool) DateHistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByKeyAsc() DateHistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a DateHistogramAggregation) OrderByKeyDesc() DateHistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) MinDocCount(minDocCount int64) DateHistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a DateHistogramAggregation) PreZone(preZone string) DateHistogramAggregation {
+ a.preZone = preZone
+ return a
+}
+
+func (a DateHistogramAggregation) PostZone(postZone string) DateHistogramAggregation {
+ a.postZone = postZone
+ return a
+}
+
+func (a DateHistogramAggregation) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramAggregation {
+ a.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+ return a
+}
+
+func (a DateHistogramAggregation) PreOffset(preOffset int64) DateHistogramAggregation {
+ a.preOffset = preOffset
+ return a
+}
+
+func (a DateHistogramAggregation) PostOffset(postOffset int64) DateHistogramAggregation {
+ a.postOffset = postOffset
+ return a
+}
+
+func (a DateHistogramAggregation) Factor(factor float32) DateHistogramAggregation {
+ a.factor = &factor
+ return a
+}
+
+func (a DateHistogramAggregation) Format(format string) DateHistogramAggregation {
+ a.format = format
+ return a
+}
+
+// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMin(min interface{}) DateHistogramAggregation {
+ a.extendedBoundsMin = min
+ return a
+}
+
+// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMax(max interface{}) DateHistogramAggregation {
+ a.extendedBoundsMax = max
+ return a
+}
+
+func (a DateHistogramAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "articles_over_time" : {
+ // "date_histogram" : {
+ // "field" : "date",
+ // "interval" : "month"
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ opts["interval"] = a.interval
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.preZone != "" {
+ opts["pre_zone"] = a.preZone
+ }
+ if a.postZone != "" {
+ opts["post_zone"] = a.postZone
+ }
+ if a.preZoneAdjustLargeInterval != nil {
+ opts["pre_zone_adjust_large_interval"] = *a.preZoneAdjustLargeInterval
+ }
+ if a.preOffset != 0 {
+ opts["pre_offset"] = a.preOffset
+ }
+ if a.postOffset != 0 {
+ opts["post_offset"] = a.postOffset
+ }
+ if a.factor != nil {
+ opts["factor"] = *a.factor
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+ bounds := make(map[string]interface{})
+ if a.extendedBoundsMin != nil {
+ bounds["min"] = a.extendedBoundsMin
+ }
+ if a.extendedBoundsMax != nil {
+ bounds["max"] = a.extendedBoundsMax
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram_test.go
new file mode 100644
index 00000000..0e461c6a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDateHistogramAggregation(t *testing.T) {
+ agg := NewDateHistogramAggregation().Field("date").Interval("month").Format("YYYY-MM")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go
new file mode 100644
index 00000000..c0c550e8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go
@@ -0,0 +1,243 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated for
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ keyed *bool
+ unmapped *bool
+ format string
+ entries []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewDateRangeAggregation() DateRangeAggregation {
+ a := DateRangeAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]DateRangeAggregationEntry, 0),
+ }
+ return a
+}
+
+func (a DateRangeAggregation) Field(field string) DateRangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a DateRangeAggregation) Script(script string) DateRangeAggregation {
+ a.script = script
+ return a
+}
+
+func (a DateRangeAggregation) ScriptFile(scriptFile string) DateRangeAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a DateRangeAggregation) Lang(lang string) DateRangeAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a DateRangeAggregation) Param(name string, value interface{}) DateRangeAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) DateRangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a DateRangeAggregation) Keyed(keyed bool) DateRangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a DateRangeAggregation) Unmapped(unmapped bool) DateRangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a DateRangeAggregation) Format(format string) DateRangeAggregation {
+ a.format = format
+ return a
+}
+
+func (a DateRangeAggregation) AddRange(from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedTo(from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedFrom(to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Lt(to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) LtWithKey(key string, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Between(from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Gt(from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) GtWithKey(key string, from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "range" : {
+ // "date_range": {
+ // "field": "date",
+ // "format": "MM-yyy",
+ // "ranges": [
+ // { "to": "now-10M/M" },
+ // { "from": "now-10M/M" }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range_test.go
new file mode 100644
index 00000000..87221c18
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range_test.go
@@ -0,0 +1,106 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDateRangeAggregation(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at")
+ agg = agg.AddRange(nil, "2012-12-31")
+ agg = agg.AddRange("2013-01-01", "2013-12-31")
+ agg = agg.AddRange("2014-01-01", nil)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithUnbounded(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ AddUnboundedFrom("2012-12-31").
+ AddRange("2013-01-01", "2013-12-31").
+ AddUnboundedTo("2014-01-01")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithLtAndCo(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Lt("2012-12-31").
+ Between("2013-01-01", "2013-12-31").
+ Gt("2014-01-01")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Keyed(true).
+ Lt("2012-12-31").
+ Between("2013-01-01", "2013-12-31").
+ Gt("2014-01-01")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithKeys(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Keyed(true).
+ LtWithKey("pre-2012", "2012-12-31").
+ BetweenWithKey("2013", "2013-01-01", "2013-12-31").
+ GtWithKey("post-2013", "2014-01-01")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithSpecialNames(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ AddRange("now-10M/M", "now+10M/M")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go
new file mode 100644
index 00000000..76cd572c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+type ExtendedStatsAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewExtendedStatsAggregation() ExtendedStatsAggregation {
+ a := ExtendedStatsAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ExtendedStatsAggregation) Field(field string) ExtendedStatsAggregation {
+ a.field = field
+ return a
+}
+
+func (a ExtendedStatsAggregation) Script(script string) ExtendedStatsAggregation {
+ a.script = script
+ return a
+}
+
+func (a ExtendedStatsAggregation) ScriptFile(scriptFile string) ExtendedStatsAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a ExtendedStatsAggregation) Lang(lang string) ExtendedStatsAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a ExtendedStatsAggregation) Format(format string) ExtendedStatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a ExtendedStatsAggregation) Param(name string, value interface{}) ExtendedStatsAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) ExtendedStatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ExtendedStatsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "extended_stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["extended_stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats_test.go
new file mode 100644
index 00000000..8771c462
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestExtendedStatsAggregation(t *testing.T) {
+ agg := NewExtendedStatsAggregation().Field("grade")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"extended_stats":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestExtendedStatsAggregationWithFormat(t *testing.T) {
+ agg := NewExtendedStatsAggregation().Field("grade").Format("000.0")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"extended_stats":{"field":"grade","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go
new file mode 100644
index 00000000..d165f351
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FilterAggregation defines a single bucket of all the documents
+// in the current document set context that match a specified filter.
+// Often this will be used to narrow down the current aggregation context
+// to a specific set of documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+type FilterAggregation struct {
+ filter Filter
+ subAggregations map[string]Aggregation
+}
+
+func NewFilterAggregation() FilterAggregation {
+ a := FilterAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a FilterAggregation) SubAggregation(name string, subAggregation Aggregation) FilterAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a FilterAggregation) Filter(filter Filter) FilterAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a FilterAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "in_stock_products" : {
+ // "filter" : { "range" : { "stock" : { "gt" : 0 } } }
+ // }
+ // }
+ // }
+ // This method returns only the { "filter" : {} } part.
+
+ source := make(map[string]interface{})
+ source["filter"] = a.filter.Source()
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter_test.go
new file mode 100644
index 00000000..b901378d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFilterAggregation(t *testing.T) {
+ filter := NewRangeFilter("stock").Gt(0)
+ agg := NewFilterAggregation().Filter(filter)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFilterAggregationWithSubAggregation(t *testing.T) {
+ avgPriceAgg := NewAvgAggregation().Field("price")
+ filter := NewRangeFilter("stock").Gt(0)
+ agg := NewFilterAggregation().Filter(filter).
+ SubAggregation("avg_price", avgPriceAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go
new file mode 100644
index 00000000..81da4cc3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FiltersAggregation defines a multi bucket aggregations where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+type FiltersAggregation struct {
+ filters []Filter
+ subAggregations map[string]Aggregation
+}
+
+func NewFiltersAggregation() FiltersAggregation {
+ return FiltersAggregation{
+ filters: make([]Filter, 0),
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a FiltersAggregation) Filter(filter Filter) FiltersAggregation {
+ a.filters = append(a.filters, filter)
+ return a
+}
+
+func (a FiltersAggregation) Filters(filters ...Filter) FiltersAggregation {
+ if len(filters) > 0 {
+ a.filters = append(a.filters, filters...)
+ }
+ return a
+}
+
+func (a FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) FiltersAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a FiltersAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "messages" : {
+ // "filters" : {
+ // "filters" : {
+ // "errors" : { "term" : { "body" : "error" }},
+ // "warnings" : { "term" : { "body" : "warning" }}
+ // }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the (outer) { "filters" : {} } part.
+
+ source := make(map[string]interface{})
+ filters := make(map[string]interface{})
+ source["filters"] = filters
+
+ arr := make([]interface{}, len(a.filters))
+ for i, filter := range a.filters {
+ arr[i] = filter.Source()
+ }
+ filters["filters"] = arr
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters_test.go
new file mode 100644
index 00000000..c1b244d9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFiltersAggregation(t *testing.T) {
+ f1 := NewRangeFilter("stock").Gt(0)
+ f2 := NewTermFilter("symbol", "GOOG")
+ agg := NewFiltersAggregation().Filters(f1, f2)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFiltersAggregationWithSubAggregation(t *testing.T) {
+ avgPriceAgg := NewAvgAggregation().Field("price")
+ f1 := NewRangeFilter("stock").Gt(0)
+ f2 := NewTermFilter("symbol", "GOOG")
+ agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go
new file mode 100644
index 00000000..33d9eb9a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go
@@ -0,0 +1,104 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// GeoBoundsAggregation is a metric aggregation that computes the
// bounding box containing all geo_point values for a field.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
type GeoBoundsAggregation struct {
	field         string                 // name of the geo_point field
	script        string                 // inline script computing the values
	scriptFile    string                 // name of a script file computing the values
	lang          string                 // script language
	params        map[string]interface{} // parameters passed to the script
	wrapLongitude *bool                  // nil means "not set": use the server-side default
}

// NewGeoBoundsAggregation creates a new, empty GeoBoundsAggregation.
func NewGeoBoundsAggregation() GeoBoundsAggregation {
	a := GeoBoundsAggregation{}
	return a
}

// Field sets the name of the geo_point field to aggregate on.
func (a GeoBoundsAggregation) Field(field string) GeoBoundsAggregation {
	a.field = field
	return a
}

// Script sets an inline script used to generate the values.
func (a GeoBoundsAggregation) Script(script string) GeoBoundsAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file used to generate the values.
func (a GeoBoundsAggregation) ScriptFile(scriptFile string) GeoBoundsAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the script language.
func (a GeoBoundsAggregation) Lang(lang string) GeoBoundsAggregation {
	a.lang = lang
	return a
}

// Params replaces all script parameters at once.
func (a GeoBoundsAggregation) Params(params map[string]interface{}) GeoBoundsAggregation {
	a.params = params
	return a
}

// Param sets a single script parameter, lazily initializing the map.
func (a GeoBoundsAggregation) Param(name string, value interface{}) GeoBoundsAggregation {
	if a.params == nil {
		a.params = make(map[string]interface{})
	}
	a.params[name] = value
	return a
}

// WrapLongitude specifies whether the bounding box is allowed to
// overlap the international date line.
func (a GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) GeoBoundsAggregation {
	a.wrapLongitude = &wrapLongitude
	return a
}

// Source returns the JSON-serializable body of the aggregation.
// Example:
//   {
//     "query" : {
//       "match" : { "business_type" : "shop" }
//     },
//     "aggs" : {
//       "viewport" : {
//         "geo_bounds" : {
//           "field" : "location",
//           "wrap_longitude" : true
//         }
//       }
//     }
//   }
//
// This method returns only the { "geo_bounds" : { ... } } part.
func (a GeoBoundsAggregation) Source() interface{} {
	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["geo_bounds"] = opts

	if a.field != "" {
		opts["field"] = a.field
	}
	if a.script != "" {
		opts["script"] = a.script
	}
	if a.scriptFile != "" {
		opts["script_file"] = a.scriptFile
	}
	if a.lang != "" {
		opts["lang"] = a.lang
	}
	// len of a nil map is 0, so no explicit nil check is needed.
	if len(a.params) > 0 {
		opts["params"] = a.params
	}
	if a.wrapLongitude != nil {
		opts["wrap_longitude"] = *a.wrapLongitude
	}

	return source
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds_test.go
new file mode 100644
index 00000000..904d7a24
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoBoundsAggregation(t *testing.T) {
+ agg := NewGeoBoundsAggregation().Field("location")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounds":{"field":"location"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) {
+ agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go
new file mode 100644
index 00000000..d63af531
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go
@@ -0,0 +1,180 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similar to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluate the distance of each document value from
+// the origin point and determines the buckets it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+ field string
+ unit string
+ distanceType string
+ point string
+ ranges []geoDistAggRange
+ subAggregations map[string]Aggregation
+}
+
+type geoDistAggRange struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewGeoDistanceAggregation() GeoDistanceAggregation {
+ a := GeoDistanceAggregation{
+ subAggregations: make(map[string]Aggregation),
+ ranges: make([]geoDistAggRange, 0),
+ }
+ return a
+}
+
+func (a GeoDistanceAggregation) Field(field string) GeoDistanceAggregation {
+ a.field = field
+ return a
+}
+
+func (a GeoDistanceAggregation) Unit(unit string) GeoDistanceAggregation {
+ a.unit = unit
+ return a
+}
+
+func (a GeoDistanceAggregation) DistanceType(distanceType string) GeoDistanceAggregation {
+ a.distanceType = distanceType
+ return a
+}
+
+func (a GeoDistanceAggregation) Point(latLon string) GeoDistanceAggregation {
+ a.point = latLon
+ return a
+}
+
+func (a GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) GeoDistanceAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a GeoDistanceAggregation) AddRange(from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedTo(from float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedFrom(to float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) Between(from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "rings_around_amsterdam" : {
+ // "geo_distance" : {
+ // "field" : "location",
+ // "origin" : "52.3760, 4.894",
+ // "ranges" : [
+ // { "to" : 100 },
+ // { "from" : 100, "to" : 300 },
+ // { "from" : 300 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geo_distance"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.unit != "" {
+ opts["unit"] = a.unit
+ }
+ if a.distanceType != "" {
+ opts["distance_type"] = a.distanceType
+ }
+ if a.point != "" {
+ opts["origin"] = a.point
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.ranges {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["from"] = from
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["to"] = to
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance_test.go
new file mode 100644
index 00000000..85729e5b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceAggregation(t *testing.T) {
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddRange(nil, 100)
+ agg = agg.AddRange(100, 300)
+ agg = agg.AddRange(300, nil)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceAggregationWithUnbounded(t *testing.T) {
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddUnboundedFrom(100)
+ agg = agg.AddRange(100, 300)
+ agg = agg.AddUnboundedTo(300)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go
new file mode 100644
index 00000000..4d56297e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go
@@ -0,0 +1,56 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GlobalAggregation defines a single bucket of all the documents within
+// the search execution context. This context is defined by the indices
+// and the document types you’re searching on, but is not influenced
+// by the search query itself.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+type GlobalAggregation struct {
+ subAggregations map[string]Aggregation
+}
+
+func NewGlobalAggregation() GlobalAggregation {
+ a := GlobalAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) GlobalAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a GlobalAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "all_products" : {
+ // "global" : {},
+ // "aggs" : {
+ // "avg_price" : { "avg" : { "field" : "price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "global" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["global"] = opts
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global_test.go
new file mode 100644
index 00000000..5b28bb37
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGlobalAggregation(t *testing.T) {
+ agg := NewGlobalAggregation()
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"global":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go
new file mode 100644
index 00000000..250d3f7f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go
@@ -0,0 +1,234 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HistogramAggregation is a multi-bucket values source based aggregation
+// that can be applied on numeric values extracted from the documents.
+// It dynamically builds fixed size (a.k.a. interval) buckets over the
+// values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+type HistogramAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+
+ interval int64
+ order string
+ orderAsc bool
+ minDocCount *int64
+ extendedBoundsMin *int64
+ extendedBoundsMax *int64
+}
+
+func NewHistogramAggregation() HistogramAggregation {
+ a := HistogramAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a HistogramAggregation) Field(field string) HistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a HistogramAggregation) Script(script string) HistogramAggregation {
+ a.script = script
+ return a
+}
+
+func (a HistogramAggregation) ScriptFile(scriptFile string) HistogramAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a HistogramAggregation) Lang(lang string) HistogramAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a HistogramAggregation) Param(name string, value interface{}) HistogramAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) HistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a HistogramAggregation) Interval(interval int64) HistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a HistogramAggregation) Order(order string, asc bool) HistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByCount(asc bool) HistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByCountAsc() HistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a HistogramAggregation) OrderByCountDesc() HistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a HistogramAggregation) OrderByKey(asc bool) HistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByKeyAsc() HistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a HistogramAggregation) OrderByKeyDesc() HistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a HistogramAggregation) OrderByAggregation(aggName string, asc bool) HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) MinDocCount(minDocCount int64) HistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMin(min int64) HistogramAggregation {
+ a.extendedBoundsMin = &min
+ return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMax(max int64) HistogramAggregation {
+ a.extendedBoundsMax = &max
+ return a
+}
+
+func (a HistogramAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "prices" : {
+ // "histogram" : {
+ // "field" : "price",
+ // "interval" : 50
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ opts["interval"] = a.interval
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+ bounds := make(map[string]interface{})
+ if a.extendedBoundsMin != nil {
+ bounds["min"] = a.extendedBoundsMin
+ }
+ if a.extendedBoundsMax != nil {
+ bounds["max"] = a.extendedBoundsMax
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram_test.go
new file mode 100644
index 00000000..19c50216
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHistogramAggregation(t *testing.T) {
+ agg := NewHistogramAggregation().Field("price").Interval(50)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"field":"price","interval":50}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go
new file mode 100644
index 00000000..9e77ef7a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MaxAggregation is a single-value metrics aggregation that keeps track and
+// returns the maximum value among the numeric values extracted from
+// the aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+type MaxAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewMaxAggregation() MaxAggregation {
+ a := MaxAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MaxAggregation) Field(field string) MaxAggregation {
+ a.field = field
+ return a
+}
+
+func (a MaxAggregation) Script(script string) MaxAggregation {
+ a.script = script
+ return a
+}
+
+func (a MaxAggregation) ScriptFile(scriptFile string) MaxAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a MaxAggregation) Lang(lang string) MaxAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a MaxAggregation) Format(format string) MaxAggregation {
+ a.format = format
+ return a
+}
+
+func (a MaxAggregation) Param(name string, value interface{}) MaxAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a MaxAggregation) SubAggregation(name string, subAggregation Aggregation) MaxAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MaxAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "max_price" : { "max" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "max" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["max"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max_test.go
new file mode 100644
index 00000000..60d3779a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMaxAggregation(t *testing.T) {
+ agg := NewMaxAggregation().Field("price")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMaxAggregationWithFormat(t *testing.T) {
+ agg := NewMaxAggregation().Field("price").Format("00000.00")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go
new file mode 100644
index 00000000..9e00bd30
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinAggregation is a single-value metrics aggregation that keeps track and
+// returns the minimum value among numeric values extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by a
+// provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+type MinAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewMinAggregation() MinAggregation {
+ a := MinAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MinAggregation) Field(field string) MinAggregation {
+ a.field = field
+ return a
+}
+
+func (a MinAggregation) Script(script string) MinAggregation {
+ a.script = script
+ return a
+}
+
+func (a MinAggregation) ScriptFile(scriptFile string) MinAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a MinAggregation) Lang(lang string) MinAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a MinAggregation) Format(format string) MinAggregation {
+ a.format = format
+ return a
+}
+
+func (a MinAggregation) Param(name string, value interface{}) MinAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a MinAggregation) SubAggregation(name string, subAggregation Aggregation) MinAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MinAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "min" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["min"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min_test.go
new file mode 100644
index 00000000..a52cc024
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMinAggregation(t *testing.T) {
+ agg := NewMinAggregation().Field("price")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"min":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMinAggregationWithFormat(t *testing.T) {
+ agg := NewMinAggregation().Field("price").Format("00000.00")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"min":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go
new file mode 100644
index 00000000..4e0f526d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingAggregation is a field data based single bucket aggregation,
+// that creates a bucket of all documents in the current document set context
+// that are missing a field value (effectively, missing a field or having
+// the configured NULL value set). This aggregator will often be used in
+// conjunction with other field data bucket aggregators (such as ranges)
+// to return information for all the documents that could not be placed
+// in any of the other buckets due to missing field data values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+type MissingAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+}
+
+func NewMissingAggregation() MissingAggregation {
+ a := MissingAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MissingAggregation) Field(field string) MissingAggregation {
+ a.field = field
+ return a
+}
+
+func (a MissingAggregation) SubAggregation(name string, subAggregation Aggregation) MissingAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MissingAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "products_without_a_price" : {
+ // "missing" : { "field" : "price" }
+ // }
+ // }
+ // }
+ // This method returns only the { "missing" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["missing"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing_test.go
new file mode 100644
index 00000000..4ed528a8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMissingAggregation(t *testing.T) {
+ agg := NewMissingAggregation().Field("price")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"missing":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go
new file mode 100644
index 00000000..feab5bef
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedAggregation is a special single bucket aggregation that enables
+// aggregating nested documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html
+type NestedAggregation struct {
+ path string
+ subAggregations map[string]Aggregation
+}
+
+func NewNestedAggregation() NestedAggregation {
+ a := NestedAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a NestedAggregation) SubAggregation(name string, subAggregation Aggregation) NestedAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a NestedAggregation) Path(path string) NestedAggregation {
+ a.path = path
+ return a
+}
+
+func (a NestedAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : { "name" : "led tv" }
+ // }
+ // "aggs" : {
+ // "resellers" : {
+ // "nested" : {
+ // "path" : "resellers"
+ // },
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "resellers.price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "nested" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["nested"] = opts
+
+ opts["path"] = a.path
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested_test.go
new file mode 100644
index 00000000..78c897f4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested_test.go
@@ -0,0 +1,37 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedAggregation(t *testing.T) {
+ agg := NewNestedAggregation().Path("resellers")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedAggregationWithSubAggregation(t *testing.T) {
+ minPriceAgg := NewMinAggregation().Field("resellers.price")
+ agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go
new file mode 100644
index 00000000..7e058d50
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go
@@ -0,0 +1,141 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentileRanksAggregation
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+type PercentileRanksAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ values []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentileRanksAggregation() PercentileRanksAggregation {
+ a := PercentileRanksAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ values: make([]float64, 0),
+ }
+ return a
+}
+
+func (a PercentileRanksAggregation) Field(field string) PercentileRanksAggregation {
+ a.field = field
+ return a
+}
+
+func (a PercentileRanksAggregation) Script(script string) PercentileRanksAggregation {
+ a.script = script
+ return a
+}
+
+func (a PercentileRanksAggregation) ScriptFile(scriptFile string) PercentileRanksAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a PercentileRanksAggregation) Lang(lang string) PercentileRanksAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a PercentileRanksAggregation) Format(format string) PercentileRanksAggregation {
+ a.format = format
+ return a
+}
+
+func (a PercentileRanksAggregation) Param(name string, value interface{}) PercentileRanksAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) PercentileRanksAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a PercentileRanksAggregation) Values(values ...float64) PercentileRanksAggregation {
+ a.values = make([]float64, 0)
+ a.values = append(a.values, values...)
+ return a
+}
+
+func (a PercentileRanksAggregation) Compression(compression float64) PercentileRanksAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a PercentileRanksAggregation) Estimator(estimator string) PercentileRanksAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a PercentileRanksAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentile_ranks" : {
+ // "field" : "load_time"
+ // "values" : [15, 30]
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentile_ranks"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if len(a.values) > 0 {
+ opts["values"] = a.values
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks_test.go
new file mode 100644
index 00000000..61f4a5de
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks_test.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercentileRanksAggregation(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentileRanksAggregationWithCustomValues(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentileRanksAggregationWithFormat(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go
new file mode 100644
index 00000000..5b6cff92
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go
@@ -0,0 +1,140 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentilesAggregation
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+type PercentilesAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ percentiles []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentilesAggregation() PercentilesAggregation {
+ a := PercentilesAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ percentiles: make([]float64, 0),
+ }
+ return a
+}
+
+func (a PercentilesAggregation) Field(field string) PercentilesAggregation {
+ a.field = field
+ return a
+}
+
+func (a PercentilesAggregation) Script(script string) PercentilesAggregation {
+ a.script = script
+ return a
+}
+
+func (a PercentilesAggregation) ScriptFile(scriptFile string) PercentilesAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a PercentilesAggregation) Lang(lang string) PercentilesAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a PercentilesAggregation) Format(format string) PercentilesAggregation {
+ a.format = format
+ return a
+}
+
+func (a PercentilesAggregation) Param(name string, value interface{}) PercentilesAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) PercentilesAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a PercentilesAggregation) Percentiles(percentiles ...float64) PercentilesAggregation {
+ a.percentiles = make([]float64, 0)
+ a.percentiles = append(a.percentiles, percentiles...)
+ return a
+}
+
+func (a PercentilesAggregation) Compression(compression float64) PercentilesAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a PercentilesAggregation) Estimator(estimator string) PercentilesAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a PercentilesAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentiles" : {
+ // "field" : "load_time"
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentiles" : { "field" : "load_time" } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentiles"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if len(a.percentiles) > 0 {
+ opts["percents"] = a.percentiles
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles_test.go
new file mode 100644
index 00000000..c8e7522a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles_test.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercentilesAggregation(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesAggregationWithCustomPercents(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesAggregationWithFormat(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price").Format("00000.00")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go
new file mode 100644
index 00000000..5b05423a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go
@@ -0,0 +1,232 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// will be checked against each bucket range and "bucket" the
+// relevant/matching document. Note that this aggregation includes the
+// from value and excludes the to value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ keyed *bool
+ unmapped *bool
+ entries []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewRangeAggregation() RangeAggregation {
+ a := RangeAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]rangeAggregationEntry, 0),
+ }
+ return a
+}
+
+func (a RangeAggregation) Field(field string) RangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a RangeAggregation) Script(script string) RangeAggregation {
+ a.script = script
+ return a
+}
+
+func (a RangeAggregation) ScriptFile(scriptFile string) RangeAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a RangeAggregation) Lang(lang string) RangeAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a RangeAggregation) Param(name string, value interface{}) RangeAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a RangeAggregation) SubAggregation(name string, subAggregation Aggregation) RangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a RangeAggregation) Keyed(keyed bool) RangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a RangeAggregation) Unmapped(unmapped bool) RangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a RangeAggregation) AddRange(from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddRangeWithKey(key string, from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedTo(from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedFrom(to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) Lt(to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) LtWithKey(key string, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) Between(from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) BetweenWithKey(key string, from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) Gt(from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) GtWithKey(key string, from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "price_ranges" : {
+ // "range" : {
+ // "field" : "price",
+ // "ranges" : [
+ // { "to" : 50 },
+ // { "from" : 50, "to" : 100 },
+ // { "from" : 100 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range_test.go
new file mode 100644
index 00000000..771310c3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range_test.go
@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRangeAggregation(t *testing.T) {
+ agg := NewRangeAggregation().Field("price")
+ agg = agg.AddRange(nil, 50)
+ agg = agg.AddRange(50, 100)
+ agg = agg.AddRange(100, nil)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithUnbounded(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ AddUnboundedFrom(50).
+ AddRange(20, 70).
+ AddRange(70, 120).
+ AddUnboundedTo(150)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithLtAndCo(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Lt(50).
+ Between(20, 70).
+ Between(70, 120).
+ Gt(150)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Keyed(true).
+ Lt(50).
+ Between(20, 70).
+ Between(70, 120).
+ Gt(150)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithKeys(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Keyed(true).
+ LtWithKey("cheap", 50).
+ BetweenWithKey("affordable", 20, 70).
+ BetweenWithKey("average", 70, 120).
+ GtWithKey("expensive", 150)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go
new file mode 100644
index 00000000..03082233
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+
+ minDocCount *int
+ shardMinDocCount *int
+ requiredSize *int
+ shardSize *int
+ filter Filter
+ executionHint string
+}
+
+func NewSignificantTermsAggregation() SignificantTermsAggregation {
+ a := SignificantTermsAggregation{
+ subAggregations: make(map[string]Aggregation, 0),
+ }
+ return a
+}
+
+func (a SignificantTermsAggregation) Field(field string) SignificantTermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) SignificantTermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a SignificantTermsAggregation) MinDocCount(minDocCount int) SignificantTermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) SignificantTermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a SignificantTermsAggregation) RequiredSize(requiredSize int) SignificantTermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a SignificantTermsAggregation) ShardSize(shardSize int) SignificantTermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a SignificantTermsAggregation) BackgroundFilter(filter Filter) SignificantTermsAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a SignificantTermsAggregation) ExecutionHint(hint string) SignificantTermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+func (a SignificantTermsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "query" : {
+ // "terms" : {"force" : [ "British Transport Police" ]}
+ // },
+ // "aggregations" : {
+ // "significantCrimeTypes" : {
+ // "significant_terms" : { "field" : "crime_type" }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+ // { "significant_terms" : { "field" : "crime_type" }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_terms"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.requiredSize != nil {
+ opts["size"] = *a.requiredSize // not a typo!
+ }
+ if a.shardSize != nil {
+ opts["shard_size"] = *a.shardSize
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.shardMinDocCount != nil {
+ opts["shard_min_doc_count"] = *a.shardMinDocCount
+ }
+ if a.filter != nil {
+ opts["background_filter"] = a.filter.Source()
+ }
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms_test.go
new file mode 100644
index 00000000..c53740c7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms_test.go
@@ -0,0 +1,56 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSignificantTermsAggregation(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithArgs(t *testing.T) {
+ agg := NewSignificantTermsAggregation().
+ Field("crime_type").
+ ExecutionHint("map").
+ ShardSize(5).
+ MinDocCount(10).
+ BackgroundFilter(NewTermFilter("city", "London"))
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationSubAggregation(t *testing.T) {
+ crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type")
+ agg := NewTermsAggregation().Field("force")
+ agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go
new file mode 100644
index 00000000..2bc6b274
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// StatsAggregation is a multi-value metrics aggregation that computes stats
// over numeric values extracted from the aggregated documents.
// These values can be extracted either from specific numeric fields
// in the documents, or be generated by a provided script.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
type StatsAggregation struct {
	field           string                 // document field to aggregate on
	script          string                 // inline script generating the values
	scriptFile      string                 // script file (serialized as "script_file")
	lang            string                 // script language
	format          string                 // value format pattern
	params          map[string]interface{} // script parameters
	subAggregations map[string]Aggregation // nested aggregations, keyed by name
}

// NewStatsAggregation creates a new StatsAggregation with initialized
// (empty) parameter and sub-aggregation maps.
func NewStatsAggregation() StatsAggregation {
	a := StatsAggregation{
		params:          make(map[string]interface{}),
		subAggregations: make(map[string]Aggregation),
	}
	return a
}
+
// Field sets the document field to compute the stats over.
func (a StatsAggregation) Field(field string) StatsAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the values to aggregate.
func (a StatsAggregation) Script(script string) StatsAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the values
// (serialized as "script_file").
func (a StatsAggregation) ScriptFile(scriptFile string) StatsAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the script language.
func (a StatsAggregation) Lang(lang string) StatsAggregation {
	a.lang = lang
	return a
}

// Format sets the format pattern applied to the values.
func (a StatsAggregation) Format(format string) StatsAggregation {
	a.format = format
	return a
}

// Param adds a named parameter that is passed to the script.
func (a StatsAggregation) Param(name string, value interface{}) StatsAggregation {
	a.params[name] = value
	return a
}

// SubAggregation adds a nested aggregation under the given name.
func (a StatsAggregation) SubAggregation(name string, subAggregation Aggregation) StatsAggregation {
	a.subAggregations[name] = subAggregation
	return a
}
+
+func (a StatsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats_test.go
new file mode 100644
index 00000000..616bfde0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestStatsAggregation(t *testing.T) {
+ agg := NewStatsAggregation().Field("grade")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"stats":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestStatsAggregationWithFormat(t *testing.T) {
+ agg := NewStatsAggregation().Field("grade").Format("0000.0")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"stats":{"field":"grade","format":"0000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go
new file mode 100644
index 00000000..2aaee602
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// SumAggregation is a single-value metrics aggregation that sums up
// numeric values that are extracted from the aggregated documents.
// These values can be extracted either from specific numeric fields
// in the documents, or be generated by a provided script.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
type SumAggregation struct {
	field           string                 // document field to sum over
	script          string                 // inline script generating the values
	scriptFile      string                 // script file (serialized as "script_file")
	lang            string                 // script language
	format          string                 // value format pattern
	params          map[string]interface{} // script parameters
	subAggregations map[string]Aggregation // nested aggregations, keyed by name
}

// NewSumAggregation creates a new SumAggregation with initialized
// (empty) parameter and sub-aggregation maps.
func NewSumAggregation() SumAggregation {
	a := SumAggregation{
		params:          make(map[string]interface{}),
		subAggregations: make(map[string]Aggregation),
	}
	return a
}
+
// Field sets the document field to sum over.
func (a SumAggregation) Field(field string) SumAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the values to sum.
func (a SumAggregation) Script(script string) SumAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the values
// (serialized as "script_file").
func (a SumAggregation) ScriptFile(scriptFile string) SumAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the script language.
func (a SumAggregation) Lang(lang string) SumAggregation {
	a.lang = lang
	return a
}

// Format sets the format pattern applied to the values.
func (a SumAggregation) Format(format string) SumAggregation {
	a.format = format
	return a
}

// Param adds a named parameter that is passed to the script.
func (a SumAggregation) Param(name string, value interface{}) SumAggregation {
	a.params[name] = value
	return a
}

// SubAggregation adds a nested aggregation under the given name.
func (a SumAggregation) SubAggregation(name string, subAggregation Aggregation) SumAggregation {
	a.subAggregations[name] = subAggregation
	return a
}
+
+func (a SumAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "intraday_return" : { "sum" : { "field" : "change" } }
+ // }
+ // }
+ // This method returns only the { "sum" : { "field" : "change" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["sum"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum_test.go
new file mode 100644
index 00000000..de87e793
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSumAggregation(t *testing.T) {
+ agg := NewSumAggregation().Field("price")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"sum":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSumAggregationWithFormat(t *testing.T) {
+ agg := NewSumAggregation().Field("price").Format("00000.00")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"sum":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go
new file mode 100644
index 00000000..d38c0663
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go
@@ -0,0 +1,339 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// TermsAggregation is a multi-bucket value source based aggregation
// where buckets are dynamically built - one per unique value.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
type TermsAggregation struct {
	field           string                 // document field to bucket on
	script          string                 // inline script generating the values
	scriptFile      string                 // script file (serialized as "script_file")
	lang            string                 // script language
	params          map[string]interface{} // script parameters
	subAggregations map[string]Aggregation // nested aggregations, keyed by name

	size                  *int     // number of buckets to return ("size")
	shardSize             *int     // serialized as "shard_size"
	requiredSize          *int     // serialized as "required_size"
	minDocCount           *int     // serialized as "min_doc_count"
	shardMinDocCount      *int     // serialized as "shard_min_doc_count"
	valueType             string   // "string", "long", or "double"
	order                 string   // order key: "_count", "_term", or an aggregation name
	orderAsc              bool     // true for ascending, false for descending order
	includePattern        string   // regex of terms to include
	includeFlags          *int     // regex flags for includePattern
	excludePattern        string   // regex of terms to exclude
	excludeFlags          *int     // regex flags for excludePattern
	executionHint         string   // serialized as "execution_hint"
	collectionMode        string   // "depth_first"/"breadth_first" (serialized as "collect_mode")
	showTermDocCountError *bool    // serialized as "show_term_doc_count_error"
	includeTerms          []string // explicit list of terms to include
	excludeTerms          []string // explicit list of terms to exclude
}
+
+func NewTermsAggregation() TermsAggregation {
+ a := TermsAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation, 0),
+ includeTerms: make([]string, 0),
+ excludeTerms: make([]string, 0),
+ }
+ return a
+}
+
// Field sets the document field to bucket on.
func (a TermsAggregation) Field(field string) TermsAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the values to bucket on.
func (a TermsAggregation) Script(script string) TermsAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the values
// (serialized as "script_file").
func (a TermsAggregation) ScriptFile(scriptFile string) TermsAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the script language.
func (a TermsAggregation) Lang(lang string) TermsAggregation {
	a.lang = lang
	return a
}

// Param adds a named parameter that is passed to the script.
func (a TermsAggregation) Param(name string, value interface{}) TermsAggregation {
	a.params[name] = value
	return a
}

// SubAggregation adds a nested aggregation under the given name.
func (a TermsAggregation) SubAggregation(name string, subAggregation Aggregation) TermsAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// Size sets the number of term buckets to return.
func (a TermsAggregation) Size(size int) TermsAggregation {
	a.size = &size
	return a
}

// RequiredSize is serialized as "required_size".
func (a TermsAggregation) RequiredSize(requiredSize int) TermsAggregation {
	a.requiredSize = &requiredSize
	return a
}

// ShardSize is serialized as "shard_size".
func (a TermsAggregation) ShardSize(shardSize int) TermsAggregation {
	a.shardSize = &shardSize
	return a
}

// MinDocCount is serialized as "min_doc_count".
func (a TermsAggregation) MinDocCount(minDocCount int) TermsAggregation {
	a.minDocCount = &minDocCount
	return a
}

// ShardMinDocCount is serialized as "shard_min_doc_count".
func (a TermsAggregation) ShardMinDocCount(shardMinDocCount int) TermsAggregation {
	a.shardMinDocCount = &shardMinDocCount
	return a
}

// Include keeps only terms matching the given regular expression.
func (a TermsAggregation) Include(regexp string) TermsAggregation {
	a.includePattern = regexp
	return a
}

// IncludeWithFlags is like Include but additionally sets regex flags.
func (a TermsAggregation) IncludeWithFlags(regexp string, flags int) TermsAggregation {
	a.includePattern = regexp
	a.includeFlags = &flags
	return a
}

// Exclude drops terms matching the given regular expression.
func (a TermsAggregation) Exclude(regexp string) TermsAggregation {
	a.excludePattern = regexp
	return a
}

// ExcludeWithFlags is like Exclude but additionally sets regex flags.
func (a TermsAggregation) ExcludeWithFlags(regexp string, flags int) TermsAggregation {
	a.excludePattern = regexp
	a.excludeFlags = &flags
	return a
}
+
// ValueType sets how the terms are interpreted; it can be
// "string", "long", or "double".
func (a TermsAggregation) ValueType(valueType string) TermsAggregation {
	a.valueType = valueType
	return a
}

// Order sets a custom order key and direction for the buckets.
func (a TermsAggregation) Order(order string, asc bool) TermsAggregation {
	a.order = order
	a.orderAsc = asc
	return a
}

// OrderByCount orders buckets by their document count.
func (a TermsAggregation) OrderByCount(asc bool) TermsAggregation {
	// "order" : { "_count" : "asc" }
	a.order = "_count"
	a.orderAsc = asc
	return a
}

// OrderByCountAsc orders buckets by ascending document count.
func (a TermsAggregation) OrderByCountAsc() TermsAggregation {
	return a.OrderByCount(true)
}

// OrderByCountDesc orders buckets by descending document count.
func (a TermsAggregation) OrderByCountDesc() TermsAggregation {
	return a.OrderByCount(false)
}

// OrderByTerm orders buckets by their term.
func (a TermsAggregation) OrderByTerm(asc bool) TermsAggregation {
	// "order" : { "_term" : "asc" }
	a.order = "_term"
	a.orderAsc = asc
	return a
}

// OrderByTermAsc orders buckets by ascending term.
func (a TermsAggregation) OrderByTermAsc() TermsAggregation {
	return a.OrderByTerm(true)
}

// OrderByTermDesc orders buckets by descending term.
func (a TermsAggregation) OrderByTermDesc() TermsAggregation {
	return a.OrderByTerm(false)
}

// OrderByAggregation creates a bucket ordering strategy which sorts buckets
// based on a single-valued calc get.
func (a TermsAggregation) OrderByAggregation(aggName string, asc bool) TermsAggregation {
	// {
	//   "aggs" : {
	//     "genders" : {
	//       "terms" : {
	//         "field" : "gender",
	//         "order" : { "avg_height" : "desc" }
	//       },
	//       "aggs" : {
	//         "avg_height" : { "avg" : { "field" : "height" } }
	//       }
	//     }
	//   }
	// }
	a.order = aggName
	a.orderAsc = asc
	return a
}

// OrderByAggregationAndMetric creates a bucket ordering strategy which
// sorts buckets based on a multi-valued calc get.
func (a TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) TermsAggregation {
	// {
	//   "aggs" : {
	//     "genders" : {
	//       "terms" : {
	//         "field" : "gender",
	//         "order" : { "height_stats.avg" : "desc" }
	//       },
	//       "aggs" : {
	//         "height_stats" : { "stats" : { "field" : "height" } }
	//       }
	//     }
	//   }
	// }
	a.order = aggName + "." + metric
	a.orderAsc = asc
	return a
}

// ExecutionHint is serialized as "execution_hint".
func (a TermsAggregation) ExecutionHint(hint string) TermsAggregation {
	a.executionHint = hint
	return a
}

// CollectionMode can be depth_first or breadth_first as of 1.4.0.
// It is serialized under the "collect_mode" key.
func (a TermsAggregation) CollectionMode(collectionMode string) TermsAggregation {
	a.collectionMode = collectionMode
	return a
}

// ShowTermDocCountError is serialized as "show_term_doc_count_error".
func (a TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) TermsAggregation {
	a.showTermDocCountError = &showTermDocCountError
	return a
}

// IncludeTerms appends to the explicit list of terms to include.
func (a TermsAggregation) IncludeTerms(terms ...string) TermsAggregation {
	a.includeTerms = append(a.includeTerms, terms...)
	return a
}

// ExcludeTerms appends to the explicit list of terms to exclude.
func (a TermsAggregation) ExcludeTerms(terms ...string) TermsAggregation {
	a.excludeTerms = append(a.excludeTerms, terms...)
	return a
}
+
// Source returns the JSON-serializable fragment of this aggregation,
// i.e. the { "terms" : { ... } } part of the search request body.
func (a TermsAggregation) Source() interface{} {
	// Example:
	// {
	//   "aggs" : {
	//     "genders" : {
	//       "terms" : { "field" : "gender" }
	//     }
	//   }
	// }
	// This method returns only the { "terms" : { "field" : "gender" } } part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["terms"] = opts

	// ValuesSourceAggregationBuilder options.
	if a.field != "" {
		opts["field"] = a.field
	}
	if a.script != "" {
		opts["script"] = a.script
	}
	if a.scriptFile != "" {
		opts["script_file"] = a.scriptFile
	}
	if a.lang != "" {
		opts["lang"] = a.lang
	}
	if len(a.params) > 0 {
		opts["params"] = a.params
	}

	// AggregationBuilder (SubAggregations): nested aggregations, keyed by name.
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			aggsMap[name] = aggregate.Source()
		}
	}

	// TermsBuilder options. Sizes and counts are only serialized when
	// explicitly set to a non-negative value.
	if a.size != nil && *a.size >= 0 {
		opts["size"] = *a.size
	}
	if a.shardSize != nil && *a.shardSize >= 0 {
		opts["shard_size"] = *a.shardSize
	}
	if a.requiredSize != nil && *a.requiredSize >= 0 {
		opts["required_size"] = *a.requiredSize
	}
	if a.minDocCount != nil && *a.minDocCount >= 0 {
		opts["min_doc_count"] = *a.minDocCount
	}
	if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 {
		opts["shard_min_doc_count"] = *a.shardMinDocCount
	}
	if a.showTermDocCountError != nil {
		opts["show_term_doc_count_error"] = *a.showTermDocCountError
	}
	if a.collectionMode != "" {
		opts["collect_mode"] = a.collectionMode
	}
	if a.valueType != "" {
		opts["value_type"] = a.valueType
	}
	if a.order != "" {
		o := make(map[string]interface{})
		if a.orderAsc {
			o[a.order] = "asc"
		} else {
			o[a.order] = "desc"
		}
		opts["order"] = o
	}
	if len(a.includeTerms) > 0 {
		opts["include"] = a.includeTerms
	}
	// NOTE(review): if both an include terms list and an include pattern are
	// set, the pattern branch below overwrites the terms list under the
	// "include" key — confirm this precedence is intended.
	if a.includePattern != "" {
		if a.includeFlags == nil || *a.includeFlags == 0 {
			// No flags: serialize the pattern as a plain string.
			opts["include"] = a.includePattern
		} else {
			// With flags: serialize as { "pattern": ..., "flags": ... }.
			p := make(map[string]interface{})
			p["pattern"] = a.includePattern
			p["flags"] = *a.includeFlags
			opts["include"] = p
		}
	}
	if len(a.excludeTerms) > 0 {
		opts["exclude"] = a.excludeTerms
	}
	// NOTE(review): an exclude pattern likewise overwrites the exclude
	// terms list when both are set.
	if a.excludePattern != "" {
		if a.excludeFlags == nil || *a.excludeFlags == 0 {
			opts["exclude"] = a.excludePattern
		} else {
			p := make(map[string]interface{})
			p["pattern"] = a.excludePattern
			p["flags"] = *a.excludeFlags
			opts["exclude"] = p
		}
	}
	if a.executionHint != "" {
		opts["execution_hint"] = a.executionHint
	}
	return source
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms_test.go
new file mode 100644
index 00000000..e3bb7672
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsAggregation(t *testing.T) {
+ agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithSubAggregation(t *testing.T) {
+ subAgg := NewAvgAggregation().Field("height")
+ agg := NewTermsAggregation().Field("gender").Size(10).
+ OrderByAggregation("avg_height", false)
+ agg = agg.SubAggregation("avg_height", subAgg)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) {
+ subAgg1 := NewAvgAggregation().Field("height")
+ subAgg2 := NewAvgAggregation().Field("width")
+ agg := NewTermsAggregation().Field("gender").Size(10).
+ OrderByAggregation("avg_height", false)
+ agg = agg.SubAggregation("avg_height", subAgg1)
+ agg = agg.SubAggregation("avg_width", subAgg2)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_test.go
new file mode 100644
index 00000000..a504e741
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_test.go
@@ -0,0 +1,2627 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestAggs(t *testing.T) {
+ //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tweet1 := tweet{
+ User: "olivere",
+ Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Image: "http://golang.org/doc/gopher/gophercolor.png",
+ Tags: []string{"golang", "elasticsearch"},
+ Location: "48.1333,11.5667", // lat,lon
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Retweets: 0,
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ Location: "48.1189,11.4289", // lat,lon
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Retweets: 12,
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ Location: "47.7167,11.7167", // lat,lon
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ // Terms Aggregate by user name
+ globalAgg := NewGlobalAggregation()
+ usersAgg := NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+ retweetsAgg := NewTermsAggregation().Field("retweets").Size(10)
+ avgRetweetsAgg := NewAvgAggregation().Field("retweets")
+ minRetweetsAgg := NewMinAggregation().Field("retweets")
+ maxRetweetsAgg := NewMaxAggregation().Field("retweets")
+ sumRetweetsAgg := NewSumAggregation().Field("retweets")
+ statsRetweetsAgg := NewStatsAggregation().Field("retweets")
+ extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets")
+ valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets")
+ percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets")
+ percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75)
+ cardinalityAgg := NewCardinalityAggregation().Field("user")
+ significantTermsAgg := NewSignificantTermsAggregation().Field("message")
+ retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100)
+ retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100)
+ dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01")
+ missingTagsAgg := NewMissingAggregation().Field("tags")
+ retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100)
+ dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year")
+ retweetsFilterAgg := NewFilterAggregation().Filter(
+ NewRangeFilter("created").Gte("2012-01-01").Lte("2012-12-31")).
+ SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets"))
+ queryFilterAgg := NewFilterAggregation().Filter(NewQueryFilter(NewTermQuery("tags", "golang")))
+ topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true)
+ topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg)
+ geoBoundsAgg := NewGeoBoundsAggregation().Field("location")
+
+ // Run query
+ builder := client.Search().Index(testIndexName).Query(&all)
+ builder = builder.Aggregation("global", globalAgg)
+ builder = builder.Aggregation("users", usersAgg)
+ builder = builder.Aggregation("retweets", retweetsAgg)
+ builder = builder.Aggregation("avgRetweets", avgRetweetsAgg)
+ builder = builder.Aggregation("minRetweets", minRetweetsAgg)
+ builder = builder.Aggregation("maxRetweets", maxRetweetsAgg)
+ builder = builder.Aggregation("sumRetweets", sumRetweetsAgg)
+ builder = builder.Aggregation("statsRetweets", statsRetweetsAgg)
+ builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg)
+ builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg)
+ builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg)
+ builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg)
+ builder = builder.Aggregation("usersCardinality", cardinalityAgg)
+ builder = builder.Aggregation("significantTerms", significantTermsAgg)
+ builder = builder.Aggregation("retweetsRange", retweetsRangeAgg)
+ builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg)
+ builder = builder.Aggregation("dateRange", dateRangeAgg)
+ builder = builder.Aggregation("missingTags", missingTagsAgg)
+ builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg)
+ builder = builder.Aggregation("dateHisto", dateHistoAgg)
+ builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg)
+ builder = builder.Aggregation("queryFilter", queryFilterAgg)
+ builder = builder.Aggregation("top-tags", topTagsAgg)
+ builder = builder.Aggregation("viewport", geoBoundsAgg)
+ if esversion >= "1.4" {
+ countByUserAgg := NewFiltersAggregation().Filters(NewTermFilter("user", "olivere"), NewTermFilter("user", "sandrae"))
+ builder = builder.Aggregation("countByUser", countByUserAgg)
+ }
+ searchResult, err := builder.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits))
+ }
+ agg := searchResult.Aggregations
+ if agg == nil {
+ t.Fatalf("expected Aggregations != nil; got: nil")
+ }
+
+ // Search for non-existent aggregate should return (nil, false)
+ unknownAgg, found := agg.Terms("no-such-aggregate")
+ if found {
+ t.Errorf("expected unknown aggregation to not be found; got: %v", found)
+ }
+ if unknownAgg != nil {
+ t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg)
+ }
+
+ // Global
+ globalAggRes, found := agg.Global("global")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if globalAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if globalAggRes.DocCount != 3 {
+ t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount)
+ }
+
+ // Search for existent aggregate (by name) should return (aggregate, true)
+ termsAggRes, found := agg.Terms("users")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if termsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(termsAggRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets))
+ }
+ if termsAggRes.Buckets[0].Key != "olivere" {
+ t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key)
+ }
+ if termsAggRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount)
+ }
+ if termsAggRes.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key)
+ }
+ if termsAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount)
+ }
+
+ // A terms aggregate with keys that are not strings
+ retweetsAggRes, found := agg.Terms("retweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if retweetsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(retweetsAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets))
+ }
+
+ if retweetsAggRes.Buckets[0].Key != float64(0) {
+ t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key)
+ } else if got != 0 {
+ t.Errorf("expected %d; got: %d", 0, got)
+ }
+ if retweetsAggRes.Buckets[0].KeyNumber != "0" {
+ t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount)
+ }
+
+ if retweetsAggRes.Buckets[1].Key != float64(12) {
+ t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber)
+ } else if got != 12 {
+ t.Errorf("expected %d; got: %d", 12, got)
+ }
+ if retweetsAggRes.Buckets[1].KeyNumber != "12" {
+ t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount)
+ }
+
+ if retweetsAggRes.Buckets[2].Key != float64(108) {
+ t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber)
+ } else if got != 108 {
+ t.Errorf("expected %d; got: %d", 108, got)
+ }
+ if retweetsAggRes.Buckets[2].KeyNumber != "108" {
+ t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount)
+ }
+
+ // avgRetweets
+ avgAggRes, found := agg.Avg("avgRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if avgAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if avgAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *avgAggRes.Value)
+ }
+ if *avgAggRes.Value != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value)
+ }
+
+ // minRetweets
+ minAggRes, found := agg.Min("minRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if minAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if minAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *minAggRes.Value)
+ }
+ if *minAggRes.Value != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value)
+ }
+
+ // maxRetweets
+ maxAggRes, found := agg.Max("maxRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if maxAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if maxAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *maxAggRes.Value)
+ }
+ if *maxAggRes.Value != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value)
+ }
+
+ // sumRetweets
+ sumAggRes, found := agg.Sum("sumRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if sumAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if sumAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *sumAggRes.Value)
+ }
+ if *sumAggRes.Value != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value)
+ }
+
+ // statsRetweets
+ statsAggRes, found := agg.Stats("statsRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if statsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if statsAggRes.Count != 3 {
+ t.Errorf("expected %d; got: %d", 3, statsAggRes.Count)
+ }
+ if statsAggRes.Min == nil {
+ t.Fatalf("expected != nil; got: %v", *statsAggRes.Min)
+ }
+ if *statsAggRes.Min != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min)
+ }
+ if statsAggRes.Max == nil {
+ t.Fatalf("expected != nil; got: %v", *statsAggRes.Max)
+ }
+ if *statsAggRes.Max != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max)
+ }
+ if statsAggRes.Avg == nil {
+ t.Fatalf("expected != nil; got: %v", *statsAggRes.Avg)
+ }
+ if *statsAggRes.Avg != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg)
+ }
+ if statsAggRes.Sum == nil {
+ t.Fatalf("expected != nil; got: %v", *statsAggRes.Sum)
+ }
+ if *statsAggRes.Sum != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum)
+ }
+
+ // extstatsRetweets
+ extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if extStatsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if extStatsAggRes.Count != 3 {
+ t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count)
+ }
+ if extStatsAggRes.Min == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Min)
+ }
+ if *extStatsAggRes.Min != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min)
+ }
+ if extStatsAggRes.Max == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Max)
+ }
+ if *extStatsAggRes.Max != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max)
+ }
+ if extStatsAggRes.Avg == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Avg)
+ }
+ if *extStatsAggRes.Avg != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg)
+ }
+ if extStatsAggRes.Sum == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Sum)
+ }
+ if *extStatsAggRes.Sum != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum)
+ }
+ if extStatsAggRes.SumOfSquares == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares)
+ }
+ if *extStatsAggRes.SumOfSquares != 11808.0 {
+ t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares)
+ }
+ if extStatsAggRes.Variance == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance)
+ }
+ if *extStatsAggRes.Variance != 2336.0 {
+ t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance)
+ }
+ if extStatsAggRes.StdDeviation == nil {
+ t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation)
+ }
+ if *extStatsAggRes.StdDeviation != 48.33218389437829 {
+ t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation)
+ }
+
+ // valueCountRetweets
+ valueCountAggRes, found := agg.ValueCount("valueCountRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if valueCountAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if valueCountAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value)
+ }
+ if *valueCountAggRes.Value != 3.0 {
+ t.Errorf("expected %v; got: %v", 3.0, *valueCountAggRes.Value)
+ }
+
+ // percentilesRetweets
+ percentilesAggRes, found := agg.Percentiles("percentilesRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentilesAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ // ES 1.4.x returns 7: {"1.0":...}
+ // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...}
+ // So we're relaxing the test here.
+ if len(percentilesAggRes.Values) == 0 {
+ t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values)
+ }
+ if _, found := percentilesAggRes.Values["0.0"]; found {
+ t.Errorf("expected %v; got: %v", false, found)
+ }
+ if percentilesAggRes.Values["1.0"] != 0.24 {
+ t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"])
+ }
+ if percentilesAggRes.Values["25.0"] != 6.0 {
+ t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"])
+ }
+ if percentilesAggRes.Values["99.0"] != 106.08 {
+ t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"])
+ }
+
+ // percentileRanksRetweets
+ percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentileRanksAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(percentileRanksAggRes.Values) == 0 {
+ t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values)
+ }
+ if _, found := percentileRanksAggRes.Values["0.0"]; found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 {
+ t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"])
+ }
+ if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 {
+ t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"])
+ }
+ if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 {
+ t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"])
+ }
+
+ // usersCardinality
+ cardAggRes, found := agg.Cardinality("usersCardinality")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if cardAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if cardAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *cardAggRes.Value)
+ }
+ if *cardAggRes.Value != 2 {
+ t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value)
+ }
+
+ // retweetsFilter
+ filterAggRes, found := agg.Filter("retweetsFilter")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if filterAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if filterAggRes.DocCount != 2 {
+ t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount)
+ }
+
+ // Retrieve sub-aggregation
+ avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub")
+ if !found {
+ t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false")
+ }
+ if avgRetweetsAggRes == nil {
+ t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil")
+ }
+ if avgRetweetsAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value)
+ }
+ if *avgRetweetsAggRes.Value != 54.0 {
+ t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value)
+ }
+
+ // queryFilter
+ queryFilterAggRes, found := agg.Filter("queryFilter")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if queryFilterAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if queryFilterAggRes.DocCount != 2 {
+ t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount)
+ }
+
+ // significantTerms
+ stAggRes, found := agg.SignificantTerms("significantTerms")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if stAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if stAggRes.DocCount != 3 {
+ t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount)
+ }
+ if len(stAggRes.Buckets) != 0 {
+ t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets))
+ }
+
+ // retweetsRange
+ rangeAggRes, found := agg.Range("retweetsRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if rangeAggRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if len(rangeAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets))
+ }
+ if rangeAggRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount)
+ }
+ if rangeAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount)
+ }
+ if rangeAggRes.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount)
+ }
+
+ // retweetsKeyedRange
+ keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if keyedRangeAggRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if len(keyedRangeAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets))
+ }
+ _, found = keyedRangeAggRes.Buckets["no-such-key"]
+ if found {
+ t.Fatalf("expected bucket to not be found; got: %v", found)
+ }
+ bucket, found := keyedRangeAggRes.Buckets["*-10.0"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+ bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+ bucket, found = keyedRangeAggRes.Buckets["100.0-*"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+
+ // dateRange
+ dateRangeRes, found := agg.DateRange("dateRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if dateRangeRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if dateRangeRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount)
+ }
+ if dateRangeRes.Buckets[0].From != nil {
+ t.Fatal("expected From to be nil")
+ }
+ if dateRangeRes.Buckets[0].To == nil {
+ t.Fatal("expected To to be != nil")
+ }
+ if *dateRangeRes.Buckets[0].To != 1.325376e+12 {
+ t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To)
+ }
+ if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString)
+ }
+ if dateRangeRes.Buckets[1].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount)
+ }
+ if dateRangeRes.Buckets[1].From == nil {
+ t.Fatal("expected From to be != nil")
+ }
+ if *dateRangeRes.Buckets[1].From != 1.325376e+12 {
+ t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From)
+ }
+ if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString)
+ }
+ if dateRangeRes.Buckets[1].To == nil {
+ t.Fatal("expected To to be != nil")
+ }
+ if *dateRangeRes.Buckets[1].To != 1.3569984e+12 {
+ t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To)
+ }
+ if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" {
+ t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString)
+ }
+ if dateRangeRes.Buckets[2].DocCount != 0 {
+ t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount)
+ }
+ if dateRangeRes.Buckets[2].To != nil {
+ t.Fatal("expected To to be nil")
+ }
+ if dateRangeRes.Buckets[2].From == nil {
+ t.Fatal("expected From to be != nil")
+ }
+ if *dateRangeRes.Buckets[2].From != 1.3569984e+12 {
+ t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From)
+ }
+ if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString)
+ }
+
+ // missingTags
+ missingRes, found := agg.Missing("missingTags")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if missingRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if missingRes.DocCount != 0 {
+ t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount)
+ }
+
+ // retweetsHisto
+ histoRes, found := agg.Histogram("retweetsHisto")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if histoRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(histoRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets))
+ }
+ if histoRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount)
+ }
+ if histoRes.Buckets[0].Key != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key)
+ }
+ if histoRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount)
+ }
+ if histoRes.Buckets[1].Key != 100.0 {
+ t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key)
+ }
+
+ // dateHisto
+ dateHistoRes, found := agg.DateHistogram("dateHisto")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if dateHistoRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(dateHistoRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets))
+ }
+ if dateHistoRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount)
+ }
+ if dateHistoRes.Buckets[0].Key != 1.29384e+12 {
+ t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key)
+ }
+ if dateHistoRes.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[0].KeyAsString)
+ }
+ if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString)
+ }
+ if dateHistoRes.Buckets[1].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount)
+ }
+ if dateHistoRes.Buckets[1].Key != 1.325376e+12 {
+ t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key)
+ }
+ if dateHistoRes.Buckets[1].KeyAsString == nil {
+ t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[1].KeyAsString)
+ }
+ if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString)
+ }
+
+ // topHits
+ topTags, found := agg.Terms("top-tags")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topTags == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if esversion >= "1.4.0" {
+ if topTags.DocCountErrorUpperBound != 0 {
+ t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound)
+ }
+ if topTags.SumOfOtherDocCount != 1 {
+ t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount)
+ }
+ }
+ if len(topTags.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets))
+ }
+ if topTags.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount)
+ }
+ if topTags.Buckets[0].Key != "golang" {
+ t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key)
+ }
+ topHits, found := topTags.Buckets[0].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if topHits.Hits.TotalHits != 2 {
+ t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits)
+ }
+ if topHits.Hits.Hits == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(topHits.Hits.Hits) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits))
+ }
+ hit := topHits.Hits.Hits[0]
+ if !found {
+ t.Fatalf("expected %v; got: %v", true, found)
+ }
+ if hit == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ var tw tweet
+ if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if tw.Message != "Welcome to Golang and Elasticsearch." {
+ t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message)
+ }
+ if topTags.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount)
+ }
+ if topTags.Buckets[1].Key != "cycling" {
+ t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key)
+ }
+ topHits, found = topTags.Buckets[1].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatal("expected != nil; got nil")
+ }
+ if topHits.Hits.TotalHits != 1 {
+ t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
+ }
+ if topTags.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount)
+ }
+ if topTags.Buckets[2].Key != "elasticsearch" {
+ t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key)
+ }
+ topHits, found = topTags.Buckets[2].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits.TotalHits != 1 {
+ t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
+ }
+
+ // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name)
+ geoBoundsRes, found := agg.GeoBounds("viewport")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if geoBoundsRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+
+ if esversion >= "1.4" {
+ // Filters agg "countByUser"
+ countByUserAggRes, found := agg.Filters("countByUser")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if countByUserAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(countByUserAggRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets))
+ }
+ if countByUserAggRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount)
+ }
+ if countByUserAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount)
+ }
+ }
+}
+
+// TestAggsMarshal ensures that marshaling aggregations back into a string
+// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51
+// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details.
+func TestAggsMarshal(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Single fixture document; its Created year (2012) is what the
+	// date-histogram bucket key below is matched against.
+	tweet1 := tweet{
+		User:     "olivere",
+		Retweets: 108,
+		Message:  "Welcome to Golang and Elasticsearch.",
+		Image:    "http://golang.org/doc/gopher/gophercolor.png",
+		Tags:     []string{"golang", "elasticsearch"},
+		Location: "48.1333,11.5667", // lat,lon
+		Created:  time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+	}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Flush so the freshly indexed document is visible to the search below.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	all := NewMatchAllQuery()
+	dhagg := NewDateHistogramAggregation().Field("created").Interval("year")
+
+	// Run query
+	builder := client.Search().Index(testIndexName).Query(&all)
+	builder = builder.Aggregation("dhagg", dhagg)
+	searchResult, err := builder.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.TotalHits() != 1 {
+		t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits())
+	}
+	if _, found := searchResult.Aggregations["dhagg"]; !found {
+		t.Fatalf("expected aggregation %q", "dhagg")
+	}
+	// Re-serialize the whole result: the raw aggregation payload must come
+	// out as literal JSON, not as a base64-encoded []byte blob.
+	buf, err := json.Marshal(searchResult)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s := string(buf)
+	if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 {
+		t.Errorf("expected to serialize aggregation into string; got: %v", s)
+	}
+}
+
+func TestAggsMin(t *testing.T) {
+ s := `{
+ "min_price": {
+ "value": 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Min("min_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(10) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
+ }
+}
+
+func TestAggsMax(t *testing.T) {
+ s := `{
+ "max_price": {
+ "value": 35
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Max("max_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(35) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value)
+ }
+}
+
+func TestAggsSum(t *testing.T) {
+ s := `{
+ "intraday_return": {
+ "value": 2.18
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Sum("intraday_return")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(2.18) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value)
+ }
+}
+
+func TestAggsAvg(t *testing.T) {
+ s := `{
+ "avg_grade": {
+ "value": 75
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Avg("avg_grade")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(75) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value)
+ }
+}
+
+func TestAggsValueCount(t *testing.T) {
+ s := `{
+ "grades_count": {
+ "value": 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ValueCount("grades_count")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(10) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
+ }
+}
+
+func TestAggsCardinality(t *testing.T) {
+ s := `{
+ "author_count": {
+ "value": 12
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Cardinality("author_count")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(12) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value)
+ }
+}
+
+func TestAggsStats(t *testing.T) {
+ s := `{
+ "grades_stats": {
+ "count": 6,
+ "min": 60,
+ "max": 98,
+ "avg": 78.5,
+ "sum": 471
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Stats("grades_stats")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Count != int64(6) {
+ t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
+ }
+ if agg.Min == nil {
+ t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
+ }
+ if *agg.Min != float64(60) {
+ t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min)
+ }
+ if agg.Max == nil {
+ t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
+ }
+ if *agg.Max != float64(98) {
+ t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max)
+ }
+ if agg.Avg == nil {
+ t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
+ }
+ if *agg.Avg != float64(78.5) {
+ t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg)
+ }
+ if agg.Sum == nil {
+ t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
+ }
+ if *agg.Sum != float64(471) {
+ t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum)
+ }
+}
+
+func TestAggsExtendedStats(t *testing.T) {
+ s := `{
+ "grades_stats": {
+ "count": 6,
+ "min": 72,
+ "max": 117.6,
+ "avg": 94.2,
+ "sum": 565.2,
+ "sum_of_squares": 54551.51999999999,
+ "variance": 218.2799999999976,
+ "std_deviation": 14.774302013969987
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ExtendedStats("grades_stats")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Count != int64(6) {
+ t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
+ }
+ if agg.Min == nil {
+ t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
+ }
+ if *agg.Min != float64(72) {
+ t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min)
+ }
+ if agg.Max == nil {
+ t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
+ }
+ if *agg.Max != float64(117.6) {
+ t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max)
+ }
+ if agg.Avg == nil {
+ t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
+ }
+ if *agg.Avg != float64(94.2) {
+ t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg)
+ }
+ if agg.Sum == nil {
+ t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
+ }
+ if *agg.Sum != float64(565.2) {
+ t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum)
+ }
+ if agg.SumOfSquares == nil {
+ t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares)
+ }
+ if *agg.SumOfSquares != float64(54551.51999999999) {
+ t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares)
+ }
+ if agg.Variance == nil {
+ t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance)
+ }
+ if *agg.Variance != float64(218.2799999999976) {
+ t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance)
+ }
+ if agg.StdDeviation == nil {
+ t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation)
+ }
+ if *agg.StdDeviation != float64(14.774302013969987) {
+ t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation)
+ }
+}
+
+func TestAggsPercentiles(t *testing.T) {
+ s := `{
+ "load_time_outlier": {
+ "values" : {
+ "1.0": 15,
+ "5.0": 20,
+ "25.0": 23,
+ "50.0": 25,
+ "75.0": 29,
+ "95.0": 60,
+ "99.0": 150
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Percentiles("load_time_outlier")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Values == nil {
+ t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+ }
+ if len(agg.Values) != 7 {
+ t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))
+ }
+ if agg.Values["1.0"] != float64(15) {
+ t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"])
+ }
+ if agg.Values["5.0"] != float64(20) {
+ t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"])
+ }
+ if agg.Values["25.0"] != float64(23) {
+ t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"])
+ }
+ if agg.Values["50.0"] != float64(25) {
+ t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"])
+ }
+ if agg.Values["75.0"] != float64(29) {
+ t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
+ }
+ if agg.Values["95.0"] != float64(60) {
+ t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
+ }
+ if agg.Values["99.0"] != float64(150) {
+ t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
+ }
+}
+
+// TestAggsPercentilRanks verifies decoding of a percentile_ranks metric
+// aggregation response with two rank entries.
+func TestAggsPercentilRanks(t *testing.T) {
+	s := `{
+	"load_time_outlier": {
+  	"values" : {
+    	"15": 92,
+    	"30": 100
+  	}
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.PercentileRanks("load_time_outlier")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Values == nil {
+		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+	}
+	// Fixed failure message: it previously claimed 7 expected values
+	// (copy/paste from TestAggsPercentiles) although the assertion is 2.
+	if len(agg.Values) != 2 {
+		t.Fatalf("expected %d aggregation Values; got: %d", 2, len(agg.Values))
+	}
+	if agg.Values["15"] != float64(92) {
+		t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
+	}
+	if agg.Values["30"] != float64(100) {
+		t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
+	}
+}
+
+func TestAggsTopHits(t *testing.T) {
+ s := `{
+ "top-tags": {
+ "buckets": [
+ {
+ "key": "windows-7",
+ "doc_count": 25365,
+ "top_tags_hits": {
+ "hits": {
+ "total": 25365,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602679",
+ "_score": 1,
+ "_source": {
+ "title": "Windows port opening"
+ },
+ "sort": [
+ 1370143231177
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "linux",
+ "doc_count": 18342,
+ "top_tags_hits": {
+ "hits": {
+ "total": 18342,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602672",
+ "_score": 1,
+ "_source": {
+ "title": "Ubuntu RFID Screensaver lock-unlock"
+ },
+ "sort": [
+ 1370143379747
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "windows",
+ "doc_count": 18119,
+ "top_tags_hits": {
+ "hits": {
+ "total": 18119,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602678",
+ "_score": 1,
+ "_source": {
+ "title": "If I change my computers date / time, what could be affected?"
+ },
+ "sort": [
+ 1370142868283
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("top-tags")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "windows-7" {
+ t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[1].Key != "linux" {
+ t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[2].Key != "windows" {
+ t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key)
+ }
+
+ // Sub-aggregation of top-hits
+ subAgg, found := agg.Buckets[0].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 25365 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+
+ subAgg, found = agg.Buckets[1].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 18342 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+
+ subAgg, found = agg.Buckets[2].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 18119 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+}
+
+func TestAggsGlobal(t *testing.T) {
+ s := `{
+ "all_products" : {
+ "doc_count" : 100,
+ "avg_price" : {
+ "value" : 56.3
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Global("all_products")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 100 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("avg_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(56.3) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
+ }
+}
+
+func TestAggsFilter(t *testing.T) {
+ s := `{
+ "in_stock_products" : {
+ "doc_count" : 100,
+ "avg_price" : { "value" : 56.3 }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filter("in_stock_products")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 100 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("avg_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(56.3) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
+ }
+}
+
+func TestAggsFiltersWithBuckets(t *testing.T) {
+ s := `{
+ "messages" : {
+ "buckets" : [
+ {
+ "doc_count" : 34,
+ "monthly" : {
+ "buckets" : []
+ }
+ },
+ {
+ "doc_count" : 439,
+ "monthly" : {
+ "buckets" : []
+ }
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filters("messages")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets))
+ }
+
+ if agg.Buckets[0].DocCount != 34 {
+ t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount)
+ }
+ subAgg, found := agg.Buckets[0].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+
+ if agg.Buckets[1].DocCount != 439 {
+ t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount)
+ }
+ subAgg, found = agg.Buckets[1].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+}
+
+func TestAggsFiltersWithNamedBuckets(t *testing.T) {
+ s := `{
+ "messages" : {
+ "buckets" : {
+ "errors" : {
+ "doc_count" : 34,
+ "monthly" : {
+ "buckets" : []
+ }
+ },
+ "warnings" : {
+ "doc_count" : 439,
+ "monthly" : {
+ "buckets" : []
+ }
+ }
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filters("messages")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.NamedBuckets == nil {
+ t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets)
+ }
+ if len(agg.NamedBuckets) != 2 {
+ t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets))
+ }
+
+ if agg.NamedBuckets["errors"].DocCount != 34 {
+ t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount)
+ }
+ subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+
+ if agg.NamedBuckets["warnings"].DocCount != 439 {
+ t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount)
+ }
+ subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+}
+
+func TestAggsMissing(t *testing.T) {
+ s := `{
+ "products_without_a_price" : {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Missing("products_without_a_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsNested(t *testing.T) {
+ s := `{
+ "resellers": {
+ "min_price": {
+ "value" : 350
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Nested("resellers")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 0 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("min_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(350) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value)
+ }
+}
+
+func TestAggsReverseNested(t *testing.T) {
+ s := `{
+ "comment_to_issue": {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ReverseNested("comment_to_issue")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsChildren(t *testing.T) {
+ s := `{
+ "to-answers": {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Children("to-answers")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsTerms(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : "olivere",
+ "doc_count" : 2
+ }, {
+ "key" : "sandrae",
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "olivere" {
+ t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsTermsWithNumericKeys(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : 17,
+ "doc_count" : 2
+ }, {
+ "key" : 21,
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != float64(17) {
+ t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
+ }
+ if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected to convert key to int64; got: %v", err)
+ } else if got != 17 {
+		t.Errorf("expected key %v; got: %v", 17, got)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != float64(21) {
+ t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
+ }
+ if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected to convert key to int64; got: %v", err)
+ } else if got != 21 {
+		t.Errorf("expected key %v; got: %v", 21, got)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsTermsWithBoolKeys(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : true,
+ "doc_count" : 2
+ }, {
+ "key" : false,
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != true {
+ t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != false {
+ t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsSignificantTerms(t *testing.T) {
+ s := `{
+ "significantCrimeTypes" : {
+ "doc_count": 47347,
+ "buckets" : [
+ {
+ "key": "Bicycle theft",
+ "doc_count": 3640,
+ "score": 0.371235374214817,
+ "bg_count": 66799
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.SignificantTerms("significantCrimeTypes")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 47347 {
+		t.Fatalf("expected aggregation DocCount = %d; got: %d", 47347, agg.DocCount)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 1 {
+ t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "Bicycle theft" {
+ t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 3640 {
+ t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[0].Score != float64(0.371235374214817) {
+ t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)
+ }
+ if agg.Buckets[0].BgCount != 66799 {
+ t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)
+ }
+}
+
+func TestAggsRange(t *testing.T) {
+ s := `{
+ "price_ranges" : {
+ "buckets": [
+ {
+ "to": 50,
+ "doc_count": 2
+ },
+ {
+ "from": 50,
+ "to": 100,
+ "doc_count": 4
+ },
+ {
+ "from": 100,
+ "doc_count": 4
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Range("price_ranges")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(50) {
+ t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(50) {
+ t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if *agg.Buckets[1].To != float64(100) {
+ t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
+ }
+ if agg.Buckets[2].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+ }
+ if *agg.Buckets[2].From != float64(100) {
+ t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
+ }
+ if agg.Buckets[2].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+ }
+ if agg.Buckets[2].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsDateRange(t *testing.T) {
+ s := `{
+ "range": {
+ "buckets": [
+ {
+ "to": 1.3437792E+12,
+ "to_as_string": "08-2012",
+ "doc_count": 7
+ },
+ {
+ "from": 1.3437792E+12,
+ "from_as_string": "08-2012",
+ "doc_count": 2
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.DateRange("range")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(1.3437792E+12) {
+ t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].ToAsString != "08-2012" {
+ t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString)
+ }
+ if agg.Buckets[0].DocCount != 7 {
+ t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(1.3437792E+12) {
+ t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].FromAsString != "08-2012" {
+ t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString)
+ }
+ if agg.Buckets[1].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 2 {
+ t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsIPv4Range(t *testing.T) {
+ s := `{
+ "ip_ranges": {
+ "buckets" : [
+ {
+ "to": 167772165,
+ "to_as_string": "10.0.0.5",
+ "doc_count": 4
+ },
+ {
+ "from": 167772165,
+ "from_as_string": "10.0.0.5",
+ "doc_count": 6
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.IPv4Range("ip_ranges")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(167772165) {
+ t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].ToAsString != "10.0.0.5" {
+ t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString)
+ }
+ if agg.Buckets[0].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(167772165) {
+ t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].FromAsString != "10.0.0.5" {
+ t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString)
+ }
+ if agg.Buckets[1].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 6 {
+ t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsHistogram(t *testing.T) {
+ s := `{
+ "prices" : {
+ "buckets": [
+ {
+ "key": 0,
+ "doc_count": 2
+ },
+ {
+ "key": 50,
+ "doc_count": 4
+ },
+ {
+ "key": 150,
+ "doc_count": 3
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Histogram("prices")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != 0 {
+ t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != 50 {
+ t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString)
+ }
+ if agg.Buckets[1].DocCount != 4 {
+ t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount)
+ }
+ if agg.Buckets[2].Key != 150 {
+ t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key)
+ }
+ if agg.Buckets[2].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString)
+ }
+ if agg.Buckets[2].DocCount != 3 {
+ t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsDateHistogram(t *testing.T) {
+ s := `{
+ "articles_over_time": {
+ "buckets": [
+ {
+ "key_as_string": "2013-02-02",
+ "key": 1328140800000,
+ "doc_count": 1
+ },
+ {
+ "key_as_string": "2013-03-02",
+ "key": 1330646400000,
+ "doc_count": 2
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.DateHistogram("articles_over_time")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != 1328140800000 {
+ t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString)
+ }
+ if *agg.Buckets[0].KeyAsString != "2013-02-02" {
+ t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString)
+ }
+ if agg.Buckets[0].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != 1330646400000 {
+ t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString)
+ }
+ if *agg.Buckets[1].KeyAsString != "2013-03-02" {
+ t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString)
+ }
+ if agg.Buckets[1].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsGeoBounds(t *testing.T) {
+ s := `{
+ "viewport": {
+ "bounds": {
+ "top_left": {
+ "lat": 80.45,
+ "lon": -160.22
+ },
+ "bottom_right": {
+ "lat": 40.65,
+ "lon": 42.57
+ }
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoBounds("viewport")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Bounds.TopLeft.Latitude != float64(80.45) {
+ t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude)
+ }
+ if agg.Bounds.TopLeft.Longitude != float64(-160.22) {
+ t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude)
+ }
+ if agg.Bounds.BottomRight.Latitude != float64(40.65) {
+ t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude)
+ }
+ if agg.Bounds.BottomRight.Longitude != float64(42.57) {
+ t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude)
+ }
+}
+
+func TestAggsGeoHash(t *testing.T) {
+ s := `{
+ "myLarge-GrainGeoHashGrid": {
+ "buckets": [
+ {
+ "key": "svz",
+ "doc_count": 10964
+ },
+ {
+ "key": "sv8",
+ "doc_count": 3198
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "svz" {
+ t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 10964 {
+ t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != "sv8" {
+ t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 3198 {
+ t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsGeoDistance(t *testing.T) {
+ s := `{
+ "rings" : {
+ "buckets": [
+ {
+ "unit": "km",
+ "to": 100.0,
+ "doc_count": 3
+ },
+ {
+ "unit": "km",
+ "from": 100.0,
+ "to": 300.0,
+ "doc_count": 1
+ },
+ {
+ "unit": "km",
+ "from": 300.0,
+ "doc_count": 7
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoDistance("rings")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(100.0) {
+ t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].DocCount != 3 {
+		t.Errorf("expected DocCount = %d; got: %d", 3, agg.Buckets[0].DocCount)
+ }
+
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(100.0) {
+ t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if *agg.Buckets[1].To != float64(300.0) {
+		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+
+ if agg.Buckets[2].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+ }
+ if *agg.Buckets[2].From != float64(300.0) {
+ t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
+ }
+ if agg.Buckets[2].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+ }
+ if agg.Buckets[2].DocCount != 7 {
+ t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsSubAggregates(t *testing.T) {
+ rs := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : "olivere",
+ "doc_count" : 2,
+ "ts" : {
+ "buckets" : [ {
+ "key_as_string" : "2012-01-01T00:00:00.000Z",
+ "key" : 1325376000000,
+ "doc_count" : 2
+ } ]
+ }
+ }, {
+ "key" : "sandrae",
+ "doc_count" : 1,
+ "ts" : {
+ "buckets" : [ {
+ "key_as_string" : "2011-01-01T00:00:00.000Z",
+ "key" : 1293840000000,
+ "doc_count" : 1
+ } ]
+ }
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(rs), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ // Access top-level aggregation
+ users, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected users aggregation to be found; got: %v", found)
+ }
+ if users == nil {
+ t.Fatalf("expected users aggregation; got: %v", users)
+ }
+ if users.Buckets == nil {
+ t.Fatalf("expected users buckets; got: %v", users.Buckets)
+ }
+ if len(users.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
+ }
+ if users.Buckets[0].Key != "olivere" {
+ t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
+ }
+ if users.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
+ }
+ if users.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
+ }
+ if users.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
+ }
+
+ // Access sub-aggregation
+ ts, found := users.Buckets[0].DateHistogram("ts")
+ if !found {
+ t.Fatalf("expected ts aggregation to be found; got: %v", found)
+ }
+ if ts == nil {
+ t.Fatalf("expected ts aggregation; got: %v", ts)
+ }
+ if ts.Buckets == nil {
+ t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
+ }
+ if len(ts.Buckets) != 1 {
+ t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
+ }
+ if ts.Buckets[0].Key != 1325376000000 {
+ t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
+ }
+ if ts.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
+ }
+ if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
+ }
+}
+
+/*
+// TestAggsRawMessage is a test for issue #51 (https://github.com/olivere/elastic/issues/51).
+// See also: http://play.golang.org/p/b8fzGMxrMC
+func TestAggsRawMessage(t *testing.T) {
+ f := json.RawMessage([]byte(`42`))
+ m := Aggregations(map[string]*json.RawMessage{
+ "k": &f,
+ })
+ b, _ := json.Marshal(m)
+ if string(b) != `{"k":42}` {
+ t.Errorf("expected %s; got: %s", `{"k":42}`, string(b))
+ }
+}
+*/
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go
new file mode 100644
index 00000000..49304639
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go
@@ -0,0 +1,150 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TopHitsAggregation keeps track of the most relevant document
+// being aggregated. This aggregator is intended to be used as a
+// sub aggregator, so that the top matching documents
+// can be aggregated per bucket.
+//
+// It can effectively be used to group result sets by certain fields via
+// a bucket aggregator. One or more bucket aggregators determine the
+// properties by which a result set gets sliced into buckets.
+//
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+ searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() TopHitsAggregation {
+ a := TopHitsAggregation{
+ searchSource: NewSearchSource(),
+ }
+ return a
+}
+
+func (a TopHitsAggregation) From(from int) TopHitsAggregation {
+ a.searchSource = a.searchSource.From(from)
+ return a
+}
+
+func (a TopHitsAggregation) Size(size int) TopHitsAggregation {
+ a.searchSource = a.searchSource.Size(size)
+ return a
+}
+
+func (a TopHitsAggregation) TrackScores(trackScores bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.TrackScores(trackScores)
+ return a
+}
+
+func (a TopHitsAggregation) Explain(explain bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Explain(explain)
+ return a
+}
+
+func (a TopHitsAggregation) Version(version bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Version(version)
+ return a
+}
+
+func (a TopHitsAggregation) NoFields() TopHitsAggregation {
+ a.searchSource = a.searchSource.NoFields()
+ return a
+}
+
+func (a TopHitsAggregation) FetchSource(fetchSource bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSource(fetchSource)
+ return a
+}
+
+func (a TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+ return a
+}
+
+func (a TopHitsAggregation) FieldDataFields(fieldDataFields ...string) TopHitsAggregation {
+ a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
+ return a
+}
+
+func (a TopHitsAggregation) FieldDataField(fieldDataField string) TopHitsAggregation {
+ a.searchSource = a.searchSource.FieldDataField(fieldDataField)
+ return a
+}
+
+func (a TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+ return a
+}
+
+func (a TopHitsAggregation) ScriptField(scriptField *ScriptField) TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptField(scriptField)
+ return a
+}
+
+func (a TopHitsAggregation) PartialFields(partialFields ...*PartialField) TopHitsAggregation {
+ a.searchSource = a.searchSource.PartialFields(partialFields...)
+ return a
+}
+
+func (a TopHitsAggregation) PartialField(partialField *PartialField) TopHitsAggregation {
+ a.searchSource = a.searchSource.PartialField(partialField)
+ return a
+}
+
+func (a TopHitsAggregation) Sort(field string, ascending bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Sort(field, ascending)
+ return a
+}
+
+func (a TopHitsAggregation) SortWithInfo(info SortInfo) TopHitsAggregation {
+ a.searchSource = a.searchSource.SortWithInfo(info)
+ return a
+}
+
+func (a TopHitsAggregation) SortBy(sorter ...Sorter) TopHitsAggregation {
+ a.searchSource = a.searchSource.SortBy(sorter...)
+ return a
+}
+
+func (a TopHitsAggregation) Highlight(highlight *Highlight) TopHitsAggregation {
+ a.searchSource = a.searchSource.Highlight(highlight)
+ return a
+}
+
+func (a TopHitsAggregation) Highlighter() *Highlight {
+ return a.searchSource.Highlighter()
+}
+
+func (a TopHitsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs": {
+ // "top_tag_hits": {
+ // "top_hits": {
+ // "sort": [
+ // {
+ // "last_activity_date": {
+ // "order": "desc"
+ // }
+ // }
+ // ],
+ // "_source": {
+ // "include": [
+ // "title"
+ // ]
+ // },
+ // "size" : 1
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "top_hits" : { ... } } part.
+
+ source := make(map[string]interface{})
+ source["top_hits"] = a.searchSource.Source()
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits_test.go
new file mode 100644
index 00000000..474b11ec
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTopHitsAggregation(t *testing.T) {
+ fsc := NewFetchSourceContext(true).Include("title")
+ agg := NewTopHitsAggregation().
+ Sort("last_activity_date", false).
+ FetchSourceContext(fsc).
+ Size(1)
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go
new file mode 100644
index 00000000..b38d7838
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go
@@ -0,0 +1,111 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ValueCountAggregation is a single-value metrics aggregation that counts
+// the number of values that are extracted from the aggregated documents.
+// These values can be extracted either from specific fields in the documents,
+// or be generated by a provided script. Typically, this aggregator will be
+// used in conjunction with other single-value aggregations.
+// For example, when computing the avg one might be interested in the
+// number of values the average is computed over.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+type ValueCountAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewValueCountAggregation() ValueCountAggregation {
+ a := ValueCountAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ValueCountAggregation) Field(field string) ValueCountAggregation {
+ a.field = field
+ return a
+}
+
+func (a ValueCountAggregation) Script(script string) ValueCountAggregation {
+ a.script = script
+ return a
+}
+
+func (a ValueCountAggregation) ScriptFile(scriptFile string) ValueCountAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a ValueCountAggregation) Lang(lang string) ValueCountAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a ValueCountAggregation) Format(format string) ValueCountAggregation {
+ a.format = format
+ return a
+}
+
+func (a ValueCountAggregation) Param(name string, value interface{}) ValueCountAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) ValueCountAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ValueCountAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_count" : { "value_count" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "value_count" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["value_count"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count_test.go
new file mode 100644
index 00000000..247b5f57
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count_test.go
@@ -0,0 +1,37 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestValueCountAggregation(t *testing.T) {
+ agg := NewValueCountAggregation().Field("grade")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"value_count":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestValueCountAggregationWithFormat(t *testing.T) {
+ // Format comes with 1.5.0+
+ agg := NewValueCountAggregation().Field("grade").Format("0000.0")
+ data, err := json.Marshal(agg.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"value_count":{"field":"grade","format":"0000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go
new file mode 100644
index 00000000..2e699800
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go
@@ -0,0 +1,12 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Represents a glimpse into the data.
+// For more details about facets, visit:
+// http://elasticsearch.org/guide/reference/api/search/facets/
+type Facet interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go
new file mode 100644
index 00000000..b13d27e5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go
@@ -0,0 +1,198 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A specific histogram facet that can work with date field types
+// enhancing it over the regular histogram facet.
+// See:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-date-histogram-facet.html
+type DateHistogramFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField *string
+ interval string
+ preZone string
+ preZoneAdjustLargeInterval *bool
+ postZone string
+ preOffset string
+ postOffset string
+ factor *float32
+ comparatorType string
+ valueScript string
+ params map[string]interface{}
+ lang string
+}
+
+func NewDateHistogramFacet() DateHistogramFacet {
+ return DateHistogramFacet{
+ params: make(map[string]interface{}),
+ }
+}
+
+func (f DateHistogramFacet) FacetFilter(filter Facet) DateHistogramFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f DateHistogramFacet) Global(global bool) DateHistogramFacet {
+ f.global = &global
+ return f
+}
+
+func (f DateHistogramFacet) Nested(nested string) DateHistogramFacet {
+ f.nested = nested
+ return f
+}
+
+func (f DateHistogramFacet) Mode(mode string) DateHistogramFacet {
+ f.mode = mode
+ return f
+}
+
+func (f DateHistogramFacet) Field(field string) DateHistogramFacet {
+ f.keyField = field
+ return f
+}
+
+func (f DateHistogramFacet) KeyField(keyField string) DateHistogramFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f DateHistogramFacet) ValueField(valueField string) DateHistogramFacet {
+ f.valueField = &valueField
+ return f
+}
+
+func (f DateHistogramFacet) ValueScript(valueScript string) DateHistogramFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+func (f DateHistogramFacet) Param(name string, value interface{}) DateHistogramFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f DateHistogramFacet) Lang(lang string) DateHistogramFacet {
+ f.lang = lang
+ return f
+}
+
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (f DateHistogramFacet) Interval(interval string) DateHistogramFacet {
+ f.interval = interval
+ return f
+}
+
+func (f DateHistogramFacet) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramFacet {
+ f.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+ return f
+}
+
+func (f DateHistogramFacet) PreZone(preZone string) DateHistogramFacet {
+ f.preZone = preZone
+ return f
+}
+
+func (f DateHistogramFacet) PostZone(postZone string) DateHistogramFacet {
+ f.postZone = postZone
+ return f
+}
+
+func (f DateHistogramFacet) PreOffset(preOffset string) DateHistogramFacet {
+ f.preOffset = preOffset
+ return f
+}
+
+func (f DateHistogramFacet) PostOffset(postOffset string) DateHistogramFacet {
+ f.postOffset = postOffset
+ return f
+}
+
+func (f DateHistogramFacet) Factor(factor float32) DateHistogramFacet {
+ f.factor = &factor
+ return f
+}
+
+func (f DateHistogramFacet) Comparator(comparator string) DateHistogramFacet {
+ f.comparatorType = comparator
+ return f
+}
+
+func (f DateHistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f DateHistogramFacet) Source() interface{} {
+ /*
+ "histo1" : {
+ "date_histogram" : {
+ "field" : "field_name",
+ "interval" : "day"
+ }
+ }
+ */
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ facet := make(map[string]interface{})
+ source["date_histogram"] = facet
+
+ if f.valueField != nil {
+ facet["key_field"] = f.keyField
+ facet["value_field"] = *f.valueField
+ } else {
+ facet["field"] = f.keyField
+ }
+
+ if f.valueScript != "" {
+ facet["value_script"] = f.valueScript
+ if f.lang != "" {
+ facet["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ facet["params"] = f.params
+ }
+ }
+ facet["interval"] = f.interval
+ if f.preZone != "" {
+ facet["pre_zone"] = f.preZone
+ }
+ if f.preZoneAdjustLargeInterval != nil {
+ facet["pre_zone_adjust_large_interval"] = *f.preZoneAdjustLargeInterval
+ }
+ if f.postZone != "" {
+ facet["post_zone"] = f.postZone
+ }
+ if f.preOffset != "" {
+ facet["pre_offset"] = f.preOffset
+ }
+ if f.postOffset != "" {
+ facet["post_offset"] = f.postOffset
+ }
+ if f.factor != nil {
+ facet["factor"] = *f.factor
+ }
+ if f.comparatorType != "" {
+ facet["comparator"] = f.comparatorType
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram_test.go
new file mode 100644
index 00000000..a9ff7164
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDateHistogramFacetWithField(t *testing.T) {
+ f := NewDateHistogramFacet().Field("field_name").Interval("day")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"field":"field_name","interval":"day"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateHistogramFacetWithValueField(t *testing.T) {
+ f := NewDateHistogramFacet().
+ KeyField("timestamp").
+ ValueField("price").
+ Interval("day")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"interval":"day","key_field":"timestamp","value_field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateHistogramFacetWithGlobals(t *testing.T) {
+ f := NewDateHistogramFacet().
+ KeyField("timestamp").
+ ValueField("price").
+ Interval("day").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"interval":"day","key_field":"timestamp","value_field":"price"},"facet_filter":{"term":{"user":"kimchy"}},"global":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go
new file mode 100644
index 00000000..1b5719d9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go
@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter facet (not to be confused with a facet filter) allows you
+// to return a count of the hits matching the filter.
+// The filter itself can be expressed using the Query DSL.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-filter-facet.html
+type FilterFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ filter Filter
+}
+
+func NewFilterFacet() FilterFacet {
+ return FilterFacet{}
+}
+
+func (f FilterFacet) FacetFilter(filter Facet) FilterFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f FilterFacet) Global(global bool) FilterFacet {
+ f.global = &global
+ return f
+}
+
+func (f FilterFacet) Nested(nested string) FilterFacet {
+ f.nested = nested
+ return f
+}
+
+func (f FilterFacet) Mode(mode string) FilterFacet {
+ f.mode = mode
+ return f
+}
+
+func (f FilterFacet) Filter(filter Filter) FilterFacet {
+ f.filter = filter
+ return f
+}
+
+func (f FilterFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f FilterFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ source["filter"] = f.filter.Source()
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter_test.go
new file mode 100644
index 00000000..9566b84e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFilterFacet(t *testing.T) {
+ f := NewFilterFacet().Filter(NewTermFilter("tag", "wow"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"term":{"tag":"wow"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFilterFacetWithGlobals(t *testing.T) {
+ f := NewFilterFacet().Filter(NewTermFilter("tag", "wow")).
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"filter":{"term":{"tag":"wow"}},"global":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go
new file mode 100644
index 00000000..faa3ee7e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go
@@ -0,0 +1,202 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The geo_distance facet is a facet providing information for ranges of
+// distances from a provided geo_point including count of the number of hits
+// that fall within each range, and aggregation information (like total).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-geo-distance-facet.html
+type GeoDistanceFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ fieldName string
+ valueFieldName string
+ lat float64
+ lon float64
+ geoHash string
+ geoDistance string
+ unit string
+ params map[string]interface{}
+ valueScript string
+ lang string
+ entries []geoDistanceFacetEntry
+}
+
+func NewGeoDistanceFacet() GeoDistanceFacet {
+ return GeoDistanceFacet{
+ params: make(map[string]interface{}),
+ entries: make([]geoDistanceFacetEntry, 0),
+ }
+}
+
+func (f GeoDistanceFacet) FacetFilter(filter Facet) GeoDistanceFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f GeoDistanceFacet) Global(global bool) GeoDistanceFacet {
+ f.global = &global
+ return f
+}
+
+func (f GeoDistanceFacet) Nested(nested string) GeoDistanceFacet {
+ f.nested = nested
+ return f
+}
+
+func (f GeoDistanceFacet) Mode(mode string) GeoDistanceFacet {
+ f.mode = mode
+ return f
+}
+
+func (f GeoDistanceFacet) Field(fieldName string) GeoDistanceFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+func (f GeoDistanceFacet) ValueField(valueFieldName string) GeoDistanceFacet {
+ f.valueFieldName = valueFieldName
+ return f
+}
+
+func (f GeoDistanceFacet) ValueScript(valueScript string) GeoDistanceFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+func (f GeoDistanceFacet) Lang(lang string) GeoDistanceFacet {
+ f.lang = lang
+ return f
+}
+
+func (f GeoDistanceFacet) ScriptParam(name string, value interface{}) GeoDistanceFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f GeoDistanceFacet) Point(lat, lon float64) GeoDistanceFacet {
+ f.lat = lat
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFacet) Lat(lat float64) GeoDistanceFacet {
+ f.lat = lat
+ return f
+}
+
+func (f GeoDistanceFacet) Lon(lon float64) GeoDistanceFacet {
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFacet) GeoHash(geoHash string) GeoDistanceFacet {
+ f.geoHash = geoHash
+ return f
+}
+
+func (f GeoDistanceFacet) GeoDistance(geoDistance string) GeoDistanceFacet {
+ f.geoDistance = geoDistance
+ return f
+}
+
+func (f GeoDistanceFacet) AddRange(from, to float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: to})
+ return f
+}
+
+func (f GeoDistanceFacet) AddUnboundedTo(from float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: nil})
+ return f
+}
+
+func (f GeoDistanceFacet) AddUnboundedFrom(to float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: nil, To: to})
+ return f
+}
+
+func (f GeoDistanceFacet) Unit(distanceUnit string) GeoDistanceFacet {
+ f.unit = distanceUnit
+ return f
+}
+
+func (f GeoDistanceFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f GeoDistanceFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["geo_distance"] = opts
+
+ if f.geoHash != "" {
+ opts[f.fieldName] = f.geoHash
+ } else {
+ opts[f.fieldName] = []float64{f.lat, f.lon}
+ }
+ if f.valueFieldName != "" {
+ opts["value_field"] = f.valueFieldName
+ }
+ if f.valueScript != "" {
+ opts["value_script"] = f.valueScript
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range f.entries {
+ r := make(map[string]interface{})
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ if f.unit != "" {
+ opts["unit"] = f.unit
+ }
+ if f.geoDistance != "" {
+ opts["distance_type"] = f.geoDistance
+ }
+
+ return source
+}
+
+type geoDistanceFacetEntry struct {
+ From interface{}
+ To interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance_test.go
new file mode 100644
index 00000000..65efa667
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceFacet(t *testing.T) {
+ f := NewGeoDistanceFacet().Field("pin.location").
+ Point(40, -70).
+ AddUnboundedFrom(10).
+ AddRange(10, 20).
+ AddRange(20, 100).
+ AddUnboundedTo(100)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"pin.location":[40,-70],"ranges":[{"to":10},{"from":10,"to":20},{"from":20,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceFacetWithGlobals(t *testing.T) {
+ f := NewGeoDistanceFacet().Field("pin.location").
+ Point(40, -70).
+ AddUnboundedFrom(10).
+ AddRange(10, 20).
+ AddRange(20, 100).
+ AddUnboundedTo(100).
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"geo_distance":{"pin.location":[40,-70],"ranges":[{"to":10},{"from":10,"to":20},{"from":20,"to":100},{"from":100}]},"global":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go
new file mode 100644
index 00000000..9fa06956
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go
@@ -0,0 +1,110 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+type HistogramFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ interval int64
+ timeInterval string
+ comparatorType string
+}
+
+func NewHistogramFacet() HistogramFacet {
+ return HistogramFacet{
+ interval: -1,
+ }
+}
+
+func (f HistogramFacet) FacetFilter(filter Facet) HistogramFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f HistogramFacet) Global(global bool) HistogramFacet {
+ f.global = &global
+ return f
+}
+
+func (f HistogramFacet) Nested(nested string) HistogramFacet {
+ f.nested = nested
+ return f
+}
+
+func (f HistogramFacet) Mode(mode string) HistogramFacet {
+ f.mode = mode
+ return f
+}
+
+func (f HistogramFacet) Field(field string) HistogramFacet {
+ f.keyField = field
+ return f
+}
+
+func (f HistogramFacet) KeyField(keyField string) HistogramFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f HistogramFacet) ValueField(valueField string) HistogramFacet {
+ f.valueField = valueField
+ return f
+}
+
+func (f HistogramFacet) Interval(interval int64) HistogramFacet {
+ f.interval = interval
+ return f
+}
+
+func (f HistogramFacet) TimeInterval(timeInterval string) HistogramFacet {
+ f.timeInterval = timeInterval
+ return f
+}
+
+func (f HistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f HistogramFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ if f.valueField != "" {
+ opts["key_field"] = f.keyField
+ opts["value_field"] = f.valueField
+ } else {
+ opts["field"] = f.keyField
+ }
+ if f.timeInterval != "" {
+ opts["time_interval"] = f.timeInterval
+ } else {
+ opts["interval"] = f.interval
+ }
+
+ if f.comparatorType != "" {
+ opts["comparator"] = f.comparatorType
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go
new file mode 100644
index 00000000..fcf815f3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go
@@ -0,0 +1,120 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+//
+// HistogramScriptFacet builds a histogram facet whose keys and/or values
+// are produced by scripts rather than plain fields.
+type HistogramScriptFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ lang string // NOTE(review): no Lang setter exists in this file, so lang can never be non-empty
+ keyField string
+ keyScript string
+ valueScript string
+ params map[string]interface{}
+ interval int64
+ comparatorType string
+}
+
+// NewHistogramScriptFacet creates a facet with interval -1 (i.e. "unset",
+// see the interval > 0 guard in Source) and an empty params map.
+func NewHistogramScriptFacet() HistogramScriptFacet {
+ return HistogramScriptFacet{
+ interval: -1,
+ params: make(map[string]interface{}),
+ }
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter;
+// this compiles only because the assignment is interface-compatible.
+func (f HistogramScriptFacet) FacetFilter(filter Facet) HistogramScriptFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f HistogramScriptFacet) Global(global bool) HistogramScriptFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f HistogramScriptFacet) Nested(nested string) HistogramScriptFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f HistogramScriptFacet) Mode(mode string) HistogramScriptFacet {
+ f.mode = mode
+ return f
+}
+
+// KeyField sets the field providing histogram keys; it takes precedence
+// over KeyScript in Source.
+func (f HistogramScriptFacet) KeyField(keyField string) HistogramScriptFacet {
+ f.keyField = keyField
+ return f
+}
+
+// KeyScript sets the script producing histogram keys, used only when no
+// key field is set.
+func (f HistogramScriptFacet) KeyScript(keyScript string) HistogramScriptFacet {
+ f.keyScript = keyScript
+ return f
+}
+
+// ValueScript sets the script producing the aggregated values.
+func (f HistogramScriptFacet) ValueScript(valueScript string) HistogramScriptFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+// Interval sets the bucket interval; only values > 0 are emitted.
+func (f HistogramScriptFacet) Interval(interval int64) HistogramScriptFacet {
+ f.interval = interval
+ return f
+}
+
+// Param adds a named script parameter, emitted under "params".
+func (f HistogramScriptFacet) Param(name string, value interface{}) HistogramScriptFacet {
+ f.params[name] = value
+ return f
+}
+
+// Comparator sets the comparator, emitted as "comparator" when non-empty.
+func (f HistogramScriptFacet) Comparator(comparatorType string) HistogramScriptFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f HistogramScriptFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under the "histogram" key.
+func (f HistogramScriptFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ // A key field wins over a key script; if neither is set, no key entry
+ // is emitted at all.
+ if f.keyField != "" {
+ opts["key_field"] = f.keyField
+ } else if f.keyScript != "" {
+ opts["key_script"] = f.keyScript
+ }
+ // NOTE(review): value_script is emitted unconditionally, even when it
+ // was never set (serializes as an empty string).
+ opts["value_script"] = f.valueScript
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if f.interval > 0 {
+ opts["interval"] = f.interval
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ if f.comparatorType != "" {
+ opts["comparator"] = f.comparatorType
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script_test.go
new file mode 100644
index 00000000..a354205a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script_test.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestHistogramScriptFacetWithKeyScripts pins the serialized form when a
+// key script and a value script are both set.
+func TestHistogramScriptFacetWithKeyScripts(t *testing.T) {
+ f := NewHistogramScriptFacet().
+ KeyScript("doc['date'].date.minuteOfHour").
+ ValueScript("doc['num1'].value")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"key_script":"doc['date'].date.minuteOfHour","value_script":"doc['num1'].value"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestHistogramScriptFacetWithParams pins the serialized form including
+// script parameters under "params".
+func TestHistogramScriptFacetWithParams(t *testing.T) {
+ f := NewHistogramScriptFacet().
+ KeyScript("doc['date'].date.minuteOfHour * factor1").
+ ValueScript("doc['num1'].value * factor2").
+ Param("factor1", 2).
+ Param("factor2", 3)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"key_script":"doc['date'].date.minuteOfHour * factor1","params":{"factor1":2,"factor2":3},"value_script":"doc['num1'].value * factor2"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestHistogramScriptFacetWithGlobals pins the top-level facet_filter and
+// global entries emitted alongside the histogram body.
+func TestHistogramScriptFacetWithGlobals(t *testing.T) {
+ f := NewHistogramScriptFacet().
+ KeyScript("doc['date'].date.minuteOfHour").
+ ValueScript("doc['num1'].value").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"histogram":{"key_script":"doc['date'].date.minuteOfHour","value_script":"doc['num1'].value"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_test.go
new file mode 100644
index 00000000..5645b661
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestHistogramFacetWithField pins the simple "field"/"interval" form of
+// the histogram facet body.
+func TestHistogramFacetWithField(t *testing.T) {
+ f := NewHistogramFacet().Field("field_name").Interval(100)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"field":"field_name","interval":100}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestHistogramFacetWithValueField pins the key_field/value_field form
+// with a time-based interval.
+func TestHistogramFacetWithValueField(t *testing.T) {
+ f := NewHistogramFacet().
+ KeyField("timestamp").
+ ValueField("price").
+ TimeInterval("1.5d")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"key_field":"timestamp","time_interval":"1.5d","value_field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestHistogramFacetWithGlobals pins the top-level facet_filter/global
+// entries emitted alongside the histogram body.
+func TestHistogramFacetWithGlobals(t *testing.T) {
+ f := NewHistogramFacet().
+ KeyField("timestamp").
+ ValueField("price").
+ Interval(1000).
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"histogram":{"interval":1000,"key_field":"timestamp","value_field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go
new file mode 100644
index 00000000..184c8b33
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/query-facet.html
+//
+// QueryFacet wraps an arbitrary Query whose Source() is emitted under the
+// "query" key of the facet body.
+type QueryFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ query Query
+}
+
+// NewQueryFacet creates a zero-valued QueryFacet. A query must be set via
+// Query before calling Source (see the note on Source below).
+func NewQueryFacet() QueryFacet {
+ return QueryFacet{}
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f QueryFacet) FacetFilter(filter Facet) QueryFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f QueryFacet) Global(global bool) QueryFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f QueryFacet) Nested(nested string) QueryFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f QueryFacet) Mode(mode string) QueryFacet {
+ f.mode = mode
+ return f
+}
+
+// Query sets the query the facet counts matches for.
+func (f QueryFacet) Query(query Query) QueryFacet {
+ f.query = query
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f QueryFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body.
+// NOTE(review): f.query is dereferenced without a nil check, so calling
+// Source on a facet whose Query was never set panics.
+func (f QueryFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ source["query"] = f.query.Source()
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query_test.go
new file mode 100644
index 00000000..d5d9348c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestQueryFacet pins the serialized form of a facet wrapping a term query.
+func TestQueryFacet(t *testing.T) {
+ f := NewQueryFacet().Query(NewTermQuery("tag", "wow"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"term":{"tag":"wow"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestQueryFacetWithGlobals pins the top-level facet_filter/global entries
+// emitted alongside the query body.
+func TestQueryFacetWithGlobals(t *testing.T) {
+ f := NewQueryFacet().Query(NewTermQuery("tag", "wow")).
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"query":{"term":{"tag":"wow"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go
new file mode 100644
index 00000000..864b3555
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go
@@ -0,0 +1,158 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// Range facet allows to specify a set of ranges and get both the
+// number of docs (count) that fall within each range,
+// and aggregated data either based on the field, or using another field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html
+type RangeFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ entries []rangeFacetEntry
+}
+
+// rangeFacetEntry is one [From, To) bucket; a nil bound means unbounded.
+type rangeFacetEntry struct {
+ From interface{}
+ To interface{}
+}
+
+// NewRangeFacet creates a RangeFacet with an empty entries slice.
+func NewRangeFacet() RangeFacet {
+ return RangeFacet{
+ entries: make([]rangeFacetEntry, 0),
+ }
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f RangeFacet) FacetFilter(filter Facet) RangeFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f RangeFacet) Global(global bool) RangeFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f RangeFacet) Nested(nested string) RangeFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f RangeFacet) Mode(mode string) RangeFacet {
+ f.mode = mode
+ return f
+}
+
+// Field sets both key and value field to the same name; because the two
+// are then equal, Source emits the simple "field" form.
+func (f RangeFacet) Field(field string) RangeFacet {
+ f.keyField = field
+ f.valueField = field
+ return f
+}
+
+// KeyField sets the field used to bucket documents into ranges.
+func (f RangeFacet) KeyField(keyField string) RangeFacet {
+ f.keyField = keyField
+ return f
+}
+
+// ValueField sets the field whose values are aggregated per range.
+func (f RangeFacet) ValueField(valueField string) RangeFacet {
+ f.valueField = valueField
+ return f
+}
+
+// AddRange appends a bounded range [from, to].
+func (f RangeFacet) AddRange(from, to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: to})
+ return f
+}
+
+// AddUnboundedTo appends a range with only a lower bound, i.e. the "to"
+// side is unbounded (note the argument is the FROM value).
+func (f RangeFacet) AddUnboundedTo(from interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: nil})
+ return f
+}
+
+// AddUnboundedFrom appends a range with only an upper bound, i.e. the
+// "from" side is unbounded (note the argument is the TO value).
+func (f RangeFacet) AddUnboundedFrom(to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: nil, To: to})
+ return f
+}
+
+// Lt is a synonym for AddUnboundedFrom: everything below to.
+func (f RangeFacet) Lt(to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: nil, To: to})
+ return f
+}
+
+// Between is a synonym for AddRange: the bounded range [from, to].
+func (f RangeFacet) Between(from, to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: nil})
+ return f
+}
+
+// Gt is a synonym for AddUnboundedTo: everything above from.
+func (f RangeFacet) Gt(from interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: nil})
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f RangeFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under the "range" key.
+func (f RangeFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["range"] = opts
+
+ // key_field/value_field only when they differ; Field() sets them equal
+ // on purpose so the simple "field" form is used.
+ if f.valueField != "" && f.keyField != f.valueField {
+ opts["key_field"] = f.keyField
+ opts["value_field"] = f.valueField
+ } else {
+ opts["field"] = f.keyField
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range f.entries {
+ r := make(map[string]interface{})
+ // NOTE(review): bounds of any type not listed below (e.g. uint,
+ // int8) are silently dropped from the emitted range.
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range_test.go
new file mode 100644
index 00000000..042393ce
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestRangeFacet pins the serialized ranges built with the Add* methods,
+// including unbounded lower and upper ends.
+func TestRangeFacet(t *testing.T) {
+ f := NewRangeFacet().Field("field_name").
+ AddUnboundedFrom(50).
+ AddRange(20, 70).
+ AddRange(70, 120).
+ AddUnboundedTo(150)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestRangeFacetWithLtAndCo pins that the Lt/Between/Gt synonyms produce
+// the same serialization as the Add* methods above.
+func TestRangeFacetWithLtAndCo(t *testing.T) {
+ f := NewRangeFacet().Field("field_name").
+ Lt(50).
+ Between(20, 70).
+ Between(70, 120).
+ Gt(150)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestRangeFacetWithGlobals pins the top-level facet_filter/global entries
+// emitted alongside the range body.
+func TestRangeFacetWithGlobals(t *testing.T) {
+ f := NewRangeFacet().Field("field_name").
+ AddUnboundedFrom(50).
+ AddRange(20, 70).
+ AddRange(70, 120).
+ AddUnboundedTo(150).
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go
new file mode 100644
index 00000000..5a813a14
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go
@@ -0,0 +1,88 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Statistical facet allows to compute statistical data on a numeric fields.
+// The statistical data include count, total, sum of squares, mean (average),
+// minimum, maximum, variance, and standard deviation.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-statistical-facet.html
+type StatisticalFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ fieldName string
+ fieldNames []string
+}
+
+// NewStatisticalFacet creates a StatisticalFacet with an empty field list.
+func NewStatisticalFacet() StatisticalFacet {
+ return StatisticalFacet{
+ fieldNames: make([]string, 0),
+ }
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f StatisticalFacet) FacetFilter(filter Facet) StatisticalFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f StatisticalFacet) Global(global bool) StatisticalFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f StatisticalFacet) Nested(nested string) StatisticalFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f StatisticalFacet) Mode(mode string) StatisticalFacet {
+ f.mode = mode
+ return f
+}
+
+// Field sets a single field; it is ignored by Source if Fields was also
+// called with at least one name.
+func (f StatisticalFacet) Field(fieldName string) StatisticalFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+// Fields appends one or more fields; when set, they take precedence over
+// the single Field value.
+func (f StatisticalFacet) Fields(fieldNames ...string) StatisticalFacet {
+ f.fieldNames = append(f.fieldNames, fieldNames...)
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f StatisticalFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under "statistical".
+func (f StatisticalFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["statistical"] = opts
+
+ // A single entry in fieldNames collapses to the singular "field" form;
+ // multiple entries use "fields"; otherwise fall back to fieldName.
+ if len(f.fieldNames) > 0 {
+ if len(f.fieldNames) == 1 {
+ opts["field"] = f.fieldNames[0]
+ } else {
+ opts["fields"] = f.fieldNames
+ }
+ } else {
+ opts["field"] = f.fieldName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go
new file mode 100644
index 00000000..36a60d56
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Statistical facet allows to compute statistical data on a numeric fields.
+// The statistical data include count, total, sum of squares, mean (average),
+// minimum, maximum, variance, and standard deviation.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-statistical-facet.html
+//
+// StatisticalScriptFacet is the script-driven variant: the aggregated
+// value is produced by a script instead of a field.
+type StatisticalScriptFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ lang string
+ script string
+ params map[string]interface{}
+}
+
+// NewStatisticalScriptFacet creates a facet with an empty params map.
+func NewStatisticalScriptFacet() StatisticalScriptFacet {
+ return StatisticalScriptFacet{
+ params: make(map[string]interface{}),
+ }
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f StatisticalScriptFacet) FacetFilter(filter Facet) StatisticalScriptFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f StatisticalScriptFacet) Global(global bool) StatisticalScriptFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f StatisticalScriptFacet) Nested(nested string) StatisticalScriptFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f StatisticalScriptFacet) Mode(mode string) StatisticalScriptFacet {
+ f.mode = mode
+ return f
+}
+
+// Lang sets the script language, emitted as "lang" when non-empty.
+func (f StatisticalScriptFacet) Lang(lang string) StatisticalScriptFacet {
+ f.lang = lang
+ return f
+}
+
+// Script sets the script producing the statistics input values.
+func (f StatisticalScriptFacet) Script(script string) StatisticalScriptFacet {
+ f.script = script
+ return f
+}
+
+// Param adds a named script parameter, emitted under "params".
+func (f StatisticalScriptFacet) Param(name string, value interface{}) StatisticalScriptFacet {
+ f.params[name] = value
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f StatisticalScriptFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under "statistical".
+// NOTE(review): "script" is emitted unconditionally, even when it was
+// never set (serializes as an empty string).
+func (f StatisticalScriptFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["statistical"] = opts
+
+ opts["script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script_test.go
new file mode 100644
index 00000000..c1b5c9b6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestStatisticalScriptFacet pins the serialized "script" form.
+func TestStatisticalScriptFacet(t *testing.T) {
+ f := NewStatisticalScriptFacet().Script("doc['num1'].value + doc['num2'].value")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"statistical":{"script":"doc['num1'].value + doc['num2'].value"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestStatisticalScriptFacetWithGlobals pins the top-level
+// facet_filter/global entries emitted alongside the statistical body.
+func TestStatisticalScriptFacetWithGlobals(t *testing.T) {
+ f := NewStatisticalScriptFacet().Script("doc['num1'].value + doc['num2'].value").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"statistical":{"script":"doc['num1'].value + doc['num2'].value"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_test.go
new file mode 100644
index 00000000..2ef10ed3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestStatisticalFacet pins the serialized single-field form.
+func TestStatisticalFacet(t *testing.T) {
+ f := NewStatisticalFacet().Field("num1")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"statistical":{"field":"num1"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestStatisticalFacetWithGlobals pins the top-level facet_filter/global
+// entries emitted alongside the statistical body.
+func TestStatisticalFacetWithGlobals(t *testing.T) {
+ f := NewStatisticalFacet().Field("num1").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"statistical":{"field":"num1"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go
new file mode 100644
index 00000000..a0133420
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go
@@ -0,0 +1,203 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Allow to specify field facets that return the N most frequent terms.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-facet.html
+type TermsFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+
+ fieldName string
+ fields []string
+ size int
+ shardSize *int
+ allTerms *bool
+ exclude []string
+ regex string
+ regexFlags string
+ comparatorType string
+ script string
+ lang string
+ params map[string]interface{}
+ executionHint string
+ index string // NOTE(review): set by Index but never read by Source, so it has no effect on the emitted body
+}
+
+// NewTermsFacet creates a TermsFacet with the default size of 10.
+func NewTermsFacet() TermsFacet {
+ f := TermsFacet{
+ size: 10,
+ fields: make([]string, 0),
+ exclude: make([]string, 0),
+ params: make(map[string]interface{}),
+ }
+ return f
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f TermsFacet) FacetFilter(filter Facet) TermsFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f TermsFacet) Global(global bool) TermsFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f TermsFacet) Nested(nested string) TermsFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f TermsFacet) Mode(mode string) TermsFacet {
+ f.mode = mode
+ return f
+}
+
+// Field sets a single field; ignored by Source when Fields has entries.
+func (f TermsFacet) Field(fieldName string) TermsFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+// Fields appends one or more fields; when set, they take precedence over
+// the single Field value.
+func (f TermsFacet) Fields(fields ...string) TermsFacet {
+ f.fields = append(f.fields, fields...)
+ return f
+}
+
+// ScriptField stores into the same field as Script, so the two setters
+// overwrite each other; the value is emitted as "script".
+func (f TermsFacet) ScriptField(scriptField string) TermsFacet {
+ f.script = scriptField
+ return f
+}
+
+// Exclude appends terms to exclude, emitted as "exclude" when non-empty.
+func (f TermsFacet) Exclude(exclude ...string) TermsFacet {
+ f.exclude = append(f.exclude, exclude...)
+ return f
+}
+
+// Size sets the number of terms to return (always emitted).
+func (f TermsFacet) Size(size int) TermsFacet {
+ f.size = size
+ return f
+}
+
+// ShardSize sets the per-shard size; only emitted when greater than size.
+func (f TermsFacet) ShardSize(shardSize int) TermsFacet {
+ f.shardSize = &shardSize
+ return f
+}
+
+// Regex sets a term-matching pattern, emitted as "regex" when non-empty.
+func (f TermsFacet) Regex(regex string) TermsFacet {
+ f.regex = regex
+ return f
+}
+
+// RegexFlags sets the regex flags; only emitted when Regex is also set.
+func (f TermsFacet) RegexFlags(regexFlags string) TermsFacet {
+ f.regexFlags = regexFlags
+ return f
+}
+
+// Order sets the ordering; same storage as Comparator, emitted as "order".
+func (f TermsFacet) Order(order string) TermsFacet {
+ f.comparatorType = order
+ return f
+}
+
+// Comparator is a synonym for Order (both write comparatorType).
+func (f TermsFacet) Comparator(comparatorType string) TermsFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+// Script sets the script, emitted as "script" when non-empty.
+func (f TermsFacet) Script(script string) TermsFacet {
+ f.script = script
+ return f
+}
+
+// Lang sets the script language; only emitted when a script is set too.
+func (f TermsFacet) Lang(lang string) TermsFacet {
+ f.lang = lang
+ return f
+}
+
+// ExecutionHint sets the execution hint, emitted as "execution_hint".
+func (f TermsFacet) ExecutionHint(hint string) TermsFacet {
+ f.executionHint = hint
+ return f
+}
+
+// Param adds a named script parameter; only emitted when a script is set.
+func (f TermsFacet) Param(name string, value interface{}) TermsFacet {
+ f.params[name] = value
+ return f
+}
+
+// AllTerms controls whether all terms are returned ("all_terms").
+func (f TermsFacet) AllTerms(allTerms bool) TermsFacet {
+ f.allTerms = &allTerms
+ return f
+}
+
+// Index stores an index name. NOTE(review): Source never reads this field,
+// so the value is currently dropped.
+func (f TermsFacet) Index(index string) TermsFacet {
+ f.index = index
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f TermsFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under the "terms" key.
+func (f TermsFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["terms"] = opts
+
+ // A single entry in fields collapses to the singular "field" form;
+ // multiple entries use "fields"; otherwise fall back to fieldName.
+ if len(f.fields) > 0 {
+ if len(f.fields) == 1 {
+ opts["field"] = f.fields[0]
+ } else {
+ opts["fields"] = f.fields
+ }
+ } else {
+ opts["field"] = f.fieldName
+ }
+ opts["size"] = f.size
+ // shard_size only makes sense when larger than size.
+ if f.shardSize != nil && *f.shardSize > f.size {
+ opts["shard_size"] = *f.shardSize
+ }
+ if len(f.exclude) > 0 {
+ opts["exclude"] = f.exclude
+ }
+ if f.regex != "" {
+ opts["regex"] = f.regex
+ if f.regexFlags != "" {
+ opts["regex_flags"] = f.regexFlags
+ }
+ }
+ if f.comparatorType != "" {
+ opts["order"] = f.comparatorType
+ }
+ if f.allTerms != nil {
+ opts["all_terms"] = *f.allTerms
+ }
+ // lang and params are nested under the script guard: without a script
+ // they are never emitted.
+ if f.script != "" {
+ opts["script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+ if f.executionHint != "" {
+ opts["execution_hint"] = f.executionHint
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go
new file mode 100644
index 00000000..8f68f3d6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go
@@ -0,0 +1,142 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The terms_stats facet combines both the terms and statistical allowing
+// to compute stats computed on a field, per term value driven
+// by another field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-stats-facet.html
+type TermsStatsFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ size int
+ shardSize int
+ comparatorType string
+ script string
+ lang string // NOTE(review): no Lang setter exists in this file, so lang can never be non-empty
+ params map[string]interface{}
+}
+
+// NewTermsStatsFacet creates a facet with size and shardSize set to -1
+// ("unset" -- see the size != -1 guard in Source) and an empty params map.
+func NewTermsStatsFacet() TermsStatsFacet {
+ return TermsStatsFacet{
+ size: -1,
+ shardSize: -1,
+ params: make(map[string]interface{}),
+ }
+}
+
+// FacetFilter sets the filter whose Source() is emitted as "facet_filter".
+// NOTE(review): the parameter is typed Facet although the field is Filter.
+func (f TermsStatsFacet) FacetFilter(filter Facet) TermsStatsFacet {
+ f.facetFilter = filter
+ return f
+}
+
+// Global makes the facet run over all documents instead of only the ones
+// matched by the query.
+func (f TermsStatsFacet) Global(global bool) TermsStatsFacet {
+ f.global = &global
+ return f
+}
+
+// Nested sets the nested path, emitted as "nested" when non-empty.
+func (f TermsStatsFacet) Nested(nested string) TermsStatsFacet {
+ f.nested = nested
+ return f
+}
+
+// Mode sets the facet mode; the value is emitted verbatim as "mode".
+func (f TermsStatsFacet) Mode(mode string) TermsStatsFacet {
+ f.mode = mode
+ return f
+}
+
+// KeyField sets the field providing the terms ("key_field", always emitted).
+func (f TermsStatsFacet) KeyField(keyField string) TermsStatsFacet {
+ f.keyField = keyField
+ return f
+}
+
+// ValueField sets the field whose stats are computed per term.
+func (f TermsStatsFacet) ValueField(valueField string) TermsStatsFacet {
+ f.valueField = valueField
+ return f
+}
+
+// Order sets the ordering, emitted as "order" when non-empty.
+func (f TermsStatsFacet) Order(comparatorType string) TermsStatsFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+// Size sets the number of terms to return; -1 (the default) means unset.
+func (f TermsStatsFacet) Size(size int) TermsStatsFacet {
+ f.size = size
+ return f
+}
+
+// ShardSize sets the per-shard size; only emitted when greater than size.
+func (f TermsStatsFacet) ShardSize(shardSize int) TermsStatsFacet {
+ f.shardSize = shardSize
+ return f
+}
+
+// AllTerms requests all terms by setting size to 0; note that Source then
+// emits "size": 0 (since 0 != -1).
+func (f TermsStatsFacet) AllTerms() TermsStatsFacet {
+ f.size = 0
+ return f
+}
+
+// ValueScript sets a script producing the per-term values, emitted as
+// "value_script".
+func (f TermsStatsFacet) ValueScript(script string) TermsStatsFacet {
+ f.script = script
+ return f
+}
+
+// Param adds a named script parameter; only emitted when a value script
+// is set.
+func (f TermsStatsFacet) Param(name string, value interface{}) TermsStatsFacet {
+ f.params[name] = value
+ return f
+}
+
+// addFilterFacetAndGlobal copies the facet-wide options (facet_filter,
+// nested, global, mode) into source; duplicated across all facet types.
+func (f TermsStatsFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+// Source builds the JSON-serializable facet body under "terms_stats".
+func (f TermsStatsFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["terms_stats"] = opts
+
+ opts["key_field"] = f.keyField
+ if f.valueField != "" {
+ opts["value_field"] = f.valueField
+ }
+
+ // lang and params are nested under the script guard: without a value
+ // script they are never emitted.
+ if f.script != "" {
+ opts["value_script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+
+ if f.comparatorType != "" {
+ opts["order"] = f.comparatorType
+ }
+
+ // -1 is the "unset" sentinel from NewTermsStatsFacet; any other value
+ // (including 0 from AllTerms) is emitted.
+ if f.size != -1 {
+ opts["size"] = f.size
+ }
+ if f.shardSize > f.size {
+ opts["shard_size"] = f.shardSize
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats_test.go
new file mode 100644
index 00000000..0395592a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsStatsFacet(t *testing.T) {
+ f := NewTermsStatsFacet().KeyField("tag").ValueField("price")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms_stats":{"key_field":"tag","value_field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsStatsFacetWithGlobals(t *testing.T) {
+ f := NewTermsStatsFacet().KeyField("tag").ValueField("price").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"terms_stats":{"key_field":"tag","value_field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_test.go
new file mode 100644
index 00000000..aaddbe7a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsFacet(t *testing.T) {
+ f := NewTermsFacet().Field("tag").Size(10).Order("term")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"field":"tag","order":"term","size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsFacetWithGlobals(t *testing.T) {
+ f := NewTermsFacet().Field("tag").Size(10).Order("term").
+ Global(true).
+ FacetFilter(NewTermFilter("user", "kimchy"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"facet_filter":{"term":{"user":"kimchy"}},"global":true,"terms":{"field":"tag","order":"term","size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_test.go
new file mode 100644
index 00000000..f1021584
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_test.go
@@ -0,0 +1,533 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ _ "encoding/json"
+ _ "net/http"
+ "testing"
+ "time"
+)
+
+func TestSearchFacets(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{
+ User: "olivere",
+ Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ // Terms Facet by user name
+ userFacet := NewTermsFacet().Field("user").Size(10).Order("count")
+
+ // Terms Facet with numerical key
+ retweetsNumFacet := NewTermsFacet().Field("retweets")
+
+ // Range Facet by retweets
+ retweetsFacet := NewRangeFacet().Field("retweets").Lt(10).Between(10, 100).Gt(100)
+
+ // Histogram Facet by retweets
+ retweetsHistoFacet := NewHistogramFacet().KeyField("retweets").Interval(100)
+
+ // Histogram Facet with time interval by retweets
+ retweetsTimeHistoFacet := NewHistogramFacet().KeyField("retweets").TimeInterval("1m")
+
+ // Date Histogram Facet by creation date
+ dateHisto := NewDateHistogramFacet().Field("created").Interval("year")
+
+ // Date Histogram Facet with Key and Value field by creation date
+ dateHistoWithKeyValue := NewDateHistogramFacet().
+ Interval("year").
+ KeyField("created").
+ ValueField("retweets")
+
+ // Query Facet
+ queryFacet := NewQueryFacet().Query(NewTermQuery("user", "olivere")).Global(true)
+
+ // Range Facet by creation date
+ dateRangeFacet := NewRangeFacet().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01")
+
+ // Range Facet with time.Time by creation date
+ d20120101 := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
+ d20130101 := time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC)
+ dateRangeWithTimeFacet := NewRangeFacet().Field("created").
+ Lt(d20120101).
+ Between(d20120101, d20130101).
+ Gt(d20130101)
+
+ // Terms Stats Facet
+ termsStatsFacet := NewTermsStatsFacet().KeyField("user").ValueField("retweets")
+
+ // Run query
+ searchResult, err := client.Search().Index(testIndexName).
+ Query(&all).
+ Facet("user", userFacet).
+ Facet("retweetsNum", retweetsNumFacet).
+ Facet("retweets", retweetsFacet).
+ Facet("retweetsHistogram", retweetsHistoFacet).
+ Facet("retweetsTimeHisto", retweetsTimeHistoFacet).
+ Facet("dateHisto", dateHisto).
+ Facet("createdWithKeyValue", dateHistoWithKeyValue).
+ Facet("queryFacet", queryFacet).
+ Facet("dateRangeFacet", dateRangeFacet).
+ Facet("dateRangeWithTimeFacet", dateRangeWithTimeFacet).
+ Facet("termsStatsFacet", termsStatsFacet).
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+ if searchResult.Facets == nil {
+ t.Errorf("expected SearchResult.Facets != nil; got nil")
+ }
+
+ // Search for non-existent facet field should return (nil, false)
+ facet, found := searchResult.Facets["no-such-field"]
+ if found {
+ t.Errorf("expected SearchResult.Facets.For(...) = %v; got %v", false, found)
+ }
+ if facet != nil {
+ t.Errorf("expected SearchResult.Facets.For(...) = nil; got %v", facet)
+ }
+
+ // Search for existent facet should return (facet, true)
+ facet, found = searchResult.Facets["user"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"user\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"user\"] != nil; got nil")
+ }
+
+ // Check facet details
+ if facet.Type != "terms" {
+ t.Errorf("expected searchResult.Facets[\"user\"].Type = %v; got %v", "terms", facet.Type)
+ }
+ if facet.Total != 3 {
+ t.Errorf("expected searchResult.Facets[\"user\"].Total = %v; got %v", 3, facet.Total)
+ }
+ if len(facet.Terms) != 2 {
+ t.Errorf("expected len(searchResult.Facets[\"user\"].Terms) = %v; got %v", 2, len(facet.Terms))
+ }
+
+ // Search for retweetsNum facet
+ facet, found = searchResult.Facets["retweetsNum"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"retweetsNum\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"retweetsNum\"] != nil; got nil")
+ }
+ if facet.Type != "terms" {
+ t.Errorf("expected searchResult.Facets[\"retweetsNum\"].Type = %v; got %v", "terms", facet.Type)
+ }
+ if facet.Total != 3 {
+ t.Errorf("expected searchResult.Facets[\"retweetsNum\"].Total = %v; got %v", 3, facet.Total)
+ }
+ if len(facet.Terms) != 3 {
+		t.Errorf("expected len(searchResult.Facets[\"retweetsNum\"].Terms) = %v; got %v", 3, len(facet.Terms))
+ }
+
+ // Search for range facet should return (facet, true)
+ facet, found = searchResult.Facets["retweets"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"retweets\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"retweets\"] != nil; got nil")
+ }
+
+ // Check facet details
+ if facet.Type != "range" {
+ t.Errorf("expected searchResult.Facets[\"retweets\"].Type = %v; got %v", "range", facet.Type)
+ }
+ if len(facet.Ranges) != 3 {
+ t.Errorf("expected len(searchResult.Facets[\"retweets\"].Ranges) = %v; got %v", 3, len(facet.Ranges))
+ }
+
+ if facet.Ranges[0].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][0].Count = %v; got %v", 1, facet.Ranges[0].Count)
+ }
+ if facet.Ranges[0].TotalCount != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][0].TotalCount = %v; got %v", 1, facet.Ranges[0].TotalCount)
+ }
+ if facet.Ranges[0].From != nil {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][0].From = %v; got %v", nil, facet.Ranges[0].From)
+ }
+ if to := facet.Ranges[0].To; to == nil || (*to) != 10.0 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][0].To = %v; got %v", 10.0, to)
+ }
+
+ if facet.Ranges[1].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][1].Count = %v; got %v", 1, facet.Ranges[1].Count)
+ }
+ if facet.Ranges[1].TotalCount != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][1].TotalCount = %v; got %v", 1, facet.Ranges[1].TotalCount)
+ }
+ if from := facet.Ranges[1].From; from == nil || (*from) != 10.0 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][1].From = %v; got %v", 10.0, from)
+ }
+ if to := facet.Ranges[1].To; to == nil || (*to) != 100.0 {
+		t.Errorf("expected searchResult.Facets[\"retweets\"][1].To = %v; got %v", 100.0, to)
+ }
+
+ if facet.Ranges[2].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][2].Count = %v; got %v", 1, facet.Ranges[2].Count)
+ }
+ if facet.Ranges[2].TotalCount != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][2].TotalCount = %v; got %v", 1, facet.Ranges[2].TotalCount)
+ }
+ if from := facet.Ranges[2].From; from == nil || (*from) != 100.0 {
+		t.Errorf("expected searchResult.Facets[\"retweets\"][2].From = %v; got %v", 100.0, from)
+ }
+ if facet.Ranges[2].To != nil {
+ t.Errorf("expected searchResult.Facets[\"retweets\"][2].To = %v; got %v", nil, facet.Ranges[2].To)
+ }
+
+ // Search for histogram facet should return (facet, true)
+ facet, found = searchResult.Facets["retweetsHistogram"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"] != nil; got nil")
+ }
+
+ // Check facet details
+ if facet.Type != "histogram" {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"].Type = %v; got %v", "histogram", facet.Type)
+ }
+ if len(facet.Entries) != 2 {
+		t.Errorf("expected len(searchResult.Facets[\"retweetsHistogram\"].Entries) = %v; got %v", 2, len(facet.Entries))
+ }
+ if facet.Entries[0].Key.(float64) != 0 {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"].Entries[0].Key = %v; got %v", 0, facet.Entries[0].Key)
+ }
+ if facet.Entries[0].Count != 2 {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"].Entries[0].Count = %v; got %v", 2, facet.Entries[0].Count)
+ }
+ if facet.Entries[1].Key.(float64) != 100 {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"].Entries[1].Key = %v; got %v", 100, facet.Entries[1].Key)
+ }
+ if facet.Entries[1].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"retweetsHistogram\"].Entries[1].Count = %v; got %v", 1, facet.Entries[1].Count)
+ }
+
+ // Search for histogram facet with time interval should return (facet, true)
+ facet, found = searchResult.Facets["retweetsTimeHisto"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"retweetsTimeHisto\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"retweetsTimeHisto\"] != nil; got nil")
+ }
+
+ // Search for date histogram facet
+ facet, found = searchResult.Facets["dateHisto"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"dateHisto\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"dateHisto\"] != nil; got nil")
+ }
+ if facet.Entries[0].Time != 1293840000000 {
+ t.Errorf("expected searchResult.Facets[\"dateHisto\"].Entries[0].Time = %v; got %v", 1293840000000, facet.Entries[0].Time)
+ }
+ if facet.Entries[0].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"dateHisto\"].Entries[0].Count = %v; got %v", 1, facet.Entries[0].Count)
+ }
+ if facet.Entries[1].Time != 1325376000000 {
+		t.Errorf("expected searchResult.Facets[\"dateHisto\"].Entries[1].Time = %v; got %v", 1325376000000, facet.Entries[1].Time)
+ }
+ if facet.Entries[1].Count != 2 {
+ t.Errorf("expected searchResult.Facets[\"dateHisto\"].Entries[1].Count = %v; got %v", 2, facet.Entries[1].Count)
+ }
+
+ // Search for date histogram with key/value fields facet
+ facet, found = searchResult.Facets["createdWithKeyValue"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"] != nil; got nil")
+ }
+ if len(facet.Entries) != 2 {
+ t.Errorf("expected len(searchResult.Facets[\"createdWithKeyValue\"].Entries) = %v; got %v", 2, len(facet.Entries))
+ }
+ if facet.Entries[0].Time != 1293840000000 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Time = %v; got %v", 1293840000000, facet.Entries[0].Time)
+ }
+ if facet.Entries[0].Count != 1 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Count = %v; got %v", 1, facet.Entries[0].Count)
+ }
+ if facet.Entries[0].Min.(float64) != 12.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Min = %v; got %v", 12.0, facet.Entries[0].Min)
+ }
+ if facet.Entries[0].Max.(float64) != 12.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Max = %v; got %v", 12.0, facet.Entries[0].Max)
+ }
+ if facet.Entries[0].Total != 12.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Total = %v; got %v", 12.0, facet.Entries[0].Total)
+ }
+ if facet.Entries[0].TotalCount != 1 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].TotalCount = %v; got %v", 1, facet.Entries[0].TotalCount)
+ }
+ if facet.Entries[0].Mean != 12.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[0].Mean = %v; got %v", 12.0, facet.Entries[0].Mean)
+ }
+ if facet.Entries[1].Time != 1325376000000 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Time = %v; got %v", 1325376000000, facet.Entries[1].Time)
+ }
+ if facet.Entries[1].Count != 2 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Count = %v; got %v", 2, facet.Entries[1].Count)
+ }
+ if facet.Entries[1].Min.(float64) != 0.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Min = %v; got %v", 0.0, facet.Entries[1].Min)
+ }
+ if facet.Entries[1].Max.(float64) != 108.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Max = %v; got %v", 108.0, facet.Entries[1].Max)
+ }
+ if facet.Entries[1].Total != 108.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Total = %v; got %v", 108.0, facet.Entries[1].Total)
+ }
+ if facet.Entries[1].TotalCount != 2 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].TotalCount = %v; got %v", 2, facet.Entries[1].TotalCount)
+ }
+ if facet.Entries[1].Mean != 54.0 {
+ t.Errorf("expected searchResult.Facets[\"createdWithKeyValue\"].Entries[1].Mean = %v; got %v", 54.0, facet.Entries[1].Mean)
+ }
+
+ // Search for date range facet
+ facet, found = searchResult.Facets["dateRangeFacet"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"] != nil; got nil")
+ }
+ if len(facet.Ranges) != 3 {
+ t.Errorf("expected len(searchResult.Facets[\"dateRangeFacet\"].Ranges) = %v; got %v", 3, len(facet.Ranges))
+ }
+ if facet.Ranges[0].From != nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[0].From to be nil")
+ }
+ if facet.Ranges[0].To == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[0].To to be != nil")
+ }
+ if *facet.Ranges[0].To != 1.325376e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[0].To = %v; got %v", 1.325376e+12, *facet.Ranges[0].To)
+ }
+ if facet.Ranges[0].ToStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[0].ToStr to be != nil")
+ }
+ if *facet.Ranges[0].ToStr != "2012-01-01" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[0].ToStr = %v; got %v", "2012-01-01", *facet.Ranges[0].ToStr)
+ }
+ if facet.Ranges[1].From == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].From to be != nil")
+ }
+ if *facet.Ranges[1].From != 1.325376e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].From = %v; got %v", 1.325376e+12, *facet.Ranges[1].From)
+ }
+ if facet.Ranges[1].FromStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].FromStr to be != nil")
+ }
+ if *facet.Ranges[1].FromStr != "2012-01-01" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].FromStr = %v; got %v", "2012-01-01", *facet.Ranges[1].FromStr)
+ }
+ if facet.Ranges[1].To == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].To to be != nil")
+ }
+ if *facet.Ranges[1].To != 1.3569984e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].To = %v; got %v", 1.3569984e+12, *facet.Ranges[1].To)
+ }
+ if facet.Ranges[1].ToStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].ToStr to be != nil")
+ }
+ if *facet.Ranges[1].ToStr != "2013-01-01" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[1].ToStr = %v; got %v", "2013-01-01", *facet.Ranges[1].ToStr)
+ }
+ if facet.Ranges[2].To != nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[2].To to be nil")
+ }
+ if facet.Ranges[2].From == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[2].From to be != nil")
+ }
+ if *facet.Ranges[2].From != 1.3569984e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[2].From = %v; got %v", 1.3569984e+12, *facet.Ranges[2].From)
+ }
+ if facet.Ranges[2].FromStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[2].FromStr to be != nil")
+ }
+ if *facet.Ranges[2].FromStr != "2013-01-01" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeFacet\"].Ranges[2].FromStr = %v; got %v", "2013-01-01", *facet.Ranges[2].FromStr)
+ }
+
+ // Search for date range facet
+ facet, found = searchResult.Facets["dateRangeWithTimeFacet"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"] != nil; got nil")
+ }
+ if len(facet.Ranges) != 3 {
+ t.Errorf("expected len(searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges) = %v; got %v", 3, len(facet.Ranges))
+ }
+ if facet.Ranges[0].From != nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[0].From to be nil")
+ }
+ if facet.Ranges[0].To == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[0].To to be != nil")
+ }
+ if *facet.Ranges[0].To != 1.325376e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[0].To = %v; got %v", 1.325376e+12, *facet.Ranges[0].To)
+ }
+ if facet.Ranges[0].ToStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[0].ToStr to be != nil")
+ }
+ if *facet.Ranges[0].ToStr != "2012-01-01T00:00:00Z" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[0].ToStr = %v; got %v", "2012-01-01T00:00:00Z", *facet.Ranges[0].ToStr)
+ }
+ if facet.Ranges[1].From == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].From to be != nil")
+ }
+ if *facet.Ranges[1].From != 1.325376e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].From = %v; got %v", 1.325376e+12, *facet.Ranges[1].From)
+ }
+ if facet.Ranges[1].FromStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].FromStr to be != nil")
+ }
+ if *facet.Ranges[1].FromStr != "2012-01-01T00:00:00Z" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].FromStr = %v; got %v", "2012-01-01T00:00:00Z", *facet.Ranges[1].FromStr)
+ }
+ if facet.Ranges[1].To == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].To to be != nil")
+ }
+ if *facet.Ranges[1].To != 1.3569984e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].To = %v; got %v", 1.3569984e+12, *facet.Ranges[1].To)
+ }
+ if facet.Ranges[1].ToStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].ToStr to be != nil")
+ }
+ if *facet.Ranges[1].ToStr != "2013-01-01T00:00:00Z" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[1].ToStr = %v; got %v", "2013-01-01T00:00:00Z", *facet.Ranges[1].ToStr)
+ }
+ if facet.Ranges[2].To != nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].To to be nil")
+ }
+ if facet.Ranges[2].From == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].From to be != nil")
+ }
+ if *facet.Ranges[2].From != 1.3569984e+12 {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].From = %v; got %v", 1.3569984e+12, *facet.Ranges[2].From)
+ }
+ if facet.Ranges[2].FromStr == nil {
+ t.Fatalf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].FromStr to be != nil")
+ }
+ if *facet.Ranges[2].FromStr != "2013-01-01T00:00:00Z" {
+ t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].FromStr = %v; got %v", "2013-01-01T00:00:00Z", *facet.Ranges[2].FromStr)
+ }
+
+ // Search for terms_stats facet
+ facet, found = searchResult.Facets["termsStatsFacet"]
+ if !found {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"] = %v; got %v", true, found)
+ }
+ if facet == nil {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"] != nil; got nil")
+ }
+
+ // Check facet details
+ if got, want := facet.Type, "terms_stats"; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Type = %v; got %v", want, got)
+ }
+ if got, want := len(facet.Terms), 2; got != want {
+ t.Errorf("expected len(searchResult.Facets[\"termsStatsFacet\"].Terms) = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].Term, "olivere"; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Term = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].Count, 2; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Count = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].TotalCount, 2; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].TotalCount = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].Min, 0.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Min = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].Max, 108.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Max = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[0].Mean, 54.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Mean = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].Term, "sandrae"; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Term = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].Count, 1; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Count = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].TotalCount, 1; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].TotalCount = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].Min, 12.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Min = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].Max, 12.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Max = %v; got %v", want, got)
+ }
+ if got, want := facet.Terms[1].Mean, 12.0; got != want {
+ t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Mean = %v; got %v", want, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go
new file mode 100644
index 00000000..60c01177
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents using AND boolean operator
+// on other filters. Can be placed within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-and-filter.html
+type AndFilter struct {
+ filters []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewAndFilter(filters ...Filter) AndFilter {
+ f := AndFilter{
+ filters: make([]Filter, 0),
+ }
+ if len(filters) > 0 {
+ f.filters = append(f.filters, filters...)
+ }
+ return f
+}
+
+func (f AndFilter) Add(filter Filter) AndFilter {
+ f.filters = append(f.filters, filter)
+ return f
+}
+
+func (f AndFilter) Cache(cache bool) AndFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f AndFilter) CacheKey(cacheKey string) AndFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f AndFilter) FilterName(filterName string) AndFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f AndFilter) Source() interface{} {
+ // {
+ // "and" : [
+ // ... filters ...
+ // ]
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["and"] = params
+
+ filters := make([]interface{}, 0)
+ for _, filter := range f.filters {
+ filters = append(filters, filter.Source())
+ }
+ params["filters"] = filters
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and_test.go
new file mode 100644
index 00000000..c1fb3467
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestAndFilter(t *testing.T) {
+ f := NewAndFilter()
+ postDateFilter := NewRangeFilter("postDate").From("2010-03-01").To("2010-04-01")
+ f = f.Add(postDateFilter)
+ prefixFilter := NewPrefixFilter("name.second", "ba")
+ f = f.Add(prefixFilter)
+ f = f.Cache(true)
+ f = f.CacheKey("MyAndFilter")
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"and":{"_cache":true,"_cache_key":"MyAndFilter","_name":"MyFilterName","filters":[{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}},{"prefix":{"name.second":"ba"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNewAndFilter1(t *testing.T) {
+ f := NewAndFilter(NewTermFilter("user", "olivere"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"and":{"filters":[{"term":{"user":"olivere"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNewAndFilter2(t *testing.T) {
+ tf := NewTermsFilter("user", "oliver", "test")
+ mf := NewMissingFilter("user")
+ f := NewAndFilter(tf, mf)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"and":{"filters":[{"terms":{"user":["oliver","test"]}},{"missing":{"field":"user"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go
new file mode 100644
index 00000000..75d1e869
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go
@@ -0,0 +1,135 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents matching boolean combinations
+// of other queries. Similar in concept to Boolean query,
+// except that the clauses are other filters.
+// Can be placed within queries that accept a filter.
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-bool-filter.html
+type BoolFilter struct {
+ mustClauses []Filter
+ shouldClauses []Filter
+ mustNotClauses []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewBoolFilter creates a new bool filter.
+func NewBoolFilter() BoolFilter {
+ f := BoolFilter{
+ mustClauses: make([]Filter, 0),
+ shouldClauses: make([]Filter, 0),
+ mustNotClauses: make([]Filter, 0),
+ }
+ return f
+}
+
+func (f BoolFilter) Must(filters ...Filter) BoolFilter {
+ f.mustClauses = append(f.mustClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) MustNot(filters ...Filter) BoolFilter {
+ f.mustNotClauses = append(f.mustNotClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) Should(filters ...Filter) BoolFilter {
+ f.shouldClauses = append(f.shouldClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) FilterName(filterName string) BoolFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f BoolFilter) Cache(cache bool) BoolFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f BoolFilter) CacheKey(cacheKey string) BoolFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+// Creates the query source for the bool query.
+func (f BoolFilter) Source() interface{} {
+ // {
+ // "bool" : {
+ // "must" : {
+ // "term" : { "user" : "kimchy" }
+ // },
+ // "must_not" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // },
+ // "should" : [
+ // {
+ // "term" : { "tag" : "wow" }
+ // },
+ // {
+ // "term" : { "tag" : "elasticsearch" }
+ // }
+ // ],
+ // "_cache" : true
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ boolClause := make(map[string]interface{})
+ source["bool"] = boolClause
+
+ // must
+ if len(f.mustClauses) == 1 {
+ boolClause["must"] = f.mustClauses[0].Source()
+ } else if len(f.mustClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.mustClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must"] = clauses
+ }
+
+ // must_not
+ if len(f.mustNotClauses) == 1 {
+ boolClause["must_not"] = f.mustNotClauses[0].Source()
+ } else if len(f.mustNotClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.mustNotClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must_not"] = clauses
+ }
+
+ // should
+ if len(f.shouldClauses) == 1 {
+ boolClause["should"] = f.shouldClauses[0].Source()
+ } else if len(f.shouldClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.shouldClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["should"] = clauses
+ }
+
+ if f.filterName != "" {
+ boolClause["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ boolClause["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ boolClause["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool_test.go
new file mode 100644
index 00000000..089dfd83
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBoolFilter(t *testing.T) {
+ f := NewBoolFilter()
+ f = f.Must(NewTermFilter("tag", "wow"))
+ f = f.MustNot(NewRangeFilter("age").From(10).To(20))
+ f = f.Should(NewTermFilter("tag", "sometag"), NewTermFilter("tag", "sometagtag"))
+ f = f.Cache(true)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"bool":{"_cache":true,"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go
new file mode 100644
index 00000000..7785880d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents where a specific field has a value in them.
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/exists-filter.html
+type ExistsFilter struct {
+ Filter
+ name string
+ filterName string
+}
+
+func NewExistsFilter(name string) ExistsFilter {
+ f := ExistsFilter{name: name}
+ return f
+}
+
+func (f ExistsFilter) FilterName(filterName string) ExistsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f ExistsFilter) Source() interface{} {
+ // {
+ // "exists" : {
+ // "field" : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["exists"] = params
+ params["field"] = f.name
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists_test.go
new file mode 100644
index 00000000..8931ec39
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestExistsFilter(t *testing.T) {
+ f := NewExistsFilter("user").FilterName("_my_filter")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"exists":{"_name":"_my_filter","field":"user"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go
new file mode 100644
index 00000000..17f88124
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceFilter filters documents that include only hits that exist
+// within a specific distance from a geo point.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html
+type GeoDistanceFilter struct {
+ Filter
+ name string
+ distance string
+ lat float64
+ lon float64
+ geohash string
+ distanceType string
+ optimizeBbox string
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewGeoDistanceFilter creates a new GeoDistanceFilter.
+func NewGeoDistanceFilter(name string) GeoDistanceFilter {
+ f := GeoDistanceFilter{name: name}
+ return f
+}
+
+func (f GeoDistanceFilter) Distance(distance string) GeoDistanceFilter {
+ f.distance = distance
+ return f
+}
+
+func (f GeoDistanceFilter) GeoPoint(point *GeoPoint) GeoDistanceFilter {
+ f.lat = point.Lat
+ f.lon = point.Lon
+ return f
+}
+
+func (f GeoDistanceFilter) Point(lat, lon float64) GeoDistanceFilter {
+ f.lat = lat
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFilter) Lat(lat float64) GeoDistanceFilter {
+ f.lat = lat
+ return f
+}
+
+func (f GeoDistanceFilter) Lon(lon float64) GeoDistanceFilter {
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFilter) GeoHash(geohash string) GeoDistanceFilter {
+ f.geohash = geohash
+ return f
+}
+
+func (f GeoDistanceFilter) DistanceType(distanceType string) GeoDistanceFilter {
+ f.distanceType = distanceType
+ return f
+}
+
+func (f GeoDistanceFilter) OptimizeBbox(optimizeBbox string) GeoDistanceFilter {
+ f.optimizeBbox = optimizeBbox
+ return f
+}
+
+func (f GeoDistanceFilter) Cache(cache bool) GeoDistanceFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f GeoDistanceFilter) CacheKey(cacheKey string) GeoDistanceFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f GeoDistanceFilter) FilterName(filterName string) GeoDistanceFilter {
+ f.filterName = filterName
+ return f
+}
+
+// Creates the query source for the geo_distance filter.
+func (f GeoDistanceFilter) Source() interface{} {
+ // {
+ // "geo_distance" : {
+ // "distance" : "200km",
+ // "pin.location" : {
+ // "lat" : 40,
+ // "lon" : -70
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+
+ if f.geohash != "" {
+ params[f.name] = f.geohash
+ } else {
+ location := make(map[string]interface{})
+ location["lat"] = f.lat
+ location["lon"] = f.lon
+ params[f.name] = location
+ }
+
+ if f.distance != "" {
+ params["distance"] = f.distance
+ }
+ if f.distanceType != "" {
+ params["distance_type"] = f.distanceType
+ }
+ if f.optimizeBbox != "" {
+ params["optimize_bbox"] = f.optimizeBbox
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ source["geo_distance"] = params
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance_test.go
new file mode 100644
index 00000000..3eca1096
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceFilter(t *testing.T) {
+ f := NewGeoDistanceFilter("pin.location")
+ f = f.Lat(40)
+ f = f.Lon(-70)
+ f = f.Distance("200km")
+ f = f.DistanceType("plane")
+ f = f.OptimizeBbox("memory")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceFilterWithGeoPoint(t *testing.T) {
+ f := NewGeoDistanceFilter("pin.location")
+ f = f.GeoPoint(GeoPointFromLatLon(40, -70))
+ f = f.Distance("200km")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceFilterWithGeoHash(t *testing.T) {
+ f := NewGeoDistanceFilter("pin.location")
+ f = f.GeoHash("drm3btev3e86")
+ f = f.Distance("12km")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go
new file mode 100644
index 00000000..7032bccc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go
@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter allowing to include hits that only fall within a polygon of points.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-filter.html
+type GeoPolygonFilter struct {
+ Filter
+ name string
+ points []*GeoPoint
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewGeoPolygonFilter(name string) GeoPolygonFilter {
+ f := GeoPolygonFilter{name: name, points: make([]*GeoPoint, 0)}
+ return f
+}
+
+func (f GeoPolygonFilter) Cache(cache bool) GeoPolygonFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f GeoPolygonFilter) CacheKey(cacheKey string) GeoPolygonFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f GeoPolygonFilter) FilterName(filterName string) GeoPolygonFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f GeoPolygonFilter) AddPoint(point *GeoPoint) GeoPolygonFilter {
+ f.points = append(f.points, point)
+ return f
+}
+
+func (f GeoPolygonFilter) Source() interface{} {
+ // "geo_polygon" : {
+ // "person.location" : {
+ // "points" : [
+ // {"lat" : 40, "lon" : -70},
+ // {"lat" : 30, "lon" : -80},
+ // {"lat" : 20, "lon" : -90}
+ // ]
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["geo_polygon"] = params
+
+ polygon := make(map[string]interface{})
+ params[f.name] = polygon
+
+ points := make([]interface{}, 0)
+ for _, point := range f.points {
+ points = append(points, point.Source())
+ }
+ polygon["points"] = points
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon_test.go
new file mode 100644
index 00000000..c33a02f2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoPolygonFilter(t *testing.T) {
+ f := NewGeoPolygonFilter("person.location")
+ f = f.AddPoint(&GeoPoint{Lat: 40, Lon: -70})
+ f = f.AddPoint(GeoPointFromLatLon(30, -80))
+ point, err := GeoPointFromString("20,-90")
+ if err != nil {
+ t.Fatalf("GeoPointFromString failed: %v", err)
+ }
+ f = f.AddPoint(point)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go
new file mode 100644
index 00000000..6d291d1c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go
@@ -0,0 +1,125 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_child query works the same as the has_child filter,
+// by automatically wrapping the filter with a constant_score
+// (when using the default score type).
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html
+type HasChildFilter struct {
+ filter Filter
+ query Query
+ childType string
+ filterName string
+ cache *bool
+ cacheKey string
+ shortCircuitCutoff *int
+ minChildren *int
+ maxChildren *int
+ innerHit *InnerHit
+}
+
+// NewHasChildFilter creates a new has_child filter.
+func NewHasChildFilter(childType string) HasChildFilter {
+ f := HasChildFilter{
+ childType: childType,
+ }
+ return f
+}
+
+func (f HasChildFilter) Query(query Query) HasChildFilter {
+ f.query = query
+ return f
+}
+
+func (f HasChildFilter) Filter(filter Filter) HasChildFilter {
+ f.filter = filter
+ return f
+}
+
+func (f HasChildFilter) FilterName(filterName string) HasChildFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f HasChildFilter) Cache(cache bool) HasChildFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f HasChildFilter) CacheKey(cacheKey string) HasChildFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f HasChildFilter) ShortCircuitCutoff(shortCircuitCutoff int) HasChildFilter {
+ f.shortCircuitCutoff = &shortCircuitCutoff
+ return f
+}
+
+func (f HasChildFilter) MinChildren(minChildren int) HasChildFilter {
+ f.minChildren = &minChildren
+ return f
+}
+
+func (f HasChildFilter) MaxChildren(maxChildren int) HasChildFilter {
+ f.maxChildren = &maxChildren
+ return f
+}
+
+func (f HasChildFilter) InnerHit(innerHit *InnerHit) HasChildFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+// Source returns the JSON document for the filter.
+func (f HasChildFilter) Source() interface{} {
+ // {
+ // "has_child" : {
+ // "type" : "blog_tag",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filter := make(map[string]interface{})
+ source["has_child"] = filter
+
+ if f.query != nil {
+ filter["query"] = f.query.Source()
+ } else if f.filter != nil {
+ filter["filter"] = f.filter.Source()
+ }
+
+ filter["type"] = f.childType
+ if f.filterName != "" {
+ filter["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ filter["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ filter["_cache_key"] = f.cacheKey
+ }
+ if f.shortCircuitCutoff != nil {
+ filter["short_circuit_cutoff"] = *f.shortCircuitCutoff
+ }
+ if f.minChildren != nil {
+ filter["min_children"] = *f.minChildren
+ }
+ if f.maxChildren != nil {
+ filter["max_children"] = *f.maxChildren
+ }
+ if f.innerHit != nil {
+ filter["inner_hits"] = f.innerHit.Source()
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child_test.go
new file mode 100644
index 00000000..34b55fda
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child_test.go
@@ -0,0 +1,70 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasChildFilter(t *testing.T) {
+ f := NewHasChildFilter("blog_tag")
+ f = f.Query(NewTermQuery("tag", "something"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasChildFilterWithInnerHits(t *testing.T) {
+ f := NewHasChildFilter("blog_tag")
+ f = f.Query(NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit())
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"inner_hits":{},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasChildFilterWithInnerHitsName(t *testing.T) {
+ f := NewHasChildFilter("blog_tag")
+ f = f.Query(NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit().Name("comments"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasChildFilterWithInnerHitsQuery(t *testing.T) {
+ f := NewHasChildFilter("blog_tag")
+ f = f.Query(NewTermQuery("tag", "something"))
+ hit := NewInnerHit().Query(NewTermQuery("user", "olivere"))
+ f = f.InnerHit(hit)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"inner_hits":{"query":{"term":{"user":"olivere"}}},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go
new file mode 100644
index 00000000..bfbbaa3a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_parent filter accepts a query and a parent type.
+// The query is executed in the parent document space,
+// which is specified by the parent type.
+// This filter returns child documents whose associated parents have matched.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-parent-filter.html
+type HasParentFilter struct {
+ filter Filter
+ query Query
+ parentType string
+ filterName string
+ cache *bool
+ cacheKey string
+ innerHit *InnerHit
+}
+
+// NewHasParentFilter creates a new has_parent filter.
+func NewHasParentFilter(parentType string) HasParentFilter {
+ f := HasParentFilter{
+ parentType: parentType,
+ }
+ return f
+}
+
+func (f HasParentFilter) Query(query Query) HasParentFilter {
+ f.query = query
+ return f
+}
+
+func (f HasParentFilter) Filter(filter Filter) HasParentFilter {
+ f.filter = filter
+ return f
+}
+
+func (f HasParentFilter) FilterName(filterName string) HasParentFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f HasParentFilter) Cache(cache bool) HasParentFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f HasParentFilter) CacheKey(cacheKey string) HasParentFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f HasParentFilter) InnerHit(innerHit *InnerHit) HasParentFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+// Source returns the JSON document for the filter.
+func (f HasParentFilter) Source() interface{} {
+ // {
+ // "has_parent" : {
+ // "parent_type" : "blog",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filter := make(map[string]interface{})
+ source["has_parent"] = filter
+
+ if f.query != nil {
+ filter["query"] = f.query.Source()
+ } else if f.filter != nil {
+ filter["filter"] = f.filter.Source()
+ }
+
+ filter["parent_type"] = f.parentType
+ if f.filterName != "" {
+ filter["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ filter["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ filter["_cache_key"] = f.cacheKey
+ }
+ if f.innerHit != nil {
+ filter["inner_hits"] = f.innerHit.Source()
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent_test.go
new file mode 100644
index 00000000..3f59f91d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent_test.go
@@ -0,0 +1,69 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasParentFilter(t *testing.T) {
+ f := NewHasParentFilter("blog")
+ f = f.Query(NewTermQuery("tag", "something"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasParentFilterWithInnerHits(t *testing.T) {
+ f := NewHasParentFilter("blog")
+ f = f.Query(NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit())
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"inner_hits":{},"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasParentFilterWithInnerHitsName(t *testing.T) {
+ f := NewHasParentFilter("blog")
+ f = f.Query(NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit().Name("comments"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"inner_hits":{"name":"comments"},"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasParentFilterWithInnerHitsQuery(t *testing.T) {
+ f := NewHasParentFilter("blog")
+ f = f.Query(NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit().Query(NewTermQuery("user", "olivere")))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"inner_hits":{"query":{"term":{"user":"olivere"}}},"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go
new file mode 100644
index 00000000..2a612c93
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that only have the provided ids.
+// Note, this filter does not require the _id field to be indexed
+// since it works using the _uid field.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-ids-filter.html
+type IdsFilter struct {
+ Filter
+ types []string
+ values []string
+ filterName string
+}
+
+func NewIdsFilter(types ...string) IdsFilter {
+ return IdsFilter{
+ types: types,
+ values: make([]string, 0),
+ }
+}
+
+func (f IdsFilter) Ids(ids ...string) IdsFilter {
+ f.values = append(f.values, ids...)
+ return f
+}
+
+func (f IdsFilter) FilterName(filterName string) IdsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f IdsFilter) Source() interface{} {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["ids"] = params
+
+ // type(s)
+ if len(f.types) == 1 {
+ params["type"] = f.types[0]
+ } else if len(f.types) > 1 {
+ params["types"] = f.types
+ }
+
+ // values
+ params["values"] = f.values
+
+ // filter name
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids_test.go
new file mode 100644
index 00000000..2e0837a0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIdsFilter(t *testing.T) {
+ f := NewIdsFilter("my_type").Ids("1", "4", "100")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ids":{"type":"my_type","values":["1","4","100"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go
new file mode 100644
index 00000000..14f0d9d6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go
@@ -0,0 +1,31 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A limit filter limits the number of documents (per shard) to execute on.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-limit-filter.html
+type LimitFilter struct {
+ Filter
+ limit int
+}
+
+func NewLimitFilter(limit int) LimitFilter {
+ f := LimitFilter{limit: limit}
+ return f
+}
+
+func (f LimitFilter) Source() interface{} {
+ // {
+ // "limit" : {
+ // "value" : "..."
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["limit"] = params
+ params["value"] = f.limit
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit_test.go
new file mode 100644
index 00000000..d7ca265a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestLimitFilter(t *testing.T) {
+ f := NewLimitFilter(42)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"limit":{"value":42}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go
new file mode 100644
index 00000000..5092e6d1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go
@@ -0,0 +1,25 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches on all documents.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-match-all-filter.html
+type MatchAllFilter struct {
+ Filter
+}
+
+func NewMatchAllFilter() MatchAllFilter {
+ return MatchAllFilter{}
+}
+
+func (f MatchAllFilter) Source() interface{} {
+ // {
+ // "match_all" : {}
+ // }
+ source := make(map[string]interface{})
+ source["match_all"] = make(map[string]interface{})
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all_test.go
new file mode 100644
index 00000000..0ce39a6c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchAllFilter(t *testing.T) {
+ f := NewMatchAllFilter()
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go
new file mode 100644
index 00000000..d7342815
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents where a specific field has no value in them.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-missing-filter.html
+type MissingFilter struct {
+ Filter
+ name string
+ filterName string
+ nullValue *bool
+ existence *bool
+}
+
+func NewMissingFilter(name string) MissingFilter {
+ f := MissingFilter{name: name}
+ return f
+}
+
+func (f MissingFilter) FilterName(filterName string) MissingFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f MissingFilter) NullValue(nullValue bool) MissingFilter {
+ f.nullValue = &nullValue
+ return f
+}
+
+func (f MissingFilter) Existence(existence bool) MissingFilter {
+ f.existence = &existence
+ return f
+}
+
+func (f MissingFilter) Source() interface{} {
+ // {
+ // "missing" : {
+ // "field" : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["missing"] = params
+ params["field"] = f.name
+ if f.nullValue != nil {
+ params["null_value"] = *f.nullValue
+ }
+ if f.existence != nil {
+ params["existence"] = *f.existence
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing_test.go
new file mode 100644
index 00000000..88b4dc59
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMissingFilter(t *testing.T) {
+ f := NewMissingFilter("user").FilterName("_my_filter")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"missing":{"_name":"_my_filter","field":"user"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go
new file mode 100644
index 00000000..222f43dc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A nested filter works in a similar fashion to the nested query,
+// except used as a filter. It follows exactly the same structure, but
+// also allows to cache the results (set _cache to true),
+// and have it named (set the _name value).
+//
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/nested-filter/
+type NestedFilter struct {
+ query Query
+ filter Filter
+ path string
+ join *bool
+ cache *bool
+ cacheKey string
+ filterName string
+ innerHit *InnerHit
+}
+
+func NewNestedFilter(path string) NestedFilter {
+ return NestedFilter{path: path}
+}
+
+func (f NestedFilter) Query(query Query) NestedFilter {
+ f.query = query
+ return f
+}
+
+func (f NestedFilter) Filter(filter Filter) NestedFilter {
+ f.filter = filter
+ return f
+}
+
+func (f NestedFilter) Path(path string) NestedFilter {
+ f.path = path
+ return f
+}
+
+func (f NestedFilter) Join(join bool) NestedFilter {
+ f.join = &join
+ return f
+}
+
+func (f NestedFilter) Cache(cache bool) NestedFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f NestedFilter) CacheKey(cacheKey string) NestedFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f NestedFilter) FilterName(filterName string) NestedFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f NestedFilter) InnerHit(innerHit *InnerHit) NestedFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+func (f NestedFilter) Source() interface{} {
+ // {
+ // "filtered" : {
+ // "query" : { "match_all" : {} },
+ // "filter" : {
+ // "nested" : {
+ // "path" : "obj1",
+ // "query" : {
+ // "bool" : {
+ // "must" : [
+ // {
+ // "match" : {"obj1.name" : "blue"}
+ // },
+ // {
+ // "range" : {"obj1.count" : {"gt" : 5}}
+ // }
+ // ]
+ // }
+ // },
+ // "_cache" : true
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["nested"] = params
+
+ if f.query != nil {
+ params["query"] = f.query.Source()
+ }
+ if f.filter != nil {
+ params["filter"] = f.filter.Source()
+ }
+ if f.join != nil {
+ params["join"] = *f.join
+ }
+ params["path"] = f.path
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.innerHit != nil {
+ params["inner_hits"] = f.innerHit.Source()
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested_test.go
new file mode 100644
index 00000000..8e0cec64
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedFilter(t *testing.T) {
+ f := NewNestedFilter("obj1")
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ f = f.Query(bq)
+ f = f.Cache(true)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_cache":true,"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedFilterWithInnerHit(t *testing.T) {
+ f := NewNestedFilter("obj1")
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ f = f.Query(bq)
+ f = f.Cache(true)
+ f = f.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere")))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_cache":true,"inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go
new file mode 100644
index 00000000..3dc0c2d1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go
@@ -0,0 +1,62 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that filters out matched documents using a query. Can be placed
+// within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-not-filter.html#query-dsl-not-filter.
+type NotFilter struct {
+ filter Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewNotFilter(filter Filter) NotFilter {
+ return NotFilter{
+ filter: filter,
+ }
+}
+
+func (f NotFilter) Cache(cache bool) NotFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f NotFilter) CacheKey(cacheKey string) NotFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f NotFilter) FilterName(filterName string) NotFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f NotFilter) Source() interface{} {
+ // {
+ // "not" : {
+ // "filter" : { ... }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["not"] = params
+ params["filter"] = f.filter.Source()
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not_test.go
new file mode 100644
index 00000000..76699110
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNotFilter(t *testing.T) {
+ f := NewNotFilter(NewTermFilter("user", "olivere"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"not":{"filter":{"term":{"user":"olivere"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNotFilterWithParams(t *testing.T) {
+ postDateFilter := NewRangeFilter("postDate").From("2010-03-01").To("2010-04-01")
+ f := NewNotFilter(postDateFilter)
+ f = f.Cache(true)
+ f = f.CacheKey("MyNotFilter")
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"not":{"_cache":true,"_cache_key":"MyNotFilter","_name":"MyFilterName","filter":{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go
new file mode 100644
index 00000000..31b2c673
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents using OR boolean operator
+// on other queries. Can be placed within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-or-filter.html
+type OrFilter struct {
+ filters []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewOrFilter(filters ...Filter) OrFilter {
+ f := OrFilter{
+ filters: make([]Filter, 0),
+ }
+ if len(filters) > 0 {
+ f.filters = append(f.filters, filters...)
+ }
+ return f
+}
+
+func (f OrFilter) Add(filter Filter) OrFilter {
+ f.filters = append(f.filters, filter)
+ return f
+}
+
+func (f OrFilter) Cache(cache bool) OrFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f OrFilter) CacheKey(cacheKey string) OrFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f OrFilter) FilterName(filterName string) OrFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f OrFilter) Source() interface{} {
+ // {
+ // "or" : [
+ // ... filters ...
+ // ]
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["or"] = params
+
+ filters := make([]interface{}, len(f.filters))
+ params["filters"] = filters
+ for i, filter := range f.filters {
+ filters[i] = filter.Source()
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or_test.go
new file mode 100644
index 00000000..4d86007f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestOrFilter(t *testing.T) {
+ f := NewOrFilter()
+ postDateFilter := NewRangeFilter("postDate").From("2010-03-01").To("2010-04-01")
+ f = f.Add(postDateFilter)
+ prefixFilter := NewPrefixFilter("name.second", "ba")
+ f = f.Add(prefixFilter)
+ f = f.Cache(true)
+ f = f.CacheKey("MyOrFilter")
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"or":{"_cache":true,"_cache_key":"MyOrFilter","_name":"MyFilterName","filters":[{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}},{"prefix":{"name.second":"ba"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNewOrFilter(t *testing.T) {
+ tf := NewTermsFilter("user", "oliver", "test")
+ mf := NewMissingFilter("user")
+ f := NewOrFilter(tf, mf)
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"or":{"filters":[{"terms":{"user":["oliver","test"]}},{"missing":{"field":"user"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go
new file mode 100644
index 00000000..a2f52735
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields containing terms
+// with a specified prefix (not analyzed).
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/prefix-filter.html
+type PrefixFilter struct {
+ Filter
+ name string
+ prefix string
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewPrefixFilter(name string, prefix string) PrefixFilter {
+ f := PrefixFilter{name: name, prefix: prefix}
+ return f
+}
+
+func (f PrefixFilter) Cache(cache bool) PrefixFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f PrefixFilter) CacheKey(cacheKey string) PrefixFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f PrefixFilter) FilterName(filterName string) PrefixFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f PrefixFilter) Source() interface{} {
+ // {
+ // "prefix" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["prefix"] = params
+
+ params[f.name] = f.prefix
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix_test.go
new file mode 100644
index 00000000..7392572b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix_test.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPrefixFilter(t *testing.T) {
+ f := NewPrefixFilter("user", "ki")
+ f = f.Cache(true)
+ f = f.CacheKey("MyPrefixFilter")
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"prefix":{"_cache":true,"_cache_key":"MyPrefixFilter","_name":"MyFilterName","user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go
new file mode 100644
index 00000000..2fc7c4c5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go
@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// QueryFilter wraps any query to be used as a filter. It can be placed
+// within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-query-filter.html
+type QueryFilter struct {
+ Filter
+ name string
+ query Query
+ cache *bool
+ filterName string
+}
+
+func NewQueryFilter(query Query) QueryFilter {
+ f := QueryFilter{query: query}
+ return f
+}
+
+func (f QueryFilter) Name(name string) QueryFilter {
+ f.name = name
+ return f
+}
+
+func (f QueryFilter) Query(query Query) QueryFilter {
+ f.query = query
+ return f
+}
+
+func (f QueryFilter) Cache(cache bool) QueryFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f QueryFilter) FilterName(filterName string) QueryFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f QueryFilter) Source() interface{} {
+ // {
+ // "query" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ if f.filterName == "" && (f.cache == nil || *f.cache == false) {
+ source["query"] = f.query.Source()
+ } else {
+ params := make(map[string]interface{})
+ source["fquery"] = params
+ params["query"] = f.query.Source()
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query_test.go
new file mode 100644
index 00000000..9dffc455
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestQueryFilter(t *testing.T) {
+ f := NewQueryFilter(NewQueryStringQuery("this AND that OR thus"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"query_string":{"query":"this AND that OR thus"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestQueryFilterWithName(t *testing.T) {
+ f := NewQueryFilter(NewQueryStringQuery("this AND that OR thus"))
+ f = f.Cache(true)
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fquery":{"_cache":true,"_name":"MyFilterName","query":{"query_string":{"query":"this AND that OR thus"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go
new file mode 100644
index 00000000..6c727b90
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go
@@ -0,0 +1,140 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents with fields that have terms within
+// a certain range. For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html
+type RangeFilter struct {
+ Filter
+ name string
+ from *interface{}
+ to *interface{}
+ timeZone string
+ includeLower bool
+ includeUpper bool
+ cache *bool
+ cacheKey string
+ filterName string
+ execution string
+}
+
+func NewRangeFilter(name string) RangeFilter {
+ f := RangeFilter{name: name, includeLower: true, includeUpper: true}
+ return f
+}
+
+func (f RangeFilter) TimeZone(timeZone string) RangeFilter {
+ f.timeZone = timeZone
+ return f
+}
+
+func (f RangeFilter) From(from interface{}) RangeFilter {
+ f.from = &from
+ return f
+}
+
+func (f RangeFilter) Gt(from interface{}) RangeFilter {
+ f.from = &from
+ f.includeLower = false
+ return f
+}
+
+func (f RangeFilter) Gte(from interface{}) RangeFilter {
+ f.from = &from
+ f.includeLower = true
+ return f
+}
+
+func (f RangeFilter) To(to interface{}) RangeFilter {
+ f.to = &to
+ return f
+}
+
+func (f RangeFilter) Lt(to interface{}) RangeFilter {
+ f.to = &to
+ f.includeUpper = false
+ return f
+}
+
+func (f RangeFilter) Lte(to interface{}) RangeFilter {
+ f.to = &to
+ f.includeUpper = true
+ return f
+}
+
+func (f RangeFilter) IncludeLower(includeLower bool) RangeFilter {
+ f.includeLower = includeLower
+ return f
+}
+
+func (f RangeFilter) IncludeUpper(includeUpper bool) RangeFilter {
+ f.includeUpper = includeUpper
+ return f
+}
+
+func (f RangeFilter) Cache(cache bool) RangeFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f RangeFilter) CacheKey(cacheKey string) RangeFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f RangeFilter) FilterName(filterName string) RangeFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f RangeFilter) Execution(execution string) RangeFilter {
+ f.execution = execution
+ return f
+}
+
+func (f RangeFilter) Source() interface{} {
+ // {
+ // "range" : {
+ // "name" : {
+ // "..." : "..."
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ rangeQ := make(map[string]interface{})
+ source["range"] = rangeQ
+
+ params := make(map[string]interface{})
+ rangeQ[f.name] = params
+
+ params["from"] = f.from
+ params["to"] = f.to
+ if f.timeZone != "" {
+ params["time_zone"] = f.timeZone
+ }
+ params["include_lower"] = f.includeLower
+ params["include_upper"] = f.includeUpper
+
+ if f.filterName != "" {
+ rangeQ["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ rangeQ["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ rangeQ["_cache_key"] = f.cacheKey
+ }
+
+ if f.execution != "" {
+ rangeQ["execution"] = f.execution
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range_test.go
new file mode 100644
index 00000000..23e456d9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRangeFilter(t *testing.T) {
+ f := NewRangeFilter("postDate").From("2010-03-01").To("2010-04-01")
+ f = f.Cache(true)
+ f = f.CacheKey("MyAndFilter")
+ f = f.FilterName("MyFilterName")
+ f = f.Execution("index")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"_cache":true,"_cache_key":"MyAndFilter","_name":"MyFilterName","execution":"index","postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+/*
+func TestRangeFilterGte(t *testing.T) {
+ f := NewRangeFilter("postDate").Gte("2010-03-01")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"postDate":{"gte":"2010-03-01"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+*/
+
+func TestRangeFilterWithTimeZone(t *testing.T) {
+ f := NewRangeFilter("born").
+ Gte("2012-01-01").
+ Lte("now").
+ TimeZone("+1:00")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go
new file mode 100644
index 00000000..107a1e9b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go
@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RegexpFilter allows filtering for regular expressions.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-filter.html
+// and http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#regexp-syntax
+// for details.
+type RegexpFilter struct {
+ Filter
+ name string
+ regexp string
+ flags *string
+ maxDeterminizedStates *int
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewRegexpFilter sets up a new RegexpFilter.
+func NewRegexpFilter(name, regexp string) RegexpFilter {
+ return RegexpFilter{name: name, regexp: regexp}
+}
+
+// Flags sets the regexp flags.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#_optional_operators
+// for details.
+func (f RegexpFilter) Flags(flags string) RegexpFilter {
+ f.flags = &flags
+ return f
+}
+
+func (f RegexpFilter) MaxDeterminizedStates(maxDeterminizedStates int) RegexpFilter {
+ f.maxDeterminizedStates = &maxDeterminizedStates
+ return f
+}
+
+func (f RegexpFilter) Cache(cache bool) RegexpFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f RegexpFilter) CacheKey(cacheKey string) RegexpFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f RegexpFilter) FilterName(filterName string) RegexpFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f RegexpFilter) Source() interface{} {
+ // {
+ // "regexp" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["regexp"] = params
+
+ if f.flags == nil {
+ params[f.name] = f.regexp
+ } else {
+ x := make(map[string]interface{})
+ x["value"] = f.regexp
+ x["flags"] = *f.flags
+ if f.maxDeterminizedStates != nil {
+ x["max_determinized_states"] = *f.maxDeterminizedStates
+ }
+ params[f.name] = x
+ }
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp_test.go
new file mode 100644
index 00000000..6498722e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRegexpFilter(t *testing.T) {
+ f := NewRegexpFilter("name.first", "s.*y")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"name.first":"s.*y"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRegexpFilterWithFlags(t *testing.T) {
+ f := NewRegexpFilter("name.first", "s.*y")
+ f = f.Flags("INTERSECTION|COMPLEMENT|EMPTY")
+ f = f.FilterName("test").Cache(true).CacheKey("key")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"_cache":true,"_cache_key":"key","_name":"test","name.first":{"flags":"INTERSECTION|COMPLEMENT|EMPTY","value":"s.*y"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go
new file mode 100644
index 00000000..db22f7ac
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields that contain
+// a term (not analyzed). For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/term-filter.html
+type TermFilter struct {
+ Filter
+ name string
+ value interface{}
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewTermFilter(name string, value interface{}) TermFilter {
+ f := TermFilter{name: name, value: value}
+ return f
+}
+
+func (f TermFilter) Cache(cache bool) TermFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f TermFilter) CacheKey(cacheKey string) TermFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f TermFilter) FilterName(filterName string) TermFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f TermFilter) Source() interface{} {
+ // {
+ // "term" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["term"] = params
+
+ params[f.name] = f.value
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term_test.go
new file mode 100644
index 00000000..a0975b3f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term_test.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermFilter(t *testing.T) {
+ f := NewTermFilter("user", "ki")
+ f = f.Cache(true)
+ f = f.CacheKey("MyTermFilter")
+ f = f.FilterName("MyFilterName")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"term":{"_cache":true,"_cache_key":"MyTermFilter","_name":"MyFilterName","user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go
new file mode 100644
index 00000000..1705c433
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields that match
+// any of the provided terms (not analyzed). For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/terms-filter/
+type TermsFilter struct {
+ Filter
+ name string
+ values []interface{}
+ cache *bool
+ cacheKey string
+ filterName string
+ execution string
+}
+
+func NewTermsFilter(name string, values ...interface{}) TermsFilter {
+ f := TermsFilter{
+ name: name,
+ values: make([]interface{}, 0),
+ }
+ f.values = append(f.values, values...)
+ return f
+}
+
+func (f TermsFilter) Cache(cache bool) TermsFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f TermsFilter) CacheKey(cacheKey string) TermsFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f TermsFilter) FilterName(filterName string) TermsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f TermsFilter) Execution(execution string) TermsFilter {
+ f.execution = execution
+ return f
+}
+
+func (f TermsFilter) Source() interface{} {
+ // {
+ // "terms" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["terms"] = params
+ params[f.name] = f.values
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.execution != "" {
+ params["execution"] = f.execution
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms_test.go
new file mode 100644
index 00000000..63540848
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsFilter(t *testing.T) {
+ f := NewTermsFilter("user", "kimchy", "elasticsearch")
+ f = f.Cache(true)
+ f = f.CacheKey("MyTermsFilter")
+ f = f.FilterName("MyFilterName")
+ f = f.Execution("plain")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"_cache":true,"_cache_key":"MyTermsFilter","_name":"MyFilterName","execution":"plain","user":["kimchy","elasticsearch"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go
new file mode 100644
index 00000000..f64a2444
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go
@@ -0,0 +1,33 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents matching the provided document / mapping type.
+// Note, this filter can work even when the _type field is not indexed
+// (using the _uid field).
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-type-filter.html
+type TypeFilter struct {
+ Filter
+ typ string
+}
+
+func NewTypeFilter(typ string) TypeFilter {
+ f := TypeFilter{typ: typ}
+ return f
+}
+
+func (f TypeFilter) Source() interface{} {
+ // {
+ // "type" : {
+ // "value" : "..."
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["type"] = params
+ params["value"] = f.typ
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type_test.go
new file mode 100644
index 00000000..e172ed77
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTypeFilter(t *testing.T) {
+ f := NewTypeFilter("my_type")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"type":{"value":"my_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go
new file mode 100644
index 00000000..9fc053cc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go
@@ -0,0 +1,153 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A bool query matches documents matching boolean
+// combinations of other queries.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html
+type BoolQuery struct {
+ Query
+ mustClauses []Query
+ shouldClauses []Query
+ mustNotClauses []Query
+ boost *float32
+ disableCoord *bool
+ minimumShouldMatch string
+ adjustPureNegative *bool
+ queryName string
+}
+
+// Creates a new bool query.
+func NewBoolQuery() BoolQuery {
+ q := BoolQuery{
+ mustClauses: make([]Query, 0),
+ shouldClauses: make([]Query, 0),
+ mustNotClauses: make([]Query, 0),
+ }
+ return q
+}
+
+func (q BoolQuery) Must(queries ...Query) BoolQuery {
+ q.mustClauses = append(q.mustClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) MustNot(queries ...Query) BoolQuery {
+ q.mustNotClauses = append(q.mustNotClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) Should(queries ...Query) BoolQuery {
+ q.shouldClauses = append(q.shouldClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) Boost(boost float32) BoolQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q BoolQuery) DisableCoord(disableCoord bool) BoolQuery {
+ q.disableCoord = &disableCoord
+ return q
+}
+
+func (q BoolQuery) MinimumShouldMatch(minimumShouldMatch string) BoolQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q BoolQuery) AdjustPureNegative(adjustPureNegative bool) BoolQuery {
+ q.adjustPureNegative = &adjustPureNegative
+ return q
+}
+
+func (q BoolQuery) QueryName(queryName string) BoolQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the bool query.
+func (q BoolQuery) Source() interface{} {
+ // {
+ // "bool" : {
+ // "must" : {
+ // "term" : { "user" : "kimchy" }
+ // },
+ // "must_not" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // },
+ // "should" : [
+ // {
+ // "term" : { "tag" : "wow" }
+ // },
+ // {
+ // "term" : { "tag" : "elasticsearch" }
+ // }
+ // ],
+	//      "minimum_should_match" : 1,
+ // "boost" : 1.0
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boolClause := make(map[string]interface{})
+ query["bool"] = boolClause
+
+ // must
+ if len(q.mustClauses) == 1 {
+ boolClause["must"] = q.mustClauses[0].Source()
+ } else if len(q.mustClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.mustClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must"] = clauses
+ }
+
+ // must_not
+ if len(q.mustNotClauses) == 1 {
+ boolClause["must_not"] = q.mustNotClauses[0].Source()
+ } else if len(q.mustNotClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.mustNotClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must_not"] = clauses
+ }
+
+ // should
+ if len(q.shouldClauses) == 1 {
+ boolClause["should"] = q.shouldClauses[0].Source()
+ } else if len(q.shouldClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.shouldClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["should"] = clauses
+ }
+
+ if q.boost != nil {
+ boolClause["boost"] = *q.boost
+ }
+ if q.disableCoord != nil {
+ boolClause["disable_coord"] = *q.disableCoord
+ }
+ if q.minimumShouldMatch != "" {
+ boolClause["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.adjustPureNegative != nil {
+ boolClause["adjust_pure_negative"] = *q.adjustPureNegative
+ }
+ if q.queryName != "" {
+ boolClause["_name"] = q.queryName
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool_test.go
new file mode 100644
index 00000000..07ecc49c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBoolQuery(t *testing.T) {
+ q := NewBoolQuery()
+ q = q.Must(NewTermQuery("tag", "wow"))
+ q = q.MustNot(NewRangeQuery("age").From(10).To(20))
+ q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag"))
+ q = q.Boost(10)
+ q = q.DisableCoord(true)
+ q = q.QueryName("Test")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go
new file mode 100644
index 00000000..29b7a629
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go
@@ -0,0 +1,89 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A boosting query can be used to effectively
+// demote results that match a given query.
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html
+type BoostingQuery struct {
+ Query
+ positiveClause Query
+ negativeClause Query
+ negativeBoost *float64
+ boost *float64
+}
+
+// Creates a new boosting query.
+func NewBoostingQuery() BoostingQuery {
+ return BoostingQuery{}
+}
+
+func (q BoostingQuery) Positive(positive Query) BoostingQuery {
+ q.positiveClause = positive
+ return q
+}
+
+func (q BoostingQuery) Negative(negative Query) BoostingQuery {
+ q.negativeClause = negative
+ return q
+}
+
+func (q BoostingQuery) NegativeBoost(negativeBoost float64) BoostingQuery {
+ q.negativeBoost = &negativeBoost
+ return q
+}
+
+func (q BoostingQuery) Boost(boost float64) BoostingQuery {
+ q.boost = &boost
+ return q
+}
+
+// Creates the query source for the boosting query.
+func (q BoostingQuery) Source() interface{} {
+ // {
+ // "boosting" : {
+ // "positive" : {
+ // "term" : {
+ // "field1" : "value1"
+ // }
+ // },
+ // "negative" : {
+ // "term" : {
+ // "field2" : "value2"
+ // }
+ // },
+ // "negative_boost" : 0.2
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boostingClause := make(map[string]interface{})
+ query["boosting"] = boostingClause
+
+ // Negative and positive clause as well as negative boost
+ // are mandatory in the Java client.
+
+ // positive
+ if q.positiveClause != nil {
+ boostingClause["positive"] = q.positiveClause.Source()
+ }
+
+ // negative
+ if q.negativeClause != nil {
+ boostingClause["negative"] = q.negativeClause.Source()
+ }
+
+ if q.negativeBoost != nil {
+ boostingClause["negative_boost"] = *q.negativeBoost
+ }
+
+ if q.boost != nil {
+ boostingClause["boost"] = *q.boost
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting_test.go
new file mode 100644
index 00000000..31364dca
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting_test.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBoostingQuery(t *testing.T) {
+ q := NewBoostingQuery()
+ q = q.Positive(NewTermQuery("tag", "wow"))
+ q = q.Negative(NewRangeQuery("age").From(10).To(20))
+ q = q.NegativeBoost(0.2)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go
new file mode 100644
index 00000000..f15f868d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go
@@ -0,0 +1,144 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The common terms query is a modern alternative to stopwords
+// which improves the precision and recall of search results
+// (by taking stopwords into account), without sacrificing performance.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/common-terms-query/
+type CommonQuery struct {
+ Query
+ name string
+ query string
+ cutoffFreq *float64
+ highFreq *float64
+ highFreqOp string
+ highFreqMinMatch interface{}
+ lowFreq *float64
+ lowFreqOp string
+ lowFreqMinMatch interface{}
+ analyzer string
+ boost *float64
+ disableCoords *bool
+}
+
+// Creates a new common query.
+func NewCommonQuery(name string, query string) CommonQuery {
+ q := CommonQuery{name: name, query: query}
+ return q
+}
+
+func (q *CommonQuery) CutoffFrequency(f float64) *CommonQuery {
+ q.cutoffFreq = &f
+ return q
+}
+
+func (q *CommonQuery) HighFreq(f float64) *CommonQuery {
+ q.highFreq = &f
+ return q
+}
+
+func (q *CommonQuery) HighFreqOperator(op string) *CommonQuery {
+ q.highFreqOp = op
+ return q
+}
+
+func (q *CommonQuery) HighFreqMinMatch(min interface{}) *CommonQuery {
+ q.highFreqMinMatch = min
+ return q
+}
+
+func (q *CommonQuery) LowFreq(f float64) *CommonQuery {
+ q.lowFreq = &f
+ return q
+}
+
+func (q *CommonQuery) LowFreqOperator(op string) *CommonQuery {
+ q.lowFreqOp = op
+ return q
+}
+
+func (q *CommonQuery) LowFreqMinMatch(min interface{}) *CommonQuery {
+ q.lowFreqMinMatch = min
+ return q
+}
+
+func (q *CommonQuery) Analyzer(analyzer string) *CommonQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q *CommonQuery) Boost(boost float64) *CommonQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *CommonQuery) DisableCoords(disable bool) *CommonQuery {
+ q.disableCoords = &disable
+ return q
+}
+
+// Creates the query source for the common query.
+func (q CommonQuery) Source() interface{} {
+ // {
+ // "common": {
+ // "body": {
+ // "query": "this is bonsai cool",
+ // "cutoff_frequency": 0.001
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ body := make(map[string]interface{})
+ query := make(map[string]interface{})
+
+ source["common"] = body
+ body[q.name] = query
+ query["query"] = q.query
+
+ if q.cutoffFreq != nil {
+ query["cutoff_frequency"] = *(q.cutoffFreq)
+ }
+
+ if q.highFreq != nil {
+ query["high_freq"] = *(q.highFreq)
+ }
+ if q.highFreqOp != "" {
+ query["high_freq_operator"] = q.highFreqOp
+ }
+
+ if q.lowFreq != nil {
+ query["low_freq"] = *(q.lowFreq)
+ }
+ if q.lowFreqOp != "" {
+ query["low_freq_operator"] = q.lowFreqOp
+ }
+
+ if q.lowFreqMinMatch != nil || q.highFreqMinMatch != nil {
+ mm := make(map[string]interface{})
+ if q.lowFreqMinMatch != nil {
+ mm["low_freq"] = q.lowFreqMinMatch
+ }
+ if q.highFreqMinMatch != nil {
+ mm["high_freq"] = q.highFreqMinMatch
+ }
+ query["minimum_should_match"] = mm
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.disableCoords != nil {
+ query["disable_coords"] = *(q.disableCoords)
+ }
+
+ if q.boost != nil {
+ query["boost"] = *(q.boost)
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common_test.go
new file mode 100644
index 00000000..85270b63
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestSearchQueriesCommon(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Common query
+ q := NewCommonQuery("message", "Golang")
+ searchResult, err := client.Search().Index(testIndexName).Query(&q).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go
new file mode 100644
index 00000000..f0503a3e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A custom_filters_score query allows to execute a query,
+// and if the hit matches a provided filter (ordered),
+// use either a boost or a script associated with it to compute the score.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/custom-filters-score-query/
+type CustomFiltersScoreQuery struct {
+ query Query
+ filters []Filter
+ scoreMode string
+ maxBoost *float32
+ script string
+}
+
+// Creates a new custom_filters_score query.
+func NewCustomFiltersScoreQuery() CustomFiltersScoreQuery {
+ q := CustomFiltersScoreQuery{
+ filters: make([]Filter, 0),
+ }
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Query(query Query) CustomFiltersScoreQuery {
+ q.query = query
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Filter(filter Filter) CustomFiltersScoreQuery {
+ q.filters = append(q.filters, filter)
+ return q
+}
+
+func (q CustomFiltersScoreQuery) ScoreMode(scoreMode string) CustomFiltersScoreQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q CustomFiltersScoreQuery) MaxBoost(maxBoost float32) CustomFiltersScoreQuery {
+ q.maxBoost = &maxBoost
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Script(script string) CustomFiltersScoreQuery {
+ q.script = script
+ return q
+}
+
+// Creates the query source for the custom_filters_score query.
+func (q CustomFiltersScoreQuery) Source() interface{} {
+ // {
+ // "custom_filters_score" : {
+ // "query" : {
+ // "match_all" : {}
+ // },
+ // "filters" : [
+ // {
+ // "filter" : { "range" : { "age" : {"from" : 0, "to" : 10} } },
+ // "boost" : "3"
+ // },
+ // {
+ // "filter" : { "range" : { "age" : {"from" : 10, "to" : 20} } },
+ // "boost" : "2"
+ // }
+ // ],
+ // "score_mode" : "first"
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ cfs := make(map[string]interface{})
+ query["custom_filters_score"] = cfs
+
+ // query
+ if q.query != nil {
+ cfs["query"] = q.query.Source()
+ }
+ // filters
+ clauses := make([]interface{}, 0)
+ for _, filter := range q.filters {
+ clauses = append(clauses, filter.Source())
+ }
+ cfs["filters"] = clauses
+
+ // scoreMode
+ if q.scoreMode != "" {
+ cfs["score_mode"] = q.scoreMode
+ }
+
+ // max_boost
+ if q.maxBoost != nil {
+ cfs["max_boost"] = *q.maxBoost
+ }
+
+ // script
+ if q.script != "" {
+ cfs["script"] = q.script
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go
new file mode 100644
index 00000000..8eadfcb1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// custom_score query allows to wrap another query and customize
+// the scoring of it optionally with a computation derived from
+// other field values in the doc (numeric ones) using script expression.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/custom-score-query/
+type CustomScoreQuery struct {
+ query Query
+ filter Filter
+ script string
+ lang string
+ boost *float32
+ params map[string]interface{}
+}
+
+// Creates a new custom_score query.
+func NewCustomScoreQuery() CustomScoreQuery {
+ q := CustomScoreQuery{
+ params: make(map[string]interface{}),
+ }
+ return q
+}
+
+func (q CustomScoreQuery) Query(query Query) CustomScoreQuery {
+ q.query = query
+ return q
+}
+
+func (q CustomScoreQuery) Filter(filter Filter) CustomScoreQuery {
+ q.filter = filter
+ return q
+}
+
+func (q CustomScoreQuery) Script(script string) CustomScoreQuery {
+ q.script = script
+ return q
+}
+
+func (q CustomScoreQuery) Lang(lang string) CustomScoreQuery {
+ q.lang = lang
+ return q
+}
+
+func (q CustomScoreQuery) Boost(boost float32) CustomScoreQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q CustomScoreQuery) Params(params map[string]interface{}) CustomScoreQuery {
+ q.params = params
+ return q
+}
+
+func (q CustomScoreQuery) Param(name string, value interface{}) CustomScoreQuery {
+ q.params[name] = value
+ return q
+}
+
+// Creates the query source for the custom_score query.
+func (q CustomScoreQuery) Source() interface{} {
+ // "custom_score" : {
+ // "query" : {
+ // ....
+ // },
+ // "params" : {
+ // "param1" : 2,
+ // "param2" : 3.1
+ // },
+ // "script" : "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+ // }
+
+ query := make(map[string]interface{})
+
+ csq := make(map[string]interface{})
+ query["custom_score"] = csq
+
+ // query
+ if q.query != nil {
+ csq["query"] = q.query.Source()
+ } else if q.filter != nil {
+ csq["filter"] = q.filter.Source()
+ }
+
+ csq["script"] = q.script
+
+ // lang
+ if q.lang != "" {
+ csq["lang"] = q.lang
+ }
+
+ // params
+ if len(q.params) > 0 {
+ csq["params"] = q.params
+ }
+
+ // boost
+ if q.boost != nil {
+ csq["boost"] = *q.boost
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go
new file mode 100644
index 00000000..76be7839
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go
@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that generates the union of documents produced by its subqueries,
+// and that scores each document with the maximum score for that document
+// as produced by any subquery, plus a tie breaking increment for
+// any additional matching subqueries.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/dis-max-query/
+type DisMaxQuery struct {
+ queries []Query
+ boost *float32
+ tieBreaker *float32
+}
+
+// Creates a new dis_max query.
+func NewDisMaxQuery() DisMaxQuery {
+ q := DisMaxQuery{
+ queries: make([]Query, 0),
+ }
+ return q
+}
+
+func (q DisMaxQuery) Query(query Query) DisMaxQuery {
+ q.queries = append(q.queries, query)
+ return q
+}
+
+func (q DisMaxQuery) Boost(boost float32) DisMaxQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q DisMaxQuery) TieBreaker(tieBreaker float32) DisMaxQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// Creates the query source for the dis_max query.
+func (q DisMaxQuery) Source() interface{} {
+ // {
+ // "dis_max" : {
+ // "tie_breaker" : 0.7,
+ // "boost" : 1.2,
+	//  "queries" : [
+ // {
+ // "term" : { "age" : 34 }
+ // },
+ // {
+ // "term" : { "age" : 35 }
+ // }
+ // ]
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ disMax := make(map[string]interface{})
+ query["dis_max"] = disMax
+
+ // tieBreaker
+ if q.tieBreaker != nil {
+ disMax["tie_breaker"] = *q.tieBreaker
+ }
+
+ // boost
+ if q.boost != nil {
+ disMax["boost"] = *q.boost
+ }
+
+ // queries
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.queries {
+ clauses = append(clauses, subQuery.Source())
+ }
+ disMax["queries"] = clauses
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go
new file mode 100644
index 00000000..a58b20ae
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go
@@ -0,0 +1,86 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that applies a filter to the results of another query.
+// For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/filtered-query.html
+type FilteredQuery struct {
+ Query
+ query Query
+ filters []Filter
+ boost *float32
+}
+
+// Creates a new filtered query.
+func NewFilteredQuery(query Query) FilteredQuery {
+ q := FilteredQuery{
+ query: query,
+ filters: make([]Filter, 0),
+ }
+ return q
+}
+
+func (q FilteredQuery) Filter(filter Filter) FilteredQuery {
+ q.filters = append(q.filters, filter)
+ return q
+}
+
+func (q FilteredQuery) Boost(boost float32) FilteredQuery {
+ q.boost = &boost
+ return q
+}
+
+// Creates the query source for the filtered query.
+func (q FilteredQuery) Source() interface{} {
+ // {
+ // "filtered" : {
+ // "query" : {
+ // "term" : { "tag" : "wow" }
+ // },
+ // "filter" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filtered := make(map[string]interface{})
+ source["filtered"] = filtered
+
+ filtered["query"] = q.query.Source()
+
+ if len(q.filters) == 1 {
+ filtered["filter"] = q.filters[0].Source()
+ } else if len(q.filters) > 1 {
+ filter := make(map[string]interface{})
+ filtered["filter"] = filter
+ and := make(map[string]interface{})
+ filter["and"] = and
+ filters := make([]interface{}, 0)
+ for _, f := range q.filters {
+ filters = append(filters, f.Source())
+ }
+ and["filters"] = filters
+ /*
+ anded := make([]map[string]interface{}, 0)
+ filtered["filter"] = anded
+ for _, f := range q.filters {
+ andElem := make(map[string]interface{})
+ andElem["and"] = f.Source()
+ anded = append(anded, andElem)
+ }
+ */
+ }
+
+ if q.boost != nil {
+ filtered["boost"] = *q.boost
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go
new file mode 100644
index 00000000..6f2f3e8b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go
@@ -0,0 +1,137 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The function_score allows you to modify the score of documents that
+// are retrieved by a query. This can be useful if, for example,
+// a score function is computationally expensive and it is sufficient
+// to compute the score on a filtered set of documents.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+type FunctionScoreQuery struct {
+ query Query // main query; mutually exclusive with filter (see Query/Filter setters)
+ filter Filter // main filter; mutually exclusive with query
+ boost *float32
+ maxBoost *float32
+ scoreMode string // how the scores of the individual functions are combined
+ boostMode string // how the combined function score is combined with the query score
+ filters []Filter // per-function filters; a nil entry means the function is unfiltered
+ scoreFuncs []ScoreFunction // parallel to filters: scoreFuncs[i] applies where filters[i] matches
+ minScore *float32
+ weight *float64 // NOTE(review): never read or written in this file — appears vestigial; confirm before removing
+}
+
+// NewFunctionScoreQuery creates a new function score query.
+func NewFunctionScoreQuery() FunctionScoreQuery {
+ return FunctionScoreQuery{
+ filters: make([]Filter, 0),
+ scoreFuncs: make([]ScoreFunction, 0),
+ }
+}
+
+func (q FunctionScoreQuery) Query(query Query) FunctionScoreQuery { // Query sets the main query, clearing any main filter
+ q.query = query
+ q.filter = nil
+ return q
+}
+
+func (q FunctionScoreQuery) Filter(filter Filter) FunctionScoreQuery { // Filter sets the main filter, clearing any main query
+ q.query = nil
+ q.filter = filter
+ return q
+}
+
+func (q FunctionScoreQuery) Add(filter Filter, scoreFunc ScoreFunction) FunctionScoreQuery { // Add registers a score function restricted by a filter
+ q.filters = append(q.filters, filter)
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+func (q FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) FunctionScoreQuery { // AddScoreFunc registers an unfiltered score function
+ q.filters = append(q.filters, nil) // nil entry keeps filters and scoreFuncs index-aligned
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+func (q FunctionScoreQuery) ScoreMode(scoreMode string) FunctionScoreQuery { // ScoreMode sets how individual function scores are combined
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q FunctionScoreQuery) BoostMode(boostMode string) FunctionScoreQuery { // BoostMode sets how the function score combines with the query score
+ q.boostMode = boostMode
+ return q
+}
+
+func (q FunctionScoreQuery) MaxBoost(maxBoost float32) FunctionScoreQuery { // MaxBoost caps the score produced by the functions
+ q.maxBoost = &maxBoost
+ return q
+}
+
+func (q FunctionScoreQuery) Boost(boost float32) FunctionScoreQuery { // Boost sets the boost for the whole query
+ q.boost = &boost
+ return q
+}
+
+func (q FunctionScoreQuery) MinScore(minScore float32) FunctionScoreQuery { // MinScore excludes documents scoring below this threshold
+ q.minScore = &minScore
+ return q
+}
+
+// Source returns JSON for the function score query.
+func (q FunctionScoreQuery) Source() interface{} {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["function_score"] = query
+
+ if q.query != nil {
+ query["query"] = q.query.Source()
+ } else if q.filter != nil {
+ query["filter"] = q.filter.Source()
+ }
+
+ if len(q.filters) == 1 && q.filters[0] == nil { // exactly one unfiltered function: serialize it flat, not in "functions"
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[0].GetWeight(); weight != nil {
+ query["weight"] = weight
+ }
+ // Serialize the score function
+ query[q.scoreFuncs[0].Name()] = q.scoreFuncs[0].Source()
+ } else {
+ funcs := make([]interface{}, len(q.filters))
+ for i, filter := range q.filters {
+ hsh := make(map[string]interface{})
+ if filter != nil {
+ hsh["filter"] = filter.Source()
+ }
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil {
+ hsh["weight"] = weight
+ }
+ // Serialize the score function
+ hsh[q.scoreFuncs[i].Name()] = q.scoreFuncs[i].Source()
+ funcs[i] = hsh
+ }
+ query["functions"] = funcs
+ }
+
+ if q.scoreMode != "" {
+ query["score_mode"] = q.scoreMode
+ }
+ if q.boostMode != "" {
+ query["boost_mode"] = q.boostMode
+ }
+ if q.maxBoost != nil {
+ query["max_boost"] = *q.maxBoost
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.minScore != nil {
+ query["min_score"] = *q.minScore
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go
new file mode 100644
index 00000000..5fde7659
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go
@@ -0,0 +1,627 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "strings"
+)
+
+// ScoreFunction is used in combination with the Function Score Query.
+type ScoreFunction interface {
+ Name() string // JSON field name under which Source() is serialized by FunctionScoreQuery
+ GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery
+ Source() interface{} // serializable JSON body of this score function
+}
+
+// -- Exponential Decay --
+
+// ExponentialDecayFunction builds an exponential decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type ExponentialDecayFunction struct {
+ fieldName string // document field the decay is computed on
+ origin interface{} // central point; type depends on the field (date, geo point, number)
+ scale interface{} // required distance from origin at which the score equals decay
+ decay *float64 // score at distance scale; nil/non-positive means use the server default (0.5)
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewExponentialDecayFunction creates a new ExponentialDecayFunction.
+func NewExponentialDecayFunction() ExponentialDecayFunction {
+ return ExponentialDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn ExponentialDecayFunction) Name() string {
+ return "exp"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied to.
+func (fn ExponentialDecayFunction) FieldName(fieldName string) ExponentialDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn ExponentialDecayFunction) Origin(origin interface{}) ExponentialDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn ExponentialDecayFunction) Scale(scale interface{}) ExponentialDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn ExponentialDecayFunction) Decay(decay float64) ExponentialDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn ExponentialDecayFunction) Offset(offset interface{}) ExponentialDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn ExponentialDecayFunction) Weight(weight float64) ExponentialDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn ExponentialDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn ExponentialDecayFunction) MultiValueMode(mode string) ExponentialDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn ExponentialDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale // scale is always emitted, even when unset (nil)
+ if fn.decay != nil && *fn.decay > 0 { // only emit decay when explicitly set to a positive value
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode // emitted beside the field object, not inside it (mirrors Gauss/Linear)
+ }
+ return source // weight, if any, is serialized by FunctionScoreQuery, not here
+}
+
+// -- Gauss Decay --
+
+// GaussDecayFunction builds a gauss decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type GaussDecayFunction struct {
+ fieldName string // document field the decay is computed on
+ origin interface{} // central point; type depends on the field (date, geo point, number)
+ scale interface{} // required distance from origin at which the score equals decay
+ decay *float64 // score at distance scale; nil/non-positive means use the server default (0.5)
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewGaussDecayFunction returns a new GaussDecayFunction.
+func NewGaussDecayFunction() GaussDecayFunction {
+ return GaussDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn GaussDecayFunction) Name() string {
+ return "gauss"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied to.
+func (fn GaussDecayFunction) FieldName(fieldName string) GaussDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn GaussDecayFunction) Origin(origin interface{}) GaussDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn GaussDecayFunction) Scale(scale interface{}) GaussDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn GaussDecayFunction) Decay(decay float64) GaussDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn GaussDecayFunction) Offset(offset interface{}) GaussDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn GaussDecayFunction) Weight(weight float64) GaussDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn GaussDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn GaussDecayFunction) MultiValueMode(mode string) GaussDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn GaussDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale // scale is always emitted, even when unset (nil)
+ if fn.decay != nil && *fn.decay > 0 { // only emit decay when explicitly set to a positive value
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode // emitted beside the field object, not inside it
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
+
+// -- Linear Decay --
+
+// LinearDecayFunction builds a linear decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type LinearDecayFunction struct {
+ fieldName string // document field the decay is computed on
+ origin interface{} // central point; type depends on the field (date, geo point, number)
+ scale interface{} // required distance from origin at which the score equals decay
+ decay *float64 // score at distance scale; nil/non-positive means use the server default (0.5)
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewLinearDecayFunction initializes and returns a new LinearDecayFunction.
+func NewLinearDecayFunction() LinearDecayFunction {
+ return LinearDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn LinearDecayFunction) Name() string {
+ return "linear"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied to.
+func (fn LinearDecayFunction) FieldName(fieldName string) LinearDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn LinearDecayFunction) Origin(origin interface{}) LinearDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn LinearDecayFunction) Scale(scale interface{}) LinearDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn LinearDecayFunction) Decay(decay float64) LinearDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn LinearDecayFunction) Offset(offset interface{}) LinearDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn LinearDecayFunction) Weight(weight float64) LinearDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn LinearDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn LinearDecayFunction) MultiValueMode(mode string) LinearDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// GetMultiValueMode returns how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn LinearDecayFunction) GetMultiValueMode() string {
+ return fn.multiValueMode // accessor unique to LinearDecayFunction; the sibling decay types omit it
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn LinearDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale // scale is always emitted, even when unset (nil)
+ if fn.decay != nil && *fn.decay > 0 { // only emit decay when explicitly set to a positive value
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode // emitted beside the field object, not inside it
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
+
+// -- Script --
+
+// ScriptFunction builds a script score function. It uses a script to
+// compute or influence the score of documents that match with the inner
+// query or filter.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score
+// for details.
+type ScriptFunction struct {
+ script string // script source code
+ lang string // optional script language; empty means server default
+ params map[string]interface{} // named parameters made available to the script
+ weight *float64
+}
+
+// NewScriptFunction initializes and returns a new ScriptFunction.
+func NewScriptFunction(script string) ScriptFunction {
+ return ScriptFunction{
+ script: script,
+ params: make(map[string]interface{}),
+ }
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn ScriptFunction) Name() string {
+ return "script_score"
+}
+
+// Script specifies the script to be executed.
+func (fn ScriptFunction) Script(script string) ScriptFunction {
+ fn.script = script
+ return fn
+}
+
+// Lang specifies the language of the Script.
+func (fn ScriptFunction) Lang(lang string) ScriptFunction {
+ fn.lang = lang
+ return fn
+}
+
+// Param adds a single parameter to the script.
+func (fn ScriptFunction) Param(name string, value interface{}) ScriptFunction {
+ fn.params[name] = value // mutates the shared params map; copies of fn alias the same map
+ return fn
+}
+
+// Params sets all script parameters in a single step.
+func (fn ScriptFunction) Params(params map[string]interface{}) ScriptFunction {
+ fn.params = params // replaces the map wholesale, discarding values added via Param
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn ScriptFunction) Weight(weight float64) ScriptFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn ScriptFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn ScriptFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.script != "" {
+ source["script"] = fn.script
+ }
+ if fn.lang != "" {
+ source["lang"] = fn.lang
+ }
+ if len(fn.params) > 0 {
+ source["params"] = fn.params
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
+
+// -- Factor --
+
+// FactorFunction is deprecated.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type FactorFunction struct {
+ boostFactor *float32 // factor to multiply into the score; nil means unset
+}
+
+// NewFactorFunction initializes and returns a new FactorFunction.
+func NewFactorFunction() FactorFunction {
+ return FactorFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn FactorFunction) Name() string {
+ return "boost_factor"
+}
+
+// BoostFactor specifies a boost for this score function.
+func (fn FactorFunction) BoostFactor(boost float32) FactorFunction {
+ fn.boostFactor = &boost
+ return fn
+}
+
+// GetWeight always returns nil for (deprecated) FactorFunction.
+func (fn FactorFunction) GetWeight() *float64 {
+ return nil
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn FactorFunction) Source() interface{} {
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return fn.boostFactor // NOTE(review): a nil pointer here serializes as JSON null — confirm callers always set BoostFactor
+}
+
+// -- Field value factor --
+
+// FieldValueFactorFunction is a function score function that allows you
+// to use a field from a document to influence the score.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor.
+type FieldValueFactorFunction struct {
+ field string // document field whose value influences the score
+ factor *float64 // optional multiplier; server defaults to 1 when unset
+ missing *float64 // fallback value for documents that lack the field
+ weight *float64
+ modifier string // math modifier (log, sqrt, ...); lowercased on serialization
+}
+
+// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction.
+func NewFieldValueFactorFunction() FieldValueFactorFunction {
+ return FieldValueFactorFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn FieldValueFactorFunction) Name() string {
+ return "field_value_factor"
+}
+
+// Field is the field to be extracted from the document.
+func (fn FieldValueFactorFunction) Field(field string) FieldValueFactorFunction {
+ fn.field = field
+ return fn
+}
+
+// Factor is the (optional) factor to multiply the field with. If you do not
+// specify a factor, the default is 1.
+func (fn FieldValueFactorFunction) Factor(factor float64) FieldValueFactorFunction {
+ fn.factor = &factor
+ return fn
+}
+
+// Modifier to apply to the field value. It can be one of: none, log, log1p,
+// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none.
+func (fn FieldValueFactorFunction) Modifier(modifier string) FieldValueFactorFunction {
+ fn.modifier = modifier
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn FieldValueFactorFunction) Weight(weight float64) FieldValueFactorFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn FieldValueFactorFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Missing is used if a document does not have that field.
+func (fn FieldValueFactorFunction) Missing(missing float64) FieldValueFactorFunction {
+ fn.missing = &missing
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn FieldValueFactorFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.field != "" {
+ source["field"] = fn.field
+ }
+ if fn.factor != nil {
+ source["factor"] = *fn.factor
+ }
+ if fn.missing != nil {
+ source["missing"] = *fn.missing
+ }
+ if fn.modifier != "" {
+ source["modifier"] = strings.ToLower(fn.modifier) // normalize so callers may pass any case
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
+
+// -- Weight Factor --
+
+// WeightFactorFunction builds a weight factor function that multiplies
+// the weight to the score.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight
+// for details.
+type WeightFactorFunction struct {
+ weight float64 // stored by value (not a pointer), so GetWeight is always non-nil
+}
+
+// NewWeightFactorFunction initializes and returns a new WeightFactorFunction.
+func NewWeightFactorFunction(weight float64) WeightFactorFunction {
+ return WeightFactorFunction{weight: weight}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn WeightFactorFunction) Name() string {
+ return "weight"
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn WeightFactorFunction) Weight(weight float64) WeightFactorFunction {
+ fn.weight = weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn WeightFactorFunction) GetWeight() *float64 {
+ return &fn.weight // always non-nil; FunctionScoreQuery will therefore always emit "weight"
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn WeightFactorFunction) Source() interface{} {
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return fn.weight // same value as GetWeight; both land under the "weight" key via Name()
+}
+
+// -- Random --
+
+// RandomFunction builds a random score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random
+// for details.
+type RandomFunction struct {
+ seed interface{} // numeric or string seed; nil means omitted
+ weight *float64
+}
+
+// NewRandomFunction initializes and returns a new RandomFunction.
+func NewRandomFunction() RandomFunction {
+ return RandomFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn RandomFunction) Name() string {
+ return "random_score"
+}
+
+// Seed is documented in 1.6 as a numeric value. However, in the source code
+// of the Java client, it also accepts strings. So we accept both here, too.
+func (fn RandomFunction) Seed(seed interface{}) RandomFunction {
+ fn.seed = seed
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn RandomFunction) Weight(weight float64) RandomFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn RandomFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn RandomFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.seed != nil {
+ source["seed"] = fn.seed
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source // may be an empty object {} when no seed is set
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_test.go
new file mode 100644
index 00000000..d0c07144
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_test.go
@@ -0,0 +1,138 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFunctionScoreQuery(t *testing.T) { // multiple functions: expects a "functions" array with per-entry filters
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ Add(NewTermFilter("name.last", "banon"), NewFactorFunction().BoostFactor(3)).
+ AddScoreFunc(NewFactorFunction().BoostFactor(3)).
+ AddScoreFunc(NewFactorFunction().BoostFactor(3)).
+ Boost(3).
+ MaxBoost(10).
+ ScoreMode("avg")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":3,"functions":[{"boost_factor":3,"filter":{"term":{"name.last":"banon"}}},{"boost_factor":3},{"boost_factor":3}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithNilFilter(t *testing.T) { // single unfiltered function: serialized flat, no "functions" array
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("tag", "wow")).
+ AddScoreFunc(NewRandomFunction()).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactor(t *testing.T) { // field_value_factor body: field, factor, lowercased modifier
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactorWithWeight(t *testing.T) { // weight of a single function is hoisted to the function_score level
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) { // multiple functions: per-entry weights inside "functions"
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
+ AddScoreFunc(NewScriptFunction("_score * doc['my_numeric_field'].value").Weight(1.25)).
+ AddScoreFunc(NewWeightFactorFunction(0.5)).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) { // gauss decay: origin/scale/offset/decay under the field name
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33))
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) { // multi_value_mode sits beside the field object
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg"))
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go
new file mode 100644
index 00000000..22d83bb0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go
@@ -0,0 +1,117 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyQuery uses similarity based on Levenshtein edit distance for
+// string fields, and a +/- margin on numeric and date fields.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html
+type FuzzyQuery struct {
+ Query
+
+ name string
+ value interface{}
+ boost float32
+ fuzziness interface{}
+ prefixLength *int
+ maxExpansions *int
+ transpositions *bool
+ queryName string
+}
+
+// NewFuzzyQuery creates a new fuzzy query.
+func NewFuzzyQuery() FuzzyQuery {
+ q := FuzzyQuery{
+ boost: -1.0,
+ }
+ return q
+}
+
+func (q FuzzyQuery) Name(name string) FuzzyQuery {
+ q.name = name
+ return q
+}
+
+func (q FuzzyQuery) Value(value interface{}) FuzzyQuery {
+ q.value = value
+ return q
+}
+
+func (q FuzzyQuery) Boost(boost float32) FuzzyQuery {
+ q.boost = boost
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyQuery) Fuzziness(fuzziness interface{}) FuzzyQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyQuery) PrefixLength(prefixLength int) FuzzyQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyQuery) MaxExpansions(maxExpansions int) FuzzyQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q FuzzyQuery) Transpositions(transpositions bool) FuzzyQuery {
+ q.transpositions = &transpositions
+ return q
+}
+
+func (q FuzzyQuery) QueryName(queryName string) FuzzyQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the fuzzy query.
+func (q FuzzyQuery) Source() interface{} {
+	// {
+	//	"fuzzy" : {
+	//		"user" : {
+	//			"value" : "ki",
+	//			"boost" : 1.0,
+	//			"fuzziness" : 2,
+	//			"prefix_length" : 0,
+	//			"max_expansions" : 100
+	//		}
+	//	}
+
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["fuzzy"] = query
+
+	fq := make(map[string]interface{})
+	query[q.name] = fq
+
+	fq["value"] = q.value
+
+	if q.boost != -1.0 {
+		fq["boost"] = q.boost
+	}
+	if q.transpositions != nil {
+		fq["transpositions"] = *q.transpositions
+	}
+	if q.fuzziness != nil {
+		fq["fuzziness"] = q.fuzziness
+	}
+	if q.prefixLength != nil {
+		fq["prefix_length"] = *q.prefixLength
+	}
+	if q.maxExpansions != nil {
+		fq["max_expansions"] = *q.maxExpansions
+	}
+	if q.queryName != "" {
+		fq["_name"] = q.queryName
+	}
+
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go
new file mode 100644
index 00000000..90a837d6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyLikeThisQuery finds documents that are "like" provided text by
+// running it against one or more fields.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-flt-query.html
+type FuzzyLikeThisQuery struct {
+ Query
+
+ fields []string
+ boost *float32
+ likeText *string
+ fuzziness interface{}
+ prefixLength *int
+ maxQueryTerms *int
+ ignoreTF *bool
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewFuzzyLikeThisQuery creates a new fuzzy_like_this query.
+func NewFuzzyLikeThisQuery() FuzzyLikeThisQuery {
+	q := FuzzyLikeThisQuery{
+		fields: make([]string, 0),
+	}
+	return q
+}
+
+func (q FuzzyLikeThisQuery) Field(field string) FuzzyLikeThisQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Fields(fields ...string) FuzzyLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+func (q FuzzyLikeThisQuery) LikeText(likeText string) FuzzyLikeThisQuery {
+ q.likeText = &likeText
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyLikeThisQuery) Fuzziness(fuzziness interface{}) FuzzyLikeThisQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyLikeThisQuery) PrefixLength(prefixLength int) FuzzyLikeThisQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyLikeThisQuery) MaxQueryTerms(maxQueryTerms int) FuzzyLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q FuzzyLikeThisQuery) IgnoreTF(ignoreTF bool) FuzzyLikeThisQuery {
+ q.ignoreTF = &ignoreTF
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Analyzer(analyzer string) FuzzyLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Boost(boost float32) FuzzyLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q FuzzyLikeThisQuery) FailOnUnsupportedField(fail bool) FuzzyLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+func (q FuzzyLikeThisQuery) QueryName(queryName string) FuzzyLikeThisQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the fuzzy_like_this query.
+func (q FuzzyLikeThisQuery) Source() interface{} {
+	// {
+	//	"fuzzy_like_this" : {
+	//		"fields" : ["name.first", "name.last"],
+	//		"like_text" : "text like this one",
+	//		"max_query_terms" : 12
+	// }
+
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["fuzzy_like_this"] = query
+
+	if len(q.fields) > 0 {
+		query["fields"] = q.fields
+	}
+	query["like_text"] = q.likeText
+
+	if q.maxQueryTerms != nil {
+		query["max_query_terms"] = *q.maxQueryTerms
+	}
+	if q.fuzziness != nil {
+		query["fuzziness"] = q.fuzziness
+	}
+	if q.prefixLength != nil {
+		query["prefix_length"] = *q.prefixLength
+	}
+	if q.ignoreTF != nil {
+		query["ignore_tf"] = *q.ignoreTF
+	}
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.analyzer != "" {
+		query["analyzer"] = q.analyzer
+	}
+	if q.failOnUnsupportedField != nil {
+		query["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+	}
+	if q.queryName != "" {
+		query["_name"] = q.queryName
+	}
+
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go
new file mode 100644
index 00000000..eb0b531d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyLikeThisFieldQuery is the same as the fuzzy_like_this query,
+// except that it runs against a single field. It provides nicer query DSL
+// over the generic fuzzy_like_this query, and support typed fields query
+// (automatically wraps typed fields with type filter to match only on the specific type).
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-flt-field-query.html
+type FuzzyLikeThisFieldQuery struct {
+ Query
+
+ field string
+ boost *float32
+ likeText *string
+ fuzziness interface{}
+ prefixLength *int
+ maxQueryTerms *int
+ ignoreTF *bool
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewFuzzyLikeThisFieldQuery creates a new fuzzy like this field query.
+func NewFuzzyLikeThisFieldQuery(field string) FuzzyLikeThisFieldQuery {
+ q := FuzzyLikeThisFieldQuery{
+ field: field,
+ }
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) LikeText(likeText string) FuzzyLikeThisFieldQuery {
+ q.likeText = &likeText
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyLikeThisFieldQuery) Fuzziness(fuzziness interface{}) FuzzyLikeThisFieldQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) PrefixLength(prefixLength int) FuzzyLikeThisFieldQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) MaxQueryTerms(maxQueryTerms int) FuzzyLikeThisFieldQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) IgnoreTF(ignoreTF bool) FuzzyLikeThisFieldQuery {
+ q.ignoreTF = &ignoreTF
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) Analyzer(analyzer string) FuzzyLikeThisFieldQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) Boost(boost float32) FuzzyLikeThisFieldQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) FailOnUnsupportedField(fail bool) FuzzyLikeThisFieldQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) QueryName(queryName string) FuzzyLikeThisFieldQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the fuzzy_like_this_field query.
+func (q FuzzyLikeThisFieldQuery) Source() interface{} {
+	// {
+	//	"fuzzy_like_this_field" : {
+	//		"name.first": {
+	//			"like_text" : "text like this one",
+	//			"max_query_terms" : 12
+	//		}
+	// }
+
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["fuzzy_like_this_field"] = query
+	fq := make(map[string]interface{})
+	query[q.field] = fq
+
+	fq["like_text"] = q.likeText
+
+	if q.maxQueryTerms != nil {
+		fq["max_query_terms"] = *q.maxQueryTerms
+	}
+	if q.fuzziness != nil {
+		fq["fuzziness"] = q.fuzziness
+	}
+	if q.prefixLength != nil {
+		fq["prefix_length"] = *q.prefixLength
+	}
+	if q.ignoreTF != nil {
+		fq["ignore_tf"] = *q.ignoreTF
+	}
+	if q.boost != nil {
+		fq["boost"] = *q.boost
+	}
+	if q.analyzer != "" {
+		fq["analyzer"] = q.analyzer
+	}
+	if q.failOnUnsupportedField != nil {
+		fq["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+	}
+	if q.queryName != "" {
+		fq["_name"] = q.queryName
+	}
+
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query_test.go
new file mode 100644
index 00000000..20bb1c4f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyLikeThisFieldQuery(t *testing.T) {
+ q := NewFuzzyLikeThisFieldQuery("name.first").LikeText("text like this one").MaxQueryTerms(12)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fuzzy_like_this_field":{"name.first":{"like_text":"text like this one","max_query_terms":12}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_query_test.go
new file mode 100644
index 00000000..42ad1a73
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_query_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyLikeThisQuery(t *testing.T) {
+ q := NewFuzzyLikeThisQuery().Fields("name.first", "name.last").LikeText("text like this one").MaxQueryTerms(12)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fuzzy_like_this":{"fields":["name.first","name.last"],"like_text":"text like this one","max_query_terms":12}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_test.go
new file mode 100644
index 00000000..47e4efb5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyQuery(t *testing.T) {
+ q := NewFuzzyQuery().Name("user").Value("ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go
new file mode 100644
index 00000000..17bcb566
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_child query works the same as the has_child filter,
+// by automatically wrapping the filter with a constant_score
+// (when using the default score type).
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html
+type HasChildQuery struct {
+ query Query
+ childType string
+ boost *float32
+ scoreType string
+ minChildren *int
+ maxChildren *int
+ shortCircuitCutoff *int
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasChildQuery creates a new has_child query.
+func NewHasChildQuery(childType string, query Query) HasChildQuery {
+ q := HasChildQuery{
+ query: query,
+ childType: childType,
+ }
+ return q
+}
+
+func (q HasChildQuery) Boost(boost float32) HasChildQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q HasChildQuery) ScoreType(scoreType string) HasChildQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+func (q HasChildQuery) MinChildren(minChildren int) HasChildQuery {
+ q.minChildren = &minChildren
+ return q
+}
+
+func (q HasChildQuery) MaxChildren(maxChildren int) HasChildQuery {
+ q.maxChildren = &maxChildren
+ return q
+}
+
+func (q HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) HasChildQuery {
+ q.shortCircuitCutoff = &shortCircuitCutoff
+ return q
+}
+
+func (q HasChildQuery) QueryName(queryName string) HasChildQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q HasChildQuery) InnerHit(innerHit *InnerHit) HasChildQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source creates the query source for the has_child query.
+func (q HasChildQuery) Source() interface{} {
+	// {
+	//   "has_child" : {
+	//       "type" : "blog_tag",
+	//       "query" : {
+	//           "term" : {
+	//               "tag" : "something"
+	//           }
+	//       }
+	//   }
+	// }
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["has_child"] = query
+
+	query["query"] = q.query.Source()
+	query["type"] = q.childType
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.scoreType != "" {
+		query["score_type"] = q.scoreType
+	}
+	if q.minChildren != nil {
+		query["min_children"] = *q.minChildren
+	}
+	if q.maxChildren != nil {
+		query["max_children"] = *q.maxChildren
+	}
+	if q.shortCircuitCutoff != nil {
+		query["short_circuit_cutoff"] = *q.shortCircuitCutoff
+	}
+	if q.queryName != "" {
+		query["_name"] = q.queryName
+	}
+	if q.innerHit != nil {
+		query["inner_hits"] = q.innerHit.Source()
+	}
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child_test.go
new file mode 100644
index 00000000..6c16790b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child_test.go
@@ -0,0 +1,37 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasChildQuery(t *testing.T) {
+ f := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasChildQueryWithInnerHit(t *testing.T) {
+ f := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
+ f = f.InnerHit(NewInnerHit().Name("comments"))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go
new file mode 100644
index 00000000..ff22acd6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go
@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_parent query works the same as the has_parent filter,
+// by automatically wrapping the filter with a
+// constant_score (when using the default score type).
+// It has the same syntax as the has_parent filter.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html
+type HasParentQuery struct {
+ query Query
+ parentType string
+ boost *float32
+ scoreType string
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasParentQuery creates a new has_parent query.
+func NewHasParentQuery(parentType string, query Query) HasParentQuery {
+ q := HasParentQuery{
+ query: query,
+ parentType: parentType,
+ }
+ return q
+}
+
+func (q HasParentQuery) Boost(boost float32) HasParentQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q HasParentQuery) ScoreType(scoreType string) HasParentQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+func (q HasParentQuery) QueryName(queryName string) HasParentQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q HasParentQuery) InnerHit(innerHit *InnerHit) HasParentQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source creates the query source for the has_parent query.
+func (q HasParentQuery) Source() interface{} {
+	// {
+	//   "has_parent" : {
+	//       "parent_type" : "blog",
+	//       "query" : {
+	//           "term" : {
+	//               "tag" : "something"
+	//           }
+	//       }
+	//   }
+	// }
+	source := make(map[string]interface{})
+
+	query := make(map[string]interface{})
+	source["has_parent"] = query
+
+	query["query"] = q.query.Source()
+	query["parent_type"] = q.parentType
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.scoreType != "" {
+		query["score_type"] = q.scoreType
+	}
+	if q.queryName != "" {
+		query["_name"] = q.queryName
+	}
+	if q.innerHit != nil {
+		query["inner_hits"] = q.innerHit.Source()
+	}
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent_test.go
new file mode 100644
index 00000000..08619c75
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestHasParentQuery checks JSON serialization of the has_parent query.
+// (Renamed from the stuttering "TestHasParentQueryTest".)
+func TestHasParentQuery(t *testing.T) {
+	f := NewHasParentQuery("blog", NewTermQuery("tag", "something"))
+	data, err := json.Marshal(f.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go
new file mode 100644
index 00000000..9a01a045
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go
@@ -0,0 +1,77 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that only have the provided ids.
+// Note, this filter does not require the _id field to be indexed
+// since it works using the _uid field.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html
+type IdsQuery struct {
+ Query
+ types []string
+ values []string
+ boost float32
+ queryName string
+}
+
+// NewIdsQuery creates a new ids query.
+func NewIdsQuery(types ...string) IdsQuery {
+ q := IdsQuery{
+ types: types,
+ values: make([]string, 0),
+ boost: -1.0,
+ }
+ return q
+}
+
+func (q IdsQuery) Ids(ids ...string) IdsQuery {
+ q.values = append(q.values, ids...)
+ return q
+}
+
+func (q IdsQuery) Boost(boost float32) IdsQuery {
+ q.boost = boost
+ return q
+}
+
+func (q IdsQuery) QueryName(queryName string) IdsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the ids query.
+func (q IdsQuery) Source() interface{} {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["ids"] = query
+
+ // type(s)
+ if len(q.types) == 1 {
+ query["type"] = q.types[0]
+ } else if len(q.types) > 1 {
+ query["types"] = q.types
+ }
+
+ // values
+ query["values"] = q.values
+
+ if q.boost != -1.0 {
+ query["boost"] = q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids_test.go
new file mode 100644
index 00000000..c223c609
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIdsQuery(t *testing.T) {
+ q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go
new file mode 100644
index 00000000..04d34f61
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go
@@ -0,0 +1,198 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accept text/numerics/dates,
+// analyzes it, and constructs a query out of it. For more details,
+// see http://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"),
+// or use one of the shortcuts like NewMatchPhraseQuery(...).
+type MatchQuery struct {
+ Query
+ name string
+ value interface{}
+ matchQueryType string // boolean, phrase, phrase_prefix
+ operator string // or / and
+ analyzer string
+ boost *float32
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ lenient *bool
+ fuzzyTranspositions *bool
+ zeroTermsQuery string
+ cutoffFrequency *float32
+ queryName string
+}
+
+// NewMatchQuery creates a new MatchQuery.
+func NewMatchQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value}
+ return q
+}
+
+// NewMatchPhraseQuery creates a new MatchQuery with type phrase.
+func NewMatchPhraseQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value, matchQueryType: "phrase"}
+ return q
+}
+
+// NewMatchPhrasePrefixQuery creates a new MatchQuery with type phrase_prefix.
+func NewMatchPhrasePrefixQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value, matchQueryType: "phrase_prefix"}
+ return q
+}
+
+// Type can be "boolean", "phrase", or "phrase_prefix".
+func (q MatchQuery) Type(matchQueryType string) MatchQuery {
+ q.matchQueryType = matchQueryType
+ return q
+}
+
+func (q MatchQuery) Operator(operator string) MatchQuery {
+ q.operator = operator
+ return q
+}
+
+func (q MatchQuery) Analyzer(analyzer string) MatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MatchQuery) Boost(boost float32) MatchQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MatchQuery) Slop(slop int) MatchQuery {
+ q.slop = &slop
+ return q
+}
+
+func (q MatchQuery) Fuzziness(fuzziness string) MatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q MatchQuery) PrefixLength(prefixLength int) MatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q MatchQuery) MaxExpansions(maxExpansions int) MatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q MatchQuery) MinimumShouldMatch(minimumShouldMatch string) MatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q MatchQuery) Rewrite(rewrite string) MatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q MatchQuery) FuzzyRewrite(fuzzyRewrite string) MatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+func (q MatchQuery) Lenient(lenient bool) MatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+func (q MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) MatchQuery {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q MatchQuery) ZeroTermsQuery(zeroTermsQuery string) MatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+func (q MatchQuery) CutoffFrequency(cutoff float32) MatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+func (q MatchQuery) QueryName(queryName string) MatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the match query, e.g.
+// {"match":{"name":{"query":"value","type":"boolean/phrase"}}}.
+func (q MatchQuery) Source() interface{} {
+	source := make(map[string]interface{})
+
+	match := make(map[string]interface{})
+	source["match"] = match
+
+	query := make(map[string]interface{})
+	match[q.name] = query
+
+	query["query"] = q.value
+
+	if q.matchQueryType != "" {
+		query["type"] = q.matchQueryType
+	}
+	if q.operator != "" {
+		query["operator"] = q.operator
+	}
+	if q.analyzer != "" {
+		query["analyzer"] = q.analyzer
+	}
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.slop != nil {
+		query["slop"] = *q.slop
+	}
+	if q.fuzziness != "" {
+		query["fuzziness"] = q.fuzziness
+	}
+	if q.prefixLength != nil {
+		query["prefix_length"] = *q.prefixLength
+	}
+	if q.maxExpansions != nil {
+		query["max_expansions"] = *q.maxExpansions
+	}
+	if q.minimumShouldMatch != "" {
+		query["minimum_should_match"] = q.minimumShouldMatch
+	}
+	if q.rewrite != "" {
+		query["rewrite"] = q.rewrite
+	}
+	if q.fuzzyRewrite != "" {
+		query["fuzzy_rewrite"] = q.fuzzyRewrite
+	}
+	if q.lenient != nil {
+		query["lenient"] = *q.lenient
+	}
+	if q.fuzzyTranspositions != nil {
+		query["fuzzy_transpositions"] = *q.fuzzyTranspositions
+	}
+	if q.zeroTermsQuery != "" {
+		query["zero_terms_query"] = q.zeroTermsQuery
+	}
+	if q.cutoffFrequency != nil {
+		// Dereference for consistency with the other optional fields above.
+		query["cutoff_frequency"] = *q.cutoffFrequency
+	}
+	if q.queryName != "" {
+		query["_name"] = q.queryName
+	}
+
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go
new file mode 100644
index 00000000..d2ba3eb3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that matches all documents. Maps to Lucene MatchAllDocsQuery.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-match-all-query.html
+type MatchAllQuery struct {
+ Query
+ normsField string
+ boost *float32
+}
+
+// NewMatchAllQuery creates a new match all query.
+func NewMatchAllQuery() MatchAllQuery {
+ q := MatchAllQuery{}
+ return q
+}
+
+func (q MatchAllQuery) NormsField(normsField string) MatchAllQuery {
+ q.normsField = normsField
+ return q
+}
+
+func (q MatchAllQuery) Boost(boost float32) MatchAllQuery {
+ q.boost = &boost
+ return q
+}
+
+// Source creates the query source for the match_all query, e.g.
+// { "match_all" : { ... } }.
+func (q MatchAllQuery) Source() interface{} {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["match_all"] = params
+	if q.boost != nil {
+		// Dereference for consistency with the rest of the package,
+		// which serializes optional values, not pointers.
+		params["boost"] = *q.boost
+	}
+	if q.normsField != "" {
+		params["norms_field"] = q.normsField
+	}
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all_test.go
new file mode 100644
index 00000000..626c9120
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchAllQuery(t *testing.T) {
+ q := NewMatchAllQuery()
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchAllQueryWithParams(t *testing.T) {
+ q := NewMatchAllQuery().NormsField("field_name").Boost(3.14)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{"boost":3.14,"norms_field":"field_name"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_test.go
new file mode 100644
index 00000000..64ad82dd
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_test.go
@@ -0,0 +1,62 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchQuery(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchPhraseQuery(t *testing.T) {
+ q := NewMatchPhraseQuery("message", "this is a test")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test","type":"phrase"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchPhrasePrefixQuery(t *testing.T) {
+ q := NewMatchPhrasePrefixQuery("message", "this is a test")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test","type":"phrase_prefix"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchQueryWithOptions(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go
new file mode 100644
index 00000000..2a47dafc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go
@@ -0,0 +1,184 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// More like this query finds documents that are “like” the provided text
+// by running it against one or more fields. For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/mlt-query/
+type MoreLikeThisQuery struct {
+ Query
+
+ fields []string
+ likeText string
+ percentTermsToMatch *float32
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLen *int
+ maxWordLen *int
+ boostTerms *float32
+ boost *float32
+ analyzer string
+ failOnUnsupportedField *bool
+}
+
+// Creates a new mlt query.
+func NewMoreLikeThisQuery(likeText string) MoreLikeThisQuery {
+ q := MoreLikeThisQuery{
+ likeText: likeText,
+ fields: make([]string, 0),
+ stopWords: make([]string, 0),
+ }
+ return q
+}
+
+func (q MoreLikeThisQuery) Field(field string) MoreLikeThisQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q MoreLikeThisQuery) Fields(fields ...string) MoreLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+func (q MoreLikeThisQuery) StopWord(stopWord string) MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWord)
+ return q
+}
+
+func (q MoreLikeThisQuery) StopWords(stopWords ...string) MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+func (q MoreLikeThisQuery) LikeText(likeText string) MoreLikeThisQuery {
+ q.likeText = likeText
+ return q
+}
+
+func (q MoreLikeThisQuery) PercentTermsToMatch(percentTermsToMatch float32) MoreLikeThisQuery {
+ q.percentTermsToMatch = &percentTermsToMatch
+ return q
+}
+
+func (q MoreLikeThisQuery) MinTermFreq(minTermFreq int) MoreLikeThisQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+func (q MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) MoreLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q MoreLikeThisQuery) MinDocFreq(minDocFreq int) MoreLikeThisQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+func (q MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) MoreLikeThisQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+func (q MoreLikeThisQuery) MinWordLen(minWordLen int) MoreLikeThisQuery {
+ q.minWordLen = &minWordLen
+ return q
+}
+
+func (q MoreLikeThisQuery) MaxWordLen(maxWordLen int) MoreLikeThisQuery {
+ q.maxWordLen = &maxWordLen
+ return q
+}
+
+func (q MoreLikeThisQuery) BoostTerms(boostTerms float32) MoreLikeThisQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+func (q MoreLikeThisQuery) Analyzer(analyzer string) MoreLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MoreLikeThisQuery) Boost(boost float32) MoreLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MoreLikeThisQuery) FailOnUnsupportedField(fail bool) MoreLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// Creates the query source for the mlt query.
+func (q MoreLikeThisQuery) Source() interface{} {
+ // {
+	//   "more_like_this" : { ... }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["more_like_this"] = params
+
+ if len(q.fields) > 0 {
+ params["fields"] = q.fields
+ }
+
+ params["like_text"] = q.likeText
+
+ if q.percentTermsToMatch != nil {
+ params["percent_terms_to_match"] = *q.percentTermsToMatch
+ }
+
+ if q.minTermFreq != nil {
+ params["min_term_freq"] = *q.minTermFreq
+ }
+
+ if q.maxQueryTerms != nil {
+ params["max_query_terms"] = *q.maxQueryTerms
+ }
+
+ if len(q.stopWords) > 0 {
+ params["stop_words"] = q.stopWords
+ }
+
+ if q.minDocFreq != nil {
+ params["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if q.maxDocFreq != nil {
+ params["max_doc_freq"] = *q.maxDocFreq
+ }
+
+ if q.minWordLen != nil {
+ params["min_word_len"] = *q.minWordLen
+ }
+
+ if q.maxWordLen != nil {
+ params["max_word_len"] = *q.maxWordLen
+ }
+
+ if q.boostTerms != nil {
+ params["boost_terms"] = *q.boostTerms
+ }
+
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+
+ if q.analyzer != "" {
+ params["analyzer"] = q.analyzer
+ }
+
+ if q.failOnUnsupportedField != nil {
+ params["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go
new file mode 100644
index 00000000..e3d723ba
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go
@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The more_like_this_field query is the same as the more_like_this query,
+// except it runs against a single field. It provides nicer query DSL
+// over the generic more_like_this query, and support typed fields query
+// (automatically wraps typed fields with type filter to match only
+// on the specific type).
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/mlt-field-query/
+type MoreLikeThisFieldQuery struct {
+ Query
+
+ name string
+ likeText string
+ percentTermsToMatch *float32
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLen *int
+ maxWordLen *int
+ boostTerms *float32
+ boost *float32
+ analyzer string
+ failOnUnsupportedField *bool
+}
+
+// Creates a new mlt_field query.
+func NewMoreLikeThisFieldQuery(name, likeText string) MoreLikeThisFieldQuery {
+ q := MoreLikeThisFieldQuery{
+ name: name,
+ likeText: likeText,
+ stopWords: make([]string, 0),
+ }
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Name(name string) MoreLikeThisFieldQuery {
+ q.name = name
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) StopWord(stopWord string) MoreLikeThisFieldQuery {
+ q.stopWords = append(q.stopWords, stopWord)
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) StopWords(stopWords ...string) MoreLikeThisFieldQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) LikeText(likeText string) MoreLikeThisFieldQuery {
+ q.likeText = likeText
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) PercentTermsToMatch(percentTermsToMatch float32) MoreLikeThisFieldQuery {
+ q.percentTermsToMatch = &percentTermsToMatch
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinTermFreq(minTermFreq int) MoreLikeThisFieldQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxQueryTerms(maxQueryTerms int) MoreLikeThisFieldQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinDocFreq(minDocFreq int) MoreLikeThisFieldQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxDocFreq(maxDocFreq int) MoreLikeThisFieldQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinWordLen(minWordLen int) MoreLikeThisFieldQuery {
+ q.minWordLen = &minWordLen
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxWordLen(maxWordLen int) MoreLikeThisFieldQuery {
+ q.maxWordLen = &maxWordLen
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) BoostTerms(boostTerms float32) MoreLikeThisFieldQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Analyzer(analyzer string) MoreLikeThisFieldQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Boost(boost float32) MoreLikeThisFieldQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) FailOnUnsupportedField(fail bool) MoreLikeThisFieldQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// Creates the query source for the mlt_field query.
+func (q MoreLikeThisFieldQuery) Source() interface{} {
+ // {
+ // "more_like_this_field" : {
+ // "name.first" : {
+ // "like_text" : "text like this one",
+ // "min_term_freq" : 1,
+ // "max_query_terms" : 12
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["more_like_this_field"] = params
+
+ mlt := make(map[string]interface{})
+ params[q.name] = mlt
+
+ mlt["like_text"] = q.likeText
+
+ if q.percentTermsToMatch != nil {
+ mlt["percent_terms_to_match"] = *q.percentTermsToMatch
+ }
+
+ if q.minTermFreq != nil {
+ mlt["min_term_freq"] = *q.minTermFreq
+ }
+
+ if q.maxQueryTerms != nil {
+ mlt["max_query_terms"] = *q.maxQueryTerms
+ }
+
+ if len(q.stopWords) > 0 {
+ mlt["stop_words"] = q.stopWords
+ }
+
+ if q.minDocFreq != nil {
+ mlt["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if q.maxDocFreq != nil {
+ mlt["max_doc_freq"] = *q.maxDocFreq
+ }
+
+ if q.minWordLen != nil {
+ mlt["min_word_len"] = *q.minWordLen
+ }
+
+ if q.maxWordLen != nil {
+ mlt["max_word_len"] = *q.maxWordLen
+ }
+
+ if q.boostTerms != nil {
+ mlt["boost_terms"] = *q.boostTerms
+ }
+
+ if q.boost != nil {
+ mlt["boost"] = *q.boost
+ }
+
+ if q.analyzer != "" {
+ mlt["analyzer"] = q.analyzer
+ }
+
+ if q.failOnUnsupportedField != nil {
+ mlt["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field_test.go
new file mode 100644
index 00000000..03f760f1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestMoreLikeThisFieldQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another Golang topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// MoreLikeThisField query
+ q := NewMoreLikeThisFieldQuery("message", "Golang topic.")
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&q).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_test.go
new file mode 100644
index 00000000..0143f8ca
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_test.go
@@ -0,0 +1,52 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestMoreLikeThis(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another Golang topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// MoreLikeThis query
+ q := NewMoreLikeThisQuery("Golang topic.")
+ q = q.Fields("message")
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&q).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go
new file mode 100644
index 00000000..a52b8537
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go
@@ -0,0 +1,253 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// The multi_match query builds further on top of the match query by allowing multiple fields to be specified.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/multi-match-query.html
+type MultiMatchQuery struct {
+ Query
+ text interface{}
+ fields []string
+ fieldBoosts map[string]*float32
+ matchQueryType string // best_fields, most_fields, cross_fields, phrase, phrase_prefix
+ operator string // and / or
+ analyzer string
+ boost *float32
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ useDisMax *bool
+ tieBreaker *float32
+ lenient *bool
+ cutoffFrequency *float32
+ zeroTermsQuery string
+ queryName string
+}
+
+func NewMultiMatchQuery(text interface{}, fields ...string) MultiMatchQuery {
+ q := MultiMatchQuery{
+ text: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+func (q MultiMatchQuery) Field(field string) MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q MultiMatchQuery) FieldWithBoost(field string, boost float32) MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// Type can be: "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q MultiMatchQuery) Type(matchQueryType string) MultiMatchQuery {
+ zero := float32(0.0)
+ one := float32(1.0)
+
+ switch strings.ToLower(matchQueryType) {
+ default: // best_fields / boolean
+ q.matchQueryType = "best_fields"
+ q.tieBreaker = &zero
+ case "most_fields":
+ q.matchQueryType = "most_fields"
+ q.tieBreaker = &one
+ case "cross_fields":
+ q.matchQueryType = "cross_fields"
+ q.tieBreaker = &zero
+ case "phrase":
+ q.matchQueryType = "phrase"
+ q.tieBreaker = &zero
+ case "phrase_prefix":
+ q.matchQueryType = "phrase_prefix"
+ q.tieBreaker = &zero
+ }
+ return q
+}
+
+func (q MultiMatchQuery) Operator(operator string) MultiMatchQuery {
+ q.operator = operator
+ return q
+}
+
+func (q MultiMatchQuery) Analyzer(analyzer string) MultiMatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MultiMatchQuery) Boost(boost float32) MultiMatchQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MultiMatchQuery) Slop(slop int) MultiMatchQuery {
+ q.slop = &slop
+ return q
+}
+
+func (q MultiMatchQuery) Fuzziness(fuzziness string) MultiMatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q MultiMatchQuery) PrefixLength(prefixLength int) MultiMatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q MultiMatchQuery) MaxExpansions(maxExpansions int) MultiMatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) MultiMatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q MultiMatchQuery) Rewrite(rewrite string) MultiMatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) MultiMatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// Deprecated: use_dis_max is deprecated in Elasticsearch; prefer Type (e.g. "best_fields") with TieBreaker.
+func (q MultiMatchQuery) UseDisMax(useDisMax bool) MultiMatchQuery {
+ q.useDisMax = &useDisMax
+ return q
+}
+
+func (q MultiMatchQuery) TieBreaker(tieBreaker float32) MultiMatchQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+func (q MultiMatchQuery) Lenient(lenient bool) MultiMatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+func (q MultiMatchQuery) CutoffFrequency(cutoff float32) MultiMatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) MultiMatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+func (q MultiMatchQuery) QueryName(queryName string) MultiMatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q MultiMatchQuery) Source() interface{} {
+ //
+ // {
+ // "multi_match" : {
+ // "query" : "this is a test",
+ // "fields" : [ "subject", "message" ]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ multiMatch := make(map[string]interface{})
+ source["multi_match"] = multiMatch
+
+ multiMatch["query"] = q.text
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ multiMatch["fields"] = fields
+ }
+
+ if q.matchQueryType != "" {
+ multiMatch["type"] = q.matchQueryType
+ }
+
+ if q.operator != "" {
+ multiMatch["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ multiMatch["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ multiMatch["boost"] = *q.boost
+ }
+ if q.slop != nil {
+ multiMatch["slop"] = *q.slop
+ }
+ if q.fuzziness != "" {
+ multiMatch["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ multiMatch["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ multiMatch["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ multiMatch["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.rewrite != "" {
+ multiMatch["rewrite"] = q.rewrite
+ }
+ if q.fuzzyRewrite != "" {
+ multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.useDisMax != nil {
+ multiMatch["use_dis_max"] = *q.useDisMax
+ }
+ if q.tieBreaker != nil {
+ multiMatch["tie_breaker"] = *q.tieBreaker
+ }
+ if q.lenient != nil {
+ multiMatch["lenient"] = *q.lenient
+ }
+ if q.cutoffFrequency != nil {
+ multiMatch["cutoff_frequency"] = *q.cutoffFrequency
+ }
+ if q.zeroTermsQuery != "" {
+ multiMatch["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.queryName != "" {
+ multiMatch["_name"] = q.queryName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match_test.go
new file mode 100644
index 00000000..a7bd3471
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match_test.go
@@ -0,0 +1,103 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMultiMatchQuery(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryBestFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryMostFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryCrossFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryPhrase(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryPhrasePrefix(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").
+ Type("best_fields").
+ TieBreaker(0.3)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go
new file mode 100644
index 00000000..375be658
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go
@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Nested query allows to query nested objects / docs (see nested mapping).
+// The query is executed against the nested objects / docs as if they were
+// indexed as separate docs (they are, internally) and resulting in the
+// root parent doc (or parent nested mapping).
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/nested-query/
+type NestedQuery struct {
+ query Query
+ filter Filter
+ path string
+ scoreMode string
+ boost *float32
+ queryName string
+ innerHit *InnerHit
+}
+
+// Creates a new nested_query query.
+func NewNestedQuery(path string) NestedQuery {
+ return NestedQuery{path: path}
+}
+
+func (q NestedQuery) Query(query Query) NestedQuery {
+ q.query = query
+ return q
+}
+
+func (q NestedQuery) Filter(filter Filter) NestedQuery {
+ q.filter = filter
+ return q
+}
+
+func (q NestedQuery) Path(path string) NestedQuery {
+ q.path = path
+ return q
+}
+
+func (q NestedQuery) ScoreMode(scoreMode string) NestedQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q NestedQuery) Boost(boost float32) NestedQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q NestedQuery) QueryName(queryName string) NestedQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q NestedQuery) InnerHit(innerHit *InnerHit) NestedQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Creates the query source for the nested_query query.
+func (q NestedQuery) Source() interface{} {
+ // {
+ // "nested" : {
+ // "query" : {
+ // "bool" : {
+ // "must" : [
+ // {
+ // "match" : {"obj1.name" : "blue"}
+ // },
+ // {
+ // "range" : {"obj1.count" : {"gt" : 5}}
+ // }
+ // ]
+ // }
+ // },
+ // "filter" : {
+ // ...
+ // },
+ // "path" : "obj1",
+ // "score_mode" : "avg",
+ // "boost" : 1.0
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ nq := make(map[string]interface{})
+ query["nested"] = nq
+ if q.query != nil {
+ nq["query"] = q.query.Source()
+ }
+ if q.filter != nil {
+ nq["filter"] = q.filter.Source()
+ }
+ nq["path"] = q.path
+ if q.scoreMode != "" {
+ nq["score_mode"] = q.scoreMode
+ }
+ if q.boost != nil {
+ nq["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ nq["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ nq["inner_hits"] = q.innerHit.Source()
+ }
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested_test.go
new file mode 100644
index 00000000..58609d6b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedQuery(t *testing.T) {
+ f := NewNestedQuery("obj1")
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ f = f.Query(bq)
+ f = f.QueryName("qname")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedQueryWithInnerHit(t *testing.T) {
+ f := NewNestedQuery("obj1")
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ f = f.Query(bq)
+ f = f.QueryName("qname")
+ f = f.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere")))
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go
new file mode 100644
index 00000000..02e95d25
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go
@@ -0,0 +1,75 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Matches documents that have fields containing terms
+// with a specified prefix (not analyzed).
+// For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/prefix-query.html
+type PrefixQuery struct {
+ Query
+ name string
+ prefix string
+ boost *float32
+ rewrite string
+ queryName string
+}
+
+// Creates a new prefix query.
+func NewPrefixQuery(name string, prefix string) PrefixQuery {
+ q := PrefixQuery{name: name, prefix: prefix}
+ return q
+}
+
+func (q PrefixQuery) Boost(boost float32) PrefixQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q PrefixQuery) Rewrite(rewrite string) PrefixQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q PrefixQuery) QueryName(queryName string) PrefixQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the prefix query.
+func (q PrefixQuery) Source() interface{} {
+ // {
+ // "prefix" : {
+ // "user" : {
+ // "prefix" : "ki",
+ // "boost" : 2.0
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["prefix"] = query
+
+ if q.boost == nil && q.rewrite == "" && q.queryName == "" {
+ query[q.name] = q.prefix
+ } else {
+ subQuery := make(map[string]interface{})
+ subQuery["prefix"] = q.prefix
+ if q.boost != nil {
+ subQuery["boost"] = *q.boost
+ }
+ if q.rewrite != "" {
+ subQuery["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ subQuery["_name"] = q.queryName
+ }
+ query[q.name] = subQuery
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix_test.go
new file mode 100644
index 00000000..0c2ac929
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix_test.go
@@ -0,0 +1,37 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPrefixQuery(t *testing.T) {
+ q := NewPrefixQuery("user", "ki")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"prefix":{"user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPrefixQueryWithOptions(t *testing.T) {
+ q := NewPrefixQuery("user", "ki")
+ q = q.QueryName("my_query_name")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go
new file mode 100644
index 00000000..7afdf3c0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go
@@ -0,0 +1,281 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// A query that uses the query parser in order to parse
+// its content. For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query.html
+type QueryStringQuery struct {
+ Query
+
+ queryString string
+ defaultField string
+ defaultOper string
+ analyzer string
+ quoteAnalyzer string
+ quoteFieldSuffix string
+ autoGeneratePhraseQueries *bool
+ allowLeadingWildcard *bool
+ lowercaseExpandedTerms *bool
+ enablePositionIncrements *bool
+ analyzeWildcard *bool
+ boost *float32
+ fuzzyMinSim *float32
+ fuzzyPrefixLength *int
+ fuzzyMaxExpansions *int
+ fuzzyRewrite string
+ phraseSlop *int
+ fields []string
+ fieldBoosts map[string]*float32
+ useDisMax *bool
+ tieBreaker *float32
+ rewrite string
+ minimumShouldMatch string
+ lenient *bool
+}
+
+// Creates a new query string query.
+func NewQueryStringQuery(queryString string) QueryStringQuery {
+ q := QueryStringQuery{
+ queryString: queryString,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ return q
+}
+
+func (q QueryStringQuery) DefaultField(defaultField string) QueryStringQuery {
+ q.defaultField = defaultField
+ return q
+}
+
+func (q QueryStringQuery) Field(field string) QueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q QueryStringQuery) FieldWithBoost(field string, boost float32) QueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+func (q QueryStringQuery) UseDisMax(useDisMax bool) QueryStringQuery {
+ q.useDisMax = &useDisMax
+ return q
+}
+
+func (q QueryStringQuery) TieBreaker(tieBreaker float32) QueryStringQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+func (q QueryStringQuery) DefaultOperator(operator string) QueryStringQuery {
+ q.defaultOper = operator
+ return q
+}
+
+func (q QueryStringQuery) Analyzer(analyzer string) QueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) QueryStringQuery {
+ q.quoteAnalyzer = quoteAnalyzer
+ return q
+}
+
+func (q QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) QueryStringQuery {
+ q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
+ return q
+}
+
+func (q QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) QueryStringQuery {
+ q.allowLeadingWildcard = &allowLeadingWildcard
+ return q
+}
+
+func (q QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) QueryStringQuery {
+ q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return q
+}
+
+func (q QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) QueryStringQuery {
+ q.enablePositionIncrements = &enablePositionIncrements
+ return q
+}
+
+func (q QueryStringQuery) FuzzyMinSim(fuzzyMinSim float32) QueryStringQuery {
+ q.fuzzyMinSim = &fuzzyMinSim
+ return q
+}
+
+func (q QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) QueryStringQuery {
+ q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+ return q
+}
+
+func (q QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) QueryStringQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+func (q QueryStringQuery) PhraseSlop(phraseSlop int) QueryStringQuery {
+ q.phraseSlop = &phraseSlop
+ return q
+}
+
+func (q QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) QueryStringQuery {
+ q.analyzeWildcard = &analyzeWildcard
+ return q
+}
+
+func (q QueryStringQuery) Rewrite(rewrite string) QueryStringQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) QueryStringQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q QueryStringQuery) Boost(boost float32) QueryStringQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) QueryStringQuery {
+ q.quoteFieldSuffix = quoteFieldSuffix
+ return q
+}
+
+func (q QueryStringQuery) Lenient(lenient bool) QueryStringQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// Creates the query source for the query string query.
+func (q QueryStringQuery) Source() interface{} {
+ // {
+ // "query_string" : {
+ // "default_field" : "content",
+ // "query" : "this AND that OR thus"
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["query_string"] = query
+
+ query["query"] = q.queryString
+
+ if q.defaultField != "" {
+ query["default_field"] = q.defaultField
+ }
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.tieBreaker != nil {
+ query["tie_breaker"] = *q.tieBreaker
+ }
+
+ if q.useDisMax != nil {
+ query["use_dis_max"] = *q.useDisMax
+ }
+
+ if q.defaultOper != "" {
+ query["default_operator"] = q.defaultOper
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.quoteAnalyzer != "" {
+ query["quote_analyzer"] = q.quoteAnalyzer
+ }
+
+ if q.autoGeneratePhraseQueries != nil {
+ query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries
+ }
+
+ if q.allowLeadingWildcard != nil {
+ query["allow_leading_wildcard"] = *q.allowLeadingWildcard
+ }
+
+ if q.lowercaseExpandedTerms != nil {
+ query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
+ }
+
+ if q.enablePositionIncrements != nil {
+ query["enable_position_increments"] = *q.enablePositionIncrements
+ }
+
+ if q.fuzzyMinSim != nil {
+ query["fuzzy_min_sim"] = *q.fuzzyMinSim
+ }
+
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+
+ if q.fuzzyPrefixLength != nil {
+ query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength
+ }
+
+ if q.fuzzyMaxExpansions != nil {
+ query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions
+ }
+
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+
+ if q.phraseSlop != nil {
+ query["phrase_slop"] = *q.phraseSlop
+ }
+
+ if q.analyzeWildcard != nil {
+ query["analyze_wildcard"] = *q.analyzeWildcard
+ }
+
+ if q.rewrite != "" {
+ query["rewrite"] = q.rewrite
+ }
+
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+
+ if q.quoteFieldSuffix != "" {
+ query["quote_field_suffix"] = q.quoteFieldSuffix
+ }
+
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string_test.go
new file mode 100644
index 00000000..20f41a0c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string_test.go
@@ -0,0 +1,24 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestQueryStringQuery(t *testing.T) {
+ q := NewQueryStringQuery(`this AND that OR thus`)
+ q = q.DefaultField("content")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go
new file mode 100644
index 00000000..9d10fc32
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go
@@ -0,0 +1,120 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Matches documents with fields that have terms within a certain range.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-range-query.html
+type RangeQuery struct {
+ Query
+ name string
+ from *interface{}
+ to *interface{}
+ timeZone string
+ includeLower bool
+ includeUpper bool
+ boost *float64
+ queryName string
+}
+
+func NewRangeQuery(name string) RangeQuery {
+ q := RangeQuery{name: name, includeLower: true, includeUpper: true}
+ return q
+}
+
+func (f RangeQuery) TimeZone(timeZone string) RangeQuery {
+ f.timeZone = timeZone
+ return f
+}
+
+func (q RangeQuery) From(from interface{}) RangeQuery {
+ q.from = &from
+ return q
+}
+
+func (q RangeQuery) Gt(from interface{}) RangeQuery {
+ q.from = &from
+ q.includeLower = false
+ return q
+}
+
+func (q RangeQuery) Gte(from interface{}) RangeQuery {
+ q.from = &from
+ q.includeLower = true
+ return q
+}
+
+func (q RangeQuery) To(to interface{}) RangeQuery {
+ q.to = &to
+ return q
+}
+
+func (q RangeQuery) Lt(to interface{}) RangeQuery {
+ q.to = &to
+ q.includeUpper = false
+ return q
+}
+
+func (q RangeQuery) Lte(to interface{}) RangeQuery {
+ q.to = &to
+ q.includeUpper = true
+ return q
+}
+
+func (q RangeQuery) IncludeLower(includeLower bool) RangeQuery {
+ q.includeLower = includeLower
+ return q
+}
+
+func (q RangeQuery) IncludeUpper(includeUpper bool) RangeQuery {
+ q.includeUpper = includeUpper
+ return q
+}
+
+func (q RangeQuery) Boost(boost float64) RangeQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q RangeQuery) QueryName(queryName string) RangeQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q RangeQuery) Source() interface{} {
+ // {
+ // "range" : {
+ // "name" : {
+ // "..." : "..."
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ rangeQ := make(map[string]interface{})
+ source["range"] = rangeQ
+
+ params := make(map[string]interface{})
+ rangeQ[q.name] = params
+
+ params["from"] = q.from
+ params["to"] = q.to
+ if q.timeZone != "" {
+ params["time_zone"] = q.timeZone
+ }
+ params["include_lower"] = q.includeLower
+ params["include_upper"] = q.includeUpper
+
+ if q.boost != nil {
+ rangeQ["boost"] = *q.boost
+ }
+
+ if q.queryName != "" {
+ rangeQ["_name"] = q.queryName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range_test.go
new file mode 100644
index 00000000..f3f9aef0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range_test.go
@@ -0,0 +1,55 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRangeQuery(t *testing.T) {
+ q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01")
+ q = q.QueryName("my_query")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"_name":"my_query","postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+/*
+func TestRangeQueryGte(t *testing.T) {
+ q := NewRangeQuery("postDate").Gte("2010-03-01")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"postDate":{"gte":"2010-03-01"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+*/
+
+func TestRangeQueryWithTimeZone(t *testing.T) {
+ f := NewRangeQuery("born").
+ Gte("2012-01-01").
+ Lte("now").
+ TimeZone("+1:00")
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go
new file mode 100644
index 00000000..9d3bb5a3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go
@@ -0,0 +1,89 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RegexpQuery allows you to use regular expression term queries.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html.
+type RegexpQuery struct {
+ Query
+ name string
+ regexp string
+ flags *string
+ boost *float64
+ rewrite *string
+ queryName *string
+ maxDeterminizedStates *int
+}
+
+// NewRegexpQuery creates a new regexp query.
+func NewRegexpQuery(name string, regexp string) RegexpQuery {
+ return RegexpQuery{name: name, regexp: regexp}
+}
+
+// Flags sets the regexp flags.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#_optional_operators
+// for details.
+func (q RegexpQuery) Flags(flags string) RegexpQuery {
+ q.flags = &flags
+ return q
+}
+
+func (q RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) RegexpQuery {
+ q.maxDeterminizedStates = &maxDeterminizedStates
+ return q
+}
+
+func (q RegexpQuery) Boost(boost float64) RegexpQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q RegexpQuery) Rewrite(rewrite string) RegexpQuery {
+ q.rewrite = &rewrite
+ return q
+}
+
+func (q RegexpQuery) QueryName(queryName string) RegexpQuery {
+ q.queryName = &queryName
+ return q
+}
+
+// Source returns the JSON-serializable query data.
+func (q RegexpQuery) Source() interface{} {
+ // {
+ // "regexp" : {
+ // "name.first" : {
+ // "value" : "s.*y",
+ // "boost" : 1.2
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["regexp"] = query
+
+ x := make(map[string]interface{})
+ x["value"] = q.regexp
+ if q.flags != nil {
+ x["flags"] = *q.flags
+ }
+ if q.maxDeterminizedStates != nil {
+ x["max_determinized_states"] = *q.maxDeterminizedStates
+ }
+ if q.boost != nil {
+ x["boost"] = *q.boost
+ }
+ if q.rewrite != nil {
+ x["rewrite"] = *q.rewrite
+ }
+ if q.queryName != nil {
+ x["name"] = *q.queryName
+ }
+ query[q.name] = x
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp_test.go
new file mode 100644
index 00000000..cfd4b6ab
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp_test.go
@@ -0,0 +1,39 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRegexpQuery(t *testing.T) {
+ q := NewRegexpQuery("name.first", "s.*y")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"name.first":{"value":"s.*y"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRegexpQueryWithOptions(t *testing.T) {
+ q := NewRegexpQuery("name.first", "s.*y").
+ Boost(1.2).
+ Flags("INTERSECTION|COMPLEMENT|EMPTY").
+ QueryName("my_query_name")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go
new file mode 100644
index 00000000..3e82e6ad
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go
@@ -0,0 +1,100 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
+// to parse its context. Unlike the regular query_string query,
+// the simple_query_string query will never throw an exception,
+// and discards invalid parts of the query.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html
+type SimpleQueryStringQuery struct {
+ queryText string
+ analyzer string
+ operator string
+ fields []string
+ fieldBoosts map[string]*float32
+}
+
+// Creates a new simple query string query.
+func NewSimpleQueryStringQuery(text string) SimpleQueryStringQuery {
+ q := SimpleQueryStringQuery{
+ queryText: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ return q
+}
+
+func (q SimpleQueryStringQuery) Field(field string) SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q SimpleQueryStringQuery) FieldWithBoost(field string, boost float32) SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+func (q SimpleQueryStringQuery) Analyzer(analyzer string) SimpleQueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q SimpleQueryStringQuery) DefaultOperator(defaultOperator string) SimpleQueryStringQuery {
+ q.operator = defaultOperator
+ return q
+}
+
+// Creates the query source for the query string query.
+func (q SimpleQueryStringQuery) Source() interface{} {
+ // {
+ // "simple_query_string" : {
+ // "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
+ // "analyzer" : "snowball",
+ // "fields" : ["body^5","_all"],
+ // "default_operator" : "and"
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["simple_query_string"] = query
+
+ query["query"] = q.queryText
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.operator != "" {
+ query["default_operator"] = strings.ToLower(q.operator)
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string_test.go
new file mode 100644
index 00000000..6f6ad7d8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string_test.go
@@ -0,0 +1,82 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSimpleQueryStringQuery(t *testing.T) {
+ q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSimpleQueryStringQueryExec(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ query := NewSimpleQueryStringQuery("+Golang +Elasticsearch")
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&query).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go
new file mode 100644
index 00000000..184d4243
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go
@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TemplateQuery is a query that accepts a query template and a
+// map of key/value pairs to fill in template parameters.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+type TemplateQuery struct {
+ vars map[string]interface{}
+ template string
+ templateType string
+}
+
+// NewTemplateQuery creates a new TemplateQuery.
+func NewTemplateQuery(name string) TemplateQuery {
+ return TemplateQuery{
+ template: name,
+ vars: make(map[string]interface{}),
+ }
+}
+
+// Template specifies the name of the template.
+func (q TemplateQuery) Template(name string) TemplateQuery {
+ q.template = name
+ return q
+}
+
+// TemplateType defines which kind of query we use. The values can be:
+// inline, indexed, or file. If undefined, inline is used.
+func (q TemplateQuery) TemplateType(typ string) TemplateQuery {
+ q.templateType = typ
+ return q
+}
+
+// Var sets a single parameter pair.
+func (q TemplateQuery) Var(name string, value interface{}) TemplateQuery {
+ q.vars[name] = value
+ return q
+}
+
+// Vars sets parameters for the template query.
+func (q TemplateQuery) Vars(vars map[string]interface{}) TemplateQuery {
+ q.vars = vars
+ return q
+}
+
+// Source returns the JSON serializable content for the search.
+func (q TemplateQuery) Source() interface{} {
+ // {
+ // "template" : {
+ // "query" : {"match_{{template}}": {}},
+ // "params" : {
+ // "template": "all"
+ // }
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ tmpl := make(map[string]interface{})
+ query["template"] = tmpl
+
+ // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+ var fieldname string
+ switch q.templateType {
+ case "file": // file
+ fieldname = "file"
+ case "indexed", "id": // indexed
+ fieldname = "id"
+ default: // inline
+ fieldname = "query"
+ }
+
+ tmpl[fieldname] = q.template
+ if len(q.vars) > 0 {
+ tmpl["params"] = q.vars
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query_test.go
new file mode 100644
index 00000000..74ba2a3a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query_test.go
@@ -0,0 +1,53 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTemplateQueryInlineTest(t *testing.T) {
+ f := NewTemplateQuery("\"match_{{template}}\": {}}\"").Vars(map[string]interface{}{"template": "all"})
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"template":{"params":{"template":"all"},"query":"\"match_{{template}}\": {}}\""}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTemplateQueryIndexedTest(t *testing.T) {
+ f := NewTemplateQuery("indexedTemplate").
+ TemplateType("id").
+ Vars(map[string]interface{}{"template": "all"})
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"template":{"id":"indexedTemplate","params":{"template":"all"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTemplateQueryFileTest(t *testing.T) {
+ f := NewTemplateQuery("storedTemplate").
+ TemplateType("file").
+ Vars(map[string]interface{}{"template": "all"})
+ data, err := json.Marshal(f.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"template":{"file":"storedTemplate","params":{"template":"all"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go
new file mode 100644
index 00000000..7b8b5184
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go
@@ -0,0 +1,55 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A term query matches documents that contain
+// a term (not analyzed). For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/term-query.html
+type TermQuery struct {
+ Query
+ name string
+ value interface{}
+ boost *float32
+ queryName string
+}
+
+// Creates a new term query.
+func NewTermQuery(name string, value interface{}) TermQuery {
+ t := TermQuery{name: name, value: value}
+ return t
+}
+
+func (q TermQuery) Boost(boost float32) TermQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q TermQuery) QueryName(queryName string) TermQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the term query.
+func (q TermQuery) Source() interface{} {
+ // {"term":{"name":"value"}}
+ source := make(map[string]interface{})
+ tq := make(map[string]interface{})
+ source["term"] = tq
+
+ if q.boost == nil && q.queryName == "" {
+ tq[q.name] = q.value
+ } else {
+ subQ := make(map[string]interface{})
+ subQ["value"] = q.value
+ if q.boost != nil {
+ subQ["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ subQ["_name"] = q.queryName
+ }
+ tq[q.name] = subQ
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term_test.go
new file mode 100644
index 00000000..09da9840
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermQuery(t *testing.T) {
+ q := NewTermQuery("user", "ki")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"term":{"user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermQueryWithOptions(t *testing.T) {
+ q := NewTermQuery("user", "ki")
+ q = q.Boost(2.79)
+ q = q.QueryName("my_tq")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go
new file mode 100644
index 00000000..40a8ed9b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that match on any (configurable) of the provided terms.
+// This is a simpler syntax query for using a bool query with
+// several term queries in the should clauses.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
+type TermsQuery struct {
+ Query
+ name string
+ values []interface{}
+ minimumShouldMatch string
+ disableCoord *bool
+ boost *float32
+ queryName string
+}
+
+// NewTermsQuery creates a new terms query.
+func NewTermsQuery(name string, values ...interface{}) TermsQuery {
+ t := TermsQuery{
+ name: name,
+ values: make([]interface{}, 0),
+ }
+ if len(values) > 0 {
+ t.values = append(t.values, values...)
+ }
+ return t
+}
+
+func (q TermsQuery) MinimumShouldMatch(minimumShouldMatch string) TermsQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q TermsQuery) DisableCoord(disableCoord bool) TermsQuery {
+ q.disableCoord = &disableCoord
+ return q
+}
+
+func (q TermsQuery) Boost(boost float32) TermsQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q TermsQuery) QueryName(queryName string) TermsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the terms query.
+func (q TermsQuery) Source() interface{} {
+ // {"terms":{"name":["value1","value2"]}}
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["terms"] = params
+ params[q.name] = q.values
+ if q.minimumShouldMatch != "" {
+ params["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.disableCoord != nil {
+ params["disable_coord"] = *q.disableCoord
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms_test.go
new file mode 100644
index 00000000..020d87fe
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsQuery(t *testing.T) {
+ q := NewTermsQuery("user", "ki")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"user":["ki"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestTermsQueryWithOptions checks JSON serialization of a terms query
+// with boost and query name set. (Renamed from the typo "TermQuerys".)
+func TestTermsQueryWithOptions(t *testing.T) {
+ q := NewTermsQuery("user", "ki", "ko")
+ q = q.Boost(2.79)
+ q = q.QueryName("my_tq")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go
new file mode 100644
index 00000000..5a25e24a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go
@@ -0,0 +1,100 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// WildcardQuery matches documents that have fields matching a wildcard
+// expression (not analyzed). Supported wildcards are *, which matches
+// any character sequence (including the empty one), and ?, which matches
+// any single character. Note this query can be slow, as it needs to iterate
+// over many terms. In order to prevent extremely slow wildcard queries,
+// a wildcard term should not start with one of the wildcards * or ?.
+// The wildcard query maps to Lucene WildcardQuery.
+//
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html.
+type WildcardQuery struct {
+ Query
+
+ name string
+ wildcard string
+ boost float32
+ rewrite string
+ queryName string
+}
+
+// NewWildcardQuery creates a new wildcard query.
+func NewWildcardQuery(name, wildcard string) WildcardQuery {
+ q := WildcardQuery{
+ name: name,
+ wildcard: wildcard,
+ boost: -1.0,
+ }
+ return q
+}
+
+// Name is the name of the field name.
+func (q WildcardQuery) Name(name string) WildcardQuery {
+ q.name = name
+ return q
+}
+
+// Wildcard is the wildcard to be used in the query, e.g. ki*y??.
+func (q WildcardQuery) Wildcard(wildcard string) WildcardQuery {
+ q.wildcard = wildcard
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q WildcardQuery) Boost(boost float32) WildcardQuery {
+ q.boost = boost
+ return q
+}
+
+// Rewrite controls the rewriting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-multi-term-rewrite.html
+// for details.
+func (q WildcardQuery) Rewrite(rewrite string) WildcardQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the name of this query.
+func (q WildcardQuery) QueryName(queryName string) WildcardQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable body of this query.
+func (q WildcardQuery) Source() interface{} {
+ // {
+ // "wildcard" : {
+ // "user" : {
+ // "wildcard" : "ki*y",
+ // "boost" : 1.0
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["wildcard"] = query
+
+ wq := make(map[string]interface{})
+ query[q.name] = wq
+
+ wq["wildcard"] = q.wildcard
+
+ if q.boost != -1.0 {
+ wq["boost"] = q.boost
+ }
+ if q.rewrite != "" {
+ wq["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ wq["_name"] = q.queryName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard_test.go
new file mode 100644
index 00000000..d17bd64c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard_test.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+ "encoding/json"
+ "testing"
+
+ elastic "gopkg.in/olivere/elastic.v2"
+)
+
+func ExampleWildcardQuery() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Define wildcard query
+ q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2)
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(q). // use wildcard query defined above
+ Do() // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = searchResult
+}
+
+func TestWildcardQuery(t *testing.T) {
+ q := elastic.NewWildcardQuery("user", "ki*y??")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestWildcardQueryWithBoost(t *testing.T) {
+ q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go
new file mode 100644
index 00000000..2fc0311b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go
@@ -0,0 +1,158 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+ searchType string // default in ES is "query_then_fetch"
+ indices []string
+ types []string
+ routing *string
+ preference *string
+ source interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+ return &SearchRequest{
+ indices: make([]string, 0),
+ types: make([]string, 0),
+ }
+}
+
+// SearchType sets the search type. It must be one of "query_then_fetch",
+// "query_and_fetch", "scan", "count", "dfs_query_then_fetch", or
+// "dfs_query_and_fetch". See the SearchType* helper methods below.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+ r.searchType = searchType
+ return r
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+ return r.SearchType("dfs_query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+ return r.SearchType("dfs_query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+ return r.SearchType("query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+ return r.SearchType("query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+ return r.SearchType("scan")
+}
+
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+ return r.SearchType("count")
+}
+
+func (r *SearchRequest) Index(index string) *SearchRequest {
+ r.indices = append(r.indices, index)
+ return r
+}
+
+func (r *SearchRequest) Indices(indices ...string) *SearchRequest {
+ r.indices = append(r.indices, indices...)
+ return r
+}
+
+func (r *SearchRequest) HasIndices() bool {
+ return len(r.indices) > 0
+}
+
+func (r *SearchRequest) Type(typ string) *SearchRequest {
+ r.types = append(r.types, typ)
+ return r
+}
+
+func (r *SearchRequest) Types(types ...string) *SearchRequest {
+ r.types = append(r.types, types...)
+ return r
+}
+
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+ r.routing = &routing
+ return r
+}
+
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+ if routings != nil {
+ routings := strings.Join(routings, ",")
+ r.routing = &routings
+ } else {
+ r.routing = nil
+ }
+ return r
+}
+
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+ r.preference = &preference
+ return r
+}
+
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+ switch v := source.(type) {
+ case *SearchSource:
+ r.source = v.Source()
+ default:
+ r.source = source
+ }
+ return r
+}
+
+// header is used by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+// header is used by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+ h := make(map[string]interface{})
+ if r.searchType != "" {
+ h["search_type"] = r.searchType
+ }
+
+ switch len(r.indices) {
+ case 0:
+ case 1:
+ h["index"] = r.indices[0]
+ default:
+ h["indices"] = r.indices
+ }
+
+ // Mirror the index/indices handling above: a single type is serialized
+ // under the singular "type" key, multiple types under plural "types".
+ // (The original had these two keys swapped.)
+ switch len(r.types) {
+ case 0:
+ case 1:
+ h["type"] = r.types[0]
+ default:
+ h["types"] = r.types
+ }
+
+ if r.routing != nil && *r.routing != "" {
+ h["routing"] = *r.routing
+ }
+
+ if r.preference != nil && *r.preference != "" {
+ h["preference"] = *r.preference
+ }
+
+ return h
+}
+
+// body is used by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+ return r.source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request_test.go
new file mode 100644
index 00000000..1185643f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestSearchRequestIndex(t *testing.T) {
+ builder := NewSearchRequest().Index("test")
+ data, err := json.Marshal(builder.header())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"index":"test"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchRequestIndices(t *testing.T) {
+ builder := NewSearchRequest().Indices("test", "test2")
+ data, err := json.Marshal(builder.header())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":["test","test2"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+// TestSearchRequestHasIndices verifies HasIndices is false for a fresh
+// request and true once indices are added. The original error messages
+// were swapped (claimed "expected true" on an unexpected true, etc.).
+func TestSearchRequestHasIndices(t *testing.T) {
+ builder := NewSearchRequest()
+ if builder.HasIndices() {
+ t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
+ }
+ builder = builder.Indices("test", "test2")
+ if !builder.HasIndices() {
+ t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go
new file mode 100644
index 00000000..6f740ed8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go
@@ -0,0 +1,495 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+ query Query
+ postFilter Filter
+ from int
+ size int
+ explain *bool
+ version *bool
+ sorts []SortInfo
+ sorters []Sorter
+ trackScores bool
+ minScore *float64
+ timeout string
+ fieldNames []string
+ fieldDataFields []string
+ scriptFields []*ScriptField
+ partialFields []*PartialField
+ fetchSourceContext *FetchSourceContext
+ facets map[string]Facet
+ aggregations map[string]Aggregation
+ highlight *Highlight
+ globalSuggestText string
+ suggesters []Suggester
+ rescores []*Rescore
+ defaultRescoreWindowSize *int
+ indexBoosts map[string]float64
+ stats []string
+ innerHits map[string]*InnerHit
+}
+
+func NewSearchSource() *SearchSource {
+ return &SearchSource{
+ from: -1,
+ size: -1,
+ trackScores: false,
+ sorts: make([]SortInfo, 0),
+ sorters: make([]Sorter, 0),
+ fieldDataFields: make([]string, 0),
+ scriptFields: make([]*ScriptField, 0),
+ partialFields: make([]*PartialField, 0),
+ facets: make(map[string]Facet),
+ aggregations: make(map[string]Aggregation),
+ rescores: make([]*Rescore, 0),
+ indexBoosts: make(map[string]float64),
+ stats: make([]string, 0),
+ innerHits: make(map[string]*InnerHit),
+ }
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+ s.query = query
+ return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets.
+func (s *SearchSource) PostFilter(postFilter Filter) *SearchSource {
+ s.postFilter = postFilter
+ return s
+}
+
+func (s *SearchSource) From(from int) *SearchSource {
+ s.from = from
+ return s
+}
+
+func (s *SearchSource) Size(size int) *SearchSource {
+ s.size = size
+ return s
+}
+
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+ s.minScore = &minScore
+ return s
+}
+
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+ s.explain = &explain
+ return s
+}
+
+func (s *SearchSource) Version(version bool) *SearchSource {
+ s.version = &version
+ return s
+}
+
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+ s.timeout = timeout
+ return s
+}
+
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+ s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+ return s
+}
+
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+ s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
+ return s
+}
+
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+ s.sorts = append(s.sorts, info)
+ return s
+}
+
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+ s.sorters = append(s.sorters, sorter...)
+ return s
+}
+
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+ s.trackScores = trackScores
+ return s
+}
+
+func (s *SearchSource) Facet(name string, facet Facet) *SearchSource {
+ s.facets[name] = facet
+ return s
+}
+
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+ s.aggregations[name] = aggregation
+ return s
+}
+
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+ s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+ return s
+}
+
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+ s.highlight = highlight
+ return s
+}
+
+func (s *SearchSource) Highlighter() *Highlight {
+ if s.highlight == nil {
+ s.highlight = NewHighlight()
+ }
+ return s.highlight
+}
+
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+ s.globalSuggestText = text
+ return s
+}
+
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+ s.suggesters = append(s.suggesters, suggester)
+ return s
+}
+
+func (s *SearchSource) AddRescore(rescore *Rescore) *SearchSource {
+ s.rescores = append(s.rescores, rescore)
+ return s
+}
+
+func (s *SearchSource) ClearRescores() *SearchSource {
+ s.rescores = make([]*Rescore, 0)
+ return s
+}
+
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+ if s.fetchSourceContext == nil {
+ s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fetchSourceContext.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+ s.fetchSourceContext = fetchSourceContext
+ return s
+}
+
+func (s *SearchSource) Fields(fieldNames ...string) *SearchSource {
+ if s.fieldNames == nil {
+ s.fieldNames = make([]string, 0)
+ }
+ s.fieldNames = append(s.fieldNames, fieldNames...)
+ return s
+}
+
+func (s *SearchSource) Field(fieldName string) *SearchSource {
+ if s.fieldNames == nil {
+ s.fieldNames = make([]string, 0)
+ }
+ s.fieldNames = append(s.fieldNames, fieldName)
+ return s
+}
+
+func (s *SearchSource) NoFields() *SearchSource {
+ s.fieldNames = make([]string, 0)
+ return s
+}
+
+func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource {
+ s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...)
+ return s
+}
+
+func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource {
+ s.fieldDataFields = append(s.fieldDataFields, fieldDataField)
+ return s
+}
+
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptFields...)
+ return s
+}
+
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptField)
+ return s
+}
+
+func (s *SearchSource) PartialFields(partialFields ...*PartialField) *SearchSource {
+ s.partialFields = append(s.partialFields, partialFields...)
+ return s
+}
+
+func (s *SearchSource) PartialField(partialField *PartialField) *SearchSource {
+ s.partialFields = append(s.partialFields, partialField)
+ return s
+}
+
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+ s.indexBoosts[index] = boost
+ return s
+}
+
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+ s.stats = append(s.stats, statsGroup...)
+ return s
+}
+
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource {
+ s.innerHits[name] = innerHit
+ return s
+}
+
+func (s *SearchSource) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if s.from != -1 {
+ source["from"] = s.from
+ }
+ if s.size != -1 {
+ source["size"] = s.size
+ }
+ if s.timeout != "" {
+ source["timeout"] = s.timeout
+ }
+ if s.query != nil {
+ source["query"] = s.query.Source()
+ }
+ if s.postFilter != nil {
+ source["post_filter"] = s.postFilter.Source()
+ }
+ if s.minScore != nil {
+ source["min_score"] = *s.minScore
+ }
+ if s.version != nil {
+ source["version"] = *s.version
+ }
+ if s.explain != nil {
+ source["explain"] = *s.explain
+ }
+ if s.fetchSourceContext != nil {
+ source["_source"] = s.fetchSourceContext.Source()
+ }
+
+ if s.fieldNames != nil {
+ switch len(s.fieldNames) {
+ case 1:
+ source["fields"] = s.fieldNames[0]
+ default:
+ source["fields"] = s.fieldNames
+ }
+ }
+
+ if len(s.fieldDataFields) > 0 {
+ source["fielddata_fields"] = s.fieldDataFields
+ }
+
+ if len(s.partialFields) > 0 {
+ pfmap := make(map[string]interface{})
+ for _, partialField := range s.partialFields {
+ pfmap[partialField.Name] = partialField.Source()
+ }
+ source["partial_fields"] = pfmap
+ }
+
+ if len(s.scriptFields) > 0 {
+ sfmap := make(map[string]interface{})
+ for _, scriptField := range s.scriptFields {
+ sfmap[scriptField.FieldName] = scriptField.Source()
+ }
+ source["script_fields"] = sfmap
+ }
+
+ if len(s.sorters) > 0 {
+ sortarr := make([]interface{}, 0)
+ for _, sorter := range s.sorters {
+ sortarr = append(sortarr, sorter.Source())
+ }
+ source["sort"] = sortarr
+ } else if len(s.sorts) > 0 {
+ sortarr := make([]interface{}, 0)
+ for _, sort := range s.sorts {
+ sortarr = append(sortarr, sort.Source())
+ }
+ source["sort"] = sortarr
+ }
+
+ if s.trackScores {
+ source["track_scores"] = s.trackScores
+ }
+
+ if len(s.indexBoosts) > 0 {
+ source["indices_boost"] = s.indexBoosts
+ }
+
+ if len(s.facets) > 0 {
+ facetsMap := make(map[string]interface{})
+ for field, facet := range s.facets {
+ facetsMap[field] = facet.Source()
+ }
+ source["facets"] = facetsMap
+ }
+
+ if len(s.aggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ for name, aggregate := range s.aggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ source["aggregations"] = aggsMap
+ }
+
+ if s.highlight != nil {
+ source["highlight"] = s.highlight.Source()
+ }
+
+ if len(s.suggesters) > 0 {
+ suggesters := make(map[string]interface{})
+ for _, s := range s.suggesters {
+ suggesters[s.Name()] = s.Source(false)
+ }
+ if s.globalSuggestText != "" {
+ suggesters["text"] = s.globalSuggestText
+ }
+ source["suggest"] = suggesters
+ }
+
+ if len(s.rescores) > 0 {
+ // Strip empty rescores from request
+ rescores := make([]*Rescore, 0)
+ for _, r := range s.rescores {
+ if !r.IsEmpty() {
+ rescores = append(rescores, r)
+ }
+ }
+
+ if len(rescores) == 1 {
+ rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ source["rescore"] = rescores[0].Source()
+ } else {
+ slice := make([]interface{}, 0)
+ for _, r := range rescores {
+ r.defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ slice = append(slice, r.Source())
+ }
+ source["rescore"] = slice
+ }
+ }
+
+ if len(s.stats) > 0 {
+ source["stats"] = s.stats
+ }
+
+ if len(s.innerHits) > 0 {
+ // Top-level inner hits
+ // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+ // "inner_hits": {
+ // "": {
+ // "": {
+ // "": {
+ // ,
+ // [,"inner_hits" : { []+ } ]?
+ // }
+ // }
+ // },
+ // [,"" : { ... } ]*
+ // }
+ m := make(map[string]interface{})
+ for name, hit := range s.innerHits {
+ if hit.path != "" {
+ path := make(map[string]interface{})
+ path[hit.path] = hit.Source()
+ m[name] = map[string]interface{}{
+ "path": path,
+ }
+ } else if hit.typ != "" {
+ typ := make(map[string]interface{})
+ typ[hit.typ] = hit.Source()
+ m[name] = map[string]interface{}{
+ "type": typ,
+ }
+ } else {
+ // TODO the Java client throws here, because either path or typ must be specified
+ }
+ }
+ source["inner_hits"] = m
+ }
+
+ return source
+}
+
+// -- Script Field --
+
+type ScriptField struct {
+ FieldName string
+
+ script string
+ lang string
+ params map[string]interface{}
+}
+
+func NewScriptField(fieldName, script, lang string, params map[string]interface{}) *ScriptField {
+ return &ScriptField{fieldName, script, lang, params}
+}
+
+func (f *ScriptField) Source() interface{} {
+ source := make(map[string]interface{})
+ source["script"] = f.script
+ if f.lang != "" {
+ source["lang"] = f.lang
+ }
+ if f.params != nil && len(f.params) > 0 {
+ source["params"] = f.params
+ }
+ return source
+}
+
+// -- Partial Field --
+
+type PartialField struct {
+ Name string
+ includes []string
+ excludes []string
+}
+
+func NewPartialField(name string, includes, excludes []string) *PartialField {
+ return &PartialField{name, includes, excludes}
+}
+
+func (f *PartialField) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if f.includes != nil {
+ switch len(f.includes) {
+ case 0:
+ case 1:
+ source["include"] = f.includes[0]
+ default:
+ source["include"] = f.includes
+ }
+ }
+
+ if f.excludes != nil {
+ switch len(f.excludes) {
+ case 0:
+ case 1:
+ source["exclude"] = f.excludes[0]
+ default:
+ source["exclude"] = f.excludes
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source_test.go
new file mode 100644
index 00000000..918e4640
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source_test.go
@@ -0,0 +1,204 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSearchSourceMatchAllQuery(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFromAndSize(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).From(21).Size(20)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"from":21,"query":{"match_all":{}},"size":20}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceNoFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).NoFields()
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":[],"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).Fields("message", "tags")
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":["message","tags"],"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFetchSourceDisabled(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_source":false,"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFetchSourceByWildcards(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description")
+ builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFieldDataFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).FieldDataFields("test1", "test2")
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fielddata_fields":["test1","test2"],"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceScriptFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ sf1 := NewScriptField("test1", "doc['my_field_name'].value * 2", "", nil)
+ sf2 := NewScriptField("test2", "doc['my_field_name'].value * factor", "", map[string]interface{}{"factor": 3.1415927})
+ builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"params":{"factor":3.1415927},"script":"doc['my_field_name'].value * factor"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourcePostFilter(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ pf := NewTermFilter("tag", "important")
+ builder := NewSearchSource().Query(matchAllQ).PostFilter(pf)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceHighlight(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ hl := NewHighlight().Field("content")
+ builder := NewSearchSource().Query(matchAllQ).Highlight(hl)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceRescoring(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ rescorerQuery := NewMatchQuery("field1", "the quick brown fox").Type("phrase").Slop(2)
+ rescorer := NewQueryRescorer(rescorerQuery)
+ rescorer = rescorer.QueryWeight(0.7)
+ rescorer = rescorer.RescoreQueryWeight(1.2)
+ rescore := NewRescore().WindowSize(50).Rescorer(rescorer)
+ builder := NewSearchSource().Query(matchAllQ).AddRescore(rescore)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match":{"field1":{"query":"the quick brown fox","slop":2,"type":"phrase"}}},"rescore_query_weight":1.2},"window_size":50}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceIndexBoost(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3)
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceInnerHits(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).
+ InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))).
+ InnerHit("views", NewInnerHit().Path("view"))
+ data, err := json.Marshal(builder.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_suggester_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_suggester_test.go
new file mode 100644
index 00000000..c70cdf9b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_suggester_test.go
@@ -0,0 +1,259 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ _ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestTermSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ tsName := "my-suggestions"
+ ts := NewTermSuggester(tsName)
+ ts = ts.Text("Goolang")
+ ts = ts.Field("message")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Suggester(ts).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[tsName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "goolang" {
+ t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "golang" {
+ t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+ }
+ if myOption.Score == float32(0.0) {
+ t.Errorf("expected Score != 0.0; got %v", myOption.Score)
+ }
+ if myOption.Freq == 0 {
+ t.Errorf("expected Freq != 0; got %v", myOption.Freq)
+ }
+}
+
+func TestPhraseSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ phraseSuggesterName := "my-suggestions"
+ ps := NewPhraseSuggester(phraseSuggesterName)
+ ps = ps.Text("Goolang")
+ ps = ps.Field("message")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Suggester(ps).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[phraseSuggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Goolang" {
+ t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ /*
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "golang" {
+ t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+ }
+ if myOption.Score == float32(0.0) {
+ t.Errorf("expected Score != 0.0; got %v", myOption.Score)
+ }
+ */
+}
+
+// TODO(oe): I get a "Completion suggester not supported" exception on 0.90.2?!
+/*
+func TestCompletionSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ suggesterName := "my-suggestions"
+ cs := NewCompletionSuggester(suggesterName)
+ cs = cs.Text("Goolang")
+ cs = cs.Field("message")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Suggester(cs).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[suggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Goolang" {
+ t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "golang" {
+ t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+ }
+ if myOption.Score == float32(0.0) {
+ t.Errorf("expected Score != 0.0; got %v", myOption.Score)
+ }
+}
+//*/
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_templates_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_templates_test.go
new file mode 100644
index 00000000..eebc97fc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_templates_test.go
@@ -0,0 +1,98 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestSearchTemplatesLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Template
+ tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+ // Create template
+ cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cresp == nil {
+ t.Fatalf("expected response != nil; got: %v", cresp)
+ }
+ if !cresp.Created {
+ t.Errorf("expected created = %v; got: %v", true, cresp.Created)
+ }
+
+ // Get template
+ resp, err := client.GetTemplate().Id("elastic-test").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatalf("expected response != nil; got: %v", resp)
+ }
+ if resp.Template == "" {
+ t.Errorf("expected template != %q; got: %q", "", resp.Template)
+ }
+
+ // Delete template
+ dresp, err := client.DeleteTemplate().Id("elastic-test").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if dresp == nil {
+ t.Fatalf("expected response != nil; got: %v", dresp)
+ }
+ if !dresp.Found {
+ t.Fatalf("expected found = %v; got: %v", true, dresp.Found)
+ }
+}
+
+func TestSearchTemplatesInlineQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Run query with (inline) search template
+ // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+ tq := NewTemplateQuery(`{"match_{{template}}": {}}`).Var("template", "all")
+ resp, err := client.Search(testIndexName).Query(&tq).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatalf("expected response != nil; got: %v", resp)
+ }
+ if resp.Hits == nil {
+ t.Fatalf("expected response hits != nil; got: %v", resp.Hits)
+ }
+ if resp.Hits.TotalHits != 3 {
+ t.Fatalf("expected 3 hits; got: %d", resp.Hits.TotalHits)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_test.go
new file mode 100644
index 00000000..a4e71d8e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_test.go
@@ -0,0 +1,882 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestSearchMatchAll(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(&all).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 4 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 4, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 4 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 4, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkSearchMatchAll(b *testing.B) {
+ client := setupTestClientAndCreateIndexAndAddDocs(b)
+
+ for n := 0; n < b.N; n++ {
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(&all).Do()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ b.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 4 {
+ b.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 4, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 4 {
+ b.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 4, len(searchResult.Hits.Hits))
+ }
+ }
+}
+
+func TestSearchResultTotalHits(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ count, err := client.Count(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(&all).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got := searchResult.TotalHits()
+ if got != count {
+ t.Fatalf("expected %d hits; got: %d", count, got)
+ }
+
+ // No hits
+ searchResult = &SearchResult{}
+ got = searchResult.TotalHits()
+ if got != 0 {
+ t.Errorf("expected %d hits; got: %d", 0, got)
+ }
+}
+
+func TestSearchResultEach(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(&all).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over non-ptr type
+ var aTweet tweet
+ count := 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _, ok := item.(tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ }
+ if count == 0 {
+ t.Errorf("expected to find some hits; got: %d", count)
+ }
+
+ // Iterate over ptr-type
+ count = 0
+ var aTweetPtr *tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
+ count++
+ tw, ok := item.(*tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ if tw == nil {
+ t.Fatal("expected hit to not be nil")
+ }
+ }
+ if count == 0 {
+ t.Errorf("expected to find some hits; got: %d", count)
+ }
+
+ // Does not iterate when no hits are found
+ searchResult = &SearchResult{Hits: nil}
+ count = 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _ = item
+ }
+ if count != 0 {
+ t.Errorf("expected to not find any hits; got: %d", count)
+ }
+ searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}}
+ count = 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _ = item
+ }
+ if count != 0 {
+ t.Errorf("expected to not find any hits; got: %d", count)
+ }
+}
+
+func TestSearchSorting(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Sort("created", false).
+ Timeout("1s").
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSearchSortingBySorters(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ SortBy(NewFieldSort("created").Desc(), NewScoreSort()).
+ Timeout("1s").
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSearchSpecificFields(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Fields("message").
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ if hit.Source != nil {
+ t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source)
+ }
+ if hit.Fields == nil {
+ t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil")
+ }
+ field, found := hit.Fields["message"]
+ if !found {
+ t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message")
+ }
+ fields, ok := field.([]interface{})
+ if !ok {
+ t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields))
+ }
+ if len(fields) != 1 {
+ t.Errorf("expected a field with 1 entry; got: %d", len(fields))
+ }
+ message, ok := fields[0].(string)
+ if !ok {
+ t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0]))
+ }
+ if message == "" {
+ t.Errorf("expected a message; got: %q", message)
+ }
+ }
+}
+
+func TestSearchExplain(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(&all).
+ Explain(true).
+ Timeout("1s").
+ // Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ if hit.Explanation == nil {
+ t.Fatal("expected search explanation")
+ }
+ if hit.Explanation.Value <= 0.0 {
+ t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value)
+ }
+ if hit.Explanation.Description == "" {
+ t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description)
+ }
+ }
+}
+
+func TestSearchSource(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the request JSON manually to pass to the search service via Source()
+ source := map[string]interface{}{
+ "query": map[string]interface{}{
+ "match_all": map[string]interface{}{},
+ },
+ }
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Source(source). // sets the JSON request
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+}
+
+func TestSearchSearchSource(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the search source manually and pass it to the search service via SearchSource()
+ ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2)
+
+ // One can use ss.Source() to get to the raw interface{} that will be used
+ // as the search request JSON by the SearchService.
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ SearchSource(ss). // sets the SearchSource
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 2 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits))
+ }
+}
+
+func TestSearchInnerHitsOnHasChild(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Check for valid ES version
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "1.5.0" {
+ t.Skip("InnerHits feature is only available for Elasticsearch 1.5+")
+ return
+ }
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ comment2a := comment{User: "sandrae", Comment: "What does that even mean?"}
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+ comment3a := comment{User: "nico", Comment: "You bet."}
+ comment3b := comment{User: "olivere", Comment: "It sure is."}
+
+ // Add all documents
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fq := NewFilteredQuery(NewMatchAllQuery())
+ fq = fq.Filter(
+ NewHasChildFilter("comment").
+ Query(NewMatchAllQuery()).
+ InnerHit(NewInnerHit().Name("comments")))
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(fq).
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 2 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 2 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits))
+ }
+
+ hit := searchResult.Hits.Hits[0]
+ if hit.Id != "t2" {
+ t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id)
+ }
+ if hit.InnerHits == nil {
+ t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+ }
+ if len(hit.InnerHits) != 1 {
+ t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+ }
+ innerHits, found := hit.InnerHits["comments"]
+ if !found {
+ t.Fatalf("expected inner hits for name %q", "comments")
+ }
+ if innerHits == nil || innerHits.Hits == nil {
+ t.Fatal("expected inner hits != nil")
+ }
+ if len(innerHits.Hits.Hits) != 1 {
+ t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+ }
+ if innerHits.Hits.Hits[0].Id != "c2a" {
+ t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id)
+ }
+
+ hit = searchResult.Hits.Hits[1]
+ if hit.Id != "t3" {
+ t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id)
+ }
+ if hit.InnerHits == nil {
+ t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+ }
+ if len(hit.InnerHits) != 1 {
+ t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+ }
+ innerHits, found = hit.InnerHits["comments"]
+ if !found {
+ t.Fatalf("expected inner hits for name %q", "comments")
+ }
+ if innerHits == nil || innerHits.Hits == nil {
+ t.Fatal("expected inner hits != nil")
+ }
+ if len(innerHits.Hits.Hits) != 2 {
+ t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits))
+ }
+ if innerHits.Hits.Hits[0].Id != "c3a" {
+ t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id)
+ }
+ if innerHits.Hits.Hits[1].Id != "c3b" {
+ t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id)
+ }
+}
+
+// TestSearchInnerHitsOnHasParent verifies that a has_parent filter with an
+// inner-hits block named "tweets" attaches each comment's parent tweet to
+// the comment hits. Requires a running Elasticsearch >= 1.5 instance.
+func TestSearchInnerHitsOnHasParent(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Check for valid ES version
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "1.5.0" {
+		// NOTE(review): plain lexicographic compare; adequate for the 1.x
+		// versions tested here, but would misorder e.g. "1.10.0".
+		t.Skip("InnerHits feature is only available for Elasticsearch 1.5+")
+		return
+	}
+
+	// Fixture: three tweets; t2 has one comment (c2a), t3 has two (c3a, c3b).
+	tweet1 := tweet{
+		User: "olivere", Retweets: 108,
+		Message: "Welcome to Golang and Elasticsearch.",
+		Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+	}
+	tweet2 := tweet{
+		User: "olivere", Retweets: 0,
+		Message: "Another unrelated topic.",
+		Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+	}
+	comment2a := comment{User: "sandrae", Comment: "What does that even mean?"}
+	tweet3 := tweet{
+		User: "sandrae", Retweets: 12,
+		Message: "Cycling is fun.",
+		Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+	}
+	comment3a := comment{User: "nico", Comment: "You bet."}
+	comment3b := comment{User: "olivere", Comment: "It sure is."}
+
+	// Add all documents
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Wrap a match_all query in a has_parent filter whose inner-hits block
+	// is named "tweets", so each matching comment carries its parent tweet.
+	fq := NewFilteredQuery(NewMatchAllQuery())
+	fq = fq.Filter(
+		NewHasParentFilter("tweet").
+			Query(NewMatchAllQuery()).
+			InnerHit(NewInnerHit().Name("tweets")))
+
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(fq).
+		Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 3 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 3 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+	}
+
+	// Each comment hit must expose exactly one inner-hits block ("tweets")
+	// containing a single hit: its parent tweet.
+	hit := searchResult.Hits.Hits[0]
+	if hit.Id != "c2a" {
+		t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id)
+	}
+	if hit.InnerHits == nil {
+		t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+	}
+	if len(hit.InnerHits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+	}
+	innerHits, found := hit.InnerHits["tweets"]
+	if !found {
+		t.Fatalf("expected inner hits for name %q", "tweets")
+	}
+	if innerHits == nil || innerHits.Hits == nil {
+		t.Fatal("expected inner hits != nil")
+	}
+	if len(innerHits.Hits.Hits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+	}
+	if innerHits.Hits.Hits[0].Id != "t2" {
+		t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id)
+	}
+
+	hit = searchResult.Hits.Hits[1]
+	if hit.Id != "c3a" {
+		t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id)
+	}
+	if hit.InnerHits == nil {
+		t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+	}
+	if len(hit.InnerHits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+	}
+	innerHits, found = hit.InnerHits["tweets"]
+	if !found {
+		t.Fatalf("expected inner hits for name %q", "tweets")
+	}
+	if innerHits == nil || innerHits.Hits == nil {
+		t.Fatal("expected inner hits != nil")
+	}
+	if len(innerHits.Hits.Hits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+	}
+	if innerHits.Hits.Hits[0].Id != "t3" {
+		t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id)
+	}
+
+	hit = searchResult.Hits.Hits[2]
+	if hit.Id != "c3b" {
+		t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id)
+	}
+	if hit.InnerHits == nil {
+		t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+	}
+	if len(hit.InnerHits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+	}
+	innerHits, found = hit.InnerHits["tweets"]
+	if !found {
+		t.Fatalf("expected inner hits for name %q", "tweets")
+	}
+	if innerHits == nil || innerHits.Hits == nil {
+		t.Fatal("expected inner hits != nil")
+	}
+	if len(innerHits.Hits.Hits) != 1 {
+		t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+	}
+	if innerHits.Hits.Hits[0].Id != "t3" {
+		t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go
new file mode 100644
index 00000000..b1b54f9f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go
@@ -0,0 +1,487 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- Sorter --
+
+// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
+// Implementations return their JSON-serializable representation via Source.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html.
+type Sorter interface {
+	// Source returns the sort clause as a JSON-serializable object.
+	Source() interface{}
+}
+
+// -- SortInfo --
+
+// SortInfo contains information about sorting a field.
+// It is a plain-struct alternative to the builder-style sorters below.
+type SortInfo struct {
+	Sorter
+	Field          string      // name of the field to sort by
+	Ascending      bool        // true for "asc", false for "desc"
+	Missing        interface{} // value to use for docs missing the field ("_last"/"_first" or a literal)
+	IgnoreUnmapped *bool       // if non-nil, sets "ignore_unmapped"
+	SortMode       string      // value to pick for multi-valued fields (min, max, sum, avg)
+	NestedFilter   Filter      // filter nested objects must match to count for sorting
+	NestedPath     string      // path when sorting on a field inside a nested object
+}
+
+// Source returns the JSON-serializable representation of the sort clause,
+// i.e. a map of the form {field: {order: ..., ...}}.
+func (info SortInfo) Source() interface{} {
+	prop := make(map[string]interface{})
+	if info.Ascending {
+		prop["order"] = "asc"
+	} else {
+		prop["order"] = "desc"
+	}
+	if info.Missing != nil {
+		prop["missing"] = info.Missing
+	}
+	if info.IgnoreUnmapped != nil {
+		prop["ignore_unmapped"] = *info.IgnoreUnmapped
+	}
+	if info.SortMode != "" {
+		// NOTE(review): the other sorters in this file emit this option under
+		// the key "mode"; confirm whether "sort_mode" is accepted by the
+		// targeted Elasticsearch versions.
+		prop["sort_mode"] = info.SortMode
+	}
+	if info.NestedFilter != nil {
+		// Serialize the filter via Source(), as FieldSort/GeoDistanceSort/
+		// ScriptSort do. Storing the Filter interface value itself would make
+		// json.Marshal encode the Go struct, not the filter's query DSL.
+		prop["nested_filter"] = info.NestedFilter.Source()
+	}
+	if info.NestedPath != "" {
+		prop["nested_path"] = info.NestedPath
+	}
+	source := make(map[string]interface{})
+	source[info.Field] = prop
+	return source
+}
+
+// -- ScoreSort --
+
+// ScoreSort sorts by relevancy score.
+type ScoreSort struct {
+	Sorter
+	ascending bool
+}
+
+// NewScoreSort creates a new ScoreSort.
+// Score sorting is descending by default, matching Elasticsearch behavior.
+func NewScoreSort() ScoreSort {
+	return ScoreSort{ascending: false} // Descending by default!
+}
+
+// Order defines whether sorting ascending or descending (the default).
+func (s ScoreSort) Order(ascending bool) ScoreSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s ScoreSort) Asc() ScoreSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s ScoreSort) Desc() ScoreSort {
+	s.ascending = false
+	return s
+}
+
+// Source returns the JSON-serializable data.
+// Because _score sorts descending by default, ascending order is expressed
+// by setting the "reverse" flag rather than an "order" key.
+func (s ScoreSort) Source() interface{} {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_score"] = x
+	if s.ascending {
+		x["reverse"] = true
+	}
+	return source
+}
+
+// -- FieldSort --
+
+// FieldSort sorts by a given field. Zero-value options are omitted from the
+// serialized output; pointer fields distinguish "unset" from zero values.
+type FieldSort struct {
+	Sorter
+	fieldName      string
+	ascending      bool
+	missing        interface{}
+	ignoreUnmapped *bool
+	unmappedType   *string
+	sortMode       *string
+	nestedFilter   Filter
+	nestedPath     *string
+}
+
+// NewFieldSort creates a new FieldSort.
+// Unlike ScoreSort, field sorting is ascending by default.
+func NewFieldSort(fieldName string) FieldSort {
+	return FieldSort{
+		fieldName: fieldName,
+		ascending: true,
+	}
+}
+
+// FieldName specifies the name of the field to be used for sorting.
+func (s FieldSort) FieldName(fieldName string) FieldSort {
+	s.fieldName = fieldName
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s FieldSort) Order(ascending bool) FieldSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s FieldSort) Asc() FieldSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s FieldSort) Desc() FieldSort {
+	s.ascending = false
+	return s
+}
+
+// Missing sets the value to be used when a field is missing in a document.
+// You can also use "_last" or "_first" to sort missing last or first
+// respectively.
+func (s FieldSort) Missing(missing interface{}) FieldSort {
+	s.missing = missing
+	return s
+}
+
+// IgnoreUnmapped specifies what happens if the field does not exist in
+// the index. Set it to true to ignore, or set it to false to not ignore (default).
+func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort {
+	s.ignoreUnmapped = &ignoreUnmapped
+	return s
+}
+
+// UnmappedType sets the type to use when the current field is not mapped
+// in an index.
+func (s FieldSort) UnmappedType(typ string) FieldSort {
+	s.unmappedType = &typ
+	return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s FieldSort) SortMode(sortMode string) FieldSort {
+	s.sortMode = &sortMode
+	return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s FieldSort) NestedFilter(nestedFilter Filter) FieldSort {
+	s.nestedFilter = nestedFilter
+	return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s FieldSort) NestedPath(nestedPath string) FieldSort {
+	s.nestedPath = &nestedPath
+	return s
+}
+
+// Source returns the JSON-serializable data, a map of the form
+// {fieldName: {order: ..., ...}}. Only explicitly set options are emitted.
+func (s FieldSort) Source() interface{} {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source[s.fieldName] = x
+	if s.ascending {
+		x["order"] = "asc"
+	} else {
+		x["order"] = "desc"
+	}
+	if s.missing != nil {
+		x["missing"] = s.missing
+	}
+	if s.ignoreUnmapped != nil {
+		x["ignore_unmapped"] = *s.ignoreUnmapped
+	}
+	if s.unmappedType != nil {
+		x["unmapped_type"] = *s.unmappedType
+	}
+	if s.sortMode != nil {
+		// The option set via SortMode is serialized under the key "mode".
+		x["mode"] = *s.sortMode
+	}
+	if s.nestedFilter != nil {
+		// Nested filters are expanded to their query-DSL form via Source().
+		x["nested_filter"] = s.nestedFilter.Source()
+	}
+	if s.nestedPath != nil {
+		x["nested_path"] = *s.nestedPath
+	}
+	return source
+}
+
+// -- GeoDistanceSort --
+
+// GeoDistanceSort allows for sorting by geographic distance.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+type GeoDistanceSort struct {
+	Sorter
+	fieldName    string
+	points       []*GeoPoint // origin points, serialized as {lat, lon} objects
+	geohashes    []string    // origin points given as geohash strings
+	geoDistance  *string     // distance_type: sloppy_arc (default), arc, or plane
+	unit         string      // distance unit, e.g. "km"
+	ascending    bool
+	sortMode     *string
+	nestedFilter Filter
+	nestedPath   *string
+}
+
+// NewGeoDistanceSort creates a new sorter for geo distances.
+// Sorting is ascending (nearest first) by default.
+func NewGeoDistanceSort(fieldName string) GeoDistanceSort {
+	return GeoDistanceSort{
+		fieldName: fieldName,
+		points:    make([]*GeoPoint, 0),
+		geohashes: make([]string, 0),
+		ascending: true,
+	}
+}
+
+// FieldName specifies the name of the (geo) field to use for sorting.
+func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort {
+	s.fieldName = fieldName
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s GeoDistanceSort) Asc() GeoDistanceSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s GeoDistanceSort) Desc() GeoDistanceSort {
+	s.ascending = false
+	return s
+}
+
+// Point specifies a point to create the range distance facets from.
+func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort {
+	s.points = append(s.points, GeoPointFromLatLon(lat, lon))
+	return s
+}
+
+// Points specifies the geo point(s) to create the range distance facets from.
+func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort {
+	s.points = append(s.points, points...)
+	return s
+}
+
+// GeoHashes specifies the geo point to create the range distance facets from.
+func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort {
+	s.geohashes = append(s.geohashes, geohashes...)
+	return s
+}
+
+// GeoDistance represents how to compute the distance.
+// It can be sloppy_arc (default), arc, or plane.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort {
+	s.geoDistance = &geoDistance
+	return s
+}
+
+// Unit specifies the distance unit to use. It defaults to km.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units
+// for details.
+func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort {
+	s.unit = unit
+	return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort {
+	s.sortMode = &sortMode
+	return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s GeoDistanceSort) NestedFilter(nestedFilter Filter) GeoDistanceSort {
+	s.nestedFilter = nestedFilter
+	return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort {
+	s.nestedPath = &nestedPath
+	return s
+}
+
+// Source returns the JSON-serializable data under the "_geo_distance" key.
+// Points and geohashes are merged into a single array keyed by the field name.
+func (s GeoDistanceSort) Source() interface{} {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_geo_distance"] = x
+
+	// Points
+	ptarr := make([]interface{}, 0)
+	for _, pt := range s.points {
+		ptarr = append(ptarr, pt.Source())
+	}
+	for _, geohash := range s.geohashes {
+		ptarr = append(ptarr, geohash)
+	}
+	x[s.fieldName] = ptarr
+
+	if s.unit != "" {
+		x["unit"] = s.unit
+	}
+	if s.geoDistance != nil {
+		x["distance_type"] = *s.geoDistance
+	}
+
+	// Descending order is expressed via the "reverse" flag.
+	if !s.ascending {
+		x["reverse"] = true
+	}
+	if s.sortMode != nil {
+		x["mode"] = *s.sortMode
+	}
+	if s.nestedFilter != nil {
+		x["nested_filter"] = s.nestedFilter.Source()
+	}
+	if s.nestedPath != nil {
+		x["nested_path"] = *s.nestedPath
+	}
+	return source
+}
+
+// -- ScriptSort --
+
+// ScriptSort sorts by a custom script. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting
+// for details about scripting.
+type ScriptSort struct {
+	Sorter
+	lang         string                 // script language; empty means server default
+	script       string                 // the script source
+	typ          string                 // result type: "string" or "number"
+	params       map[string]interface{} // script parameters
+	ascending    bool
+	sortMode     *string
+	nestedFilter Filter
+	nestedPath   *string
+}
+
+// NewScriptSort creates a new ScriptSort with the given script and result
+// type. Sorting is ascending by default.
+func NewScriptSort(script, typ string) ScriptSort {
+	return ScriptSort{
+		script:    script,
+		typ:       typ,
+		ascending: true,
+		params:    make(map[string]interface{}),
+	}
+}
+
+// Lang specifies the script language to use. It can be one of:
+// groovy (the default for ES >= 1.4), mvel (default for ES < 1.4),
+// js, python, expression, or native. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting
+// for details.
+func (s ScriptSort) Lang(lang string) ScriptSort {
+	s.lang = lang
+	return s
+}
+
+// Type sets the script type, which can be either string or number.
+func (s ScriptSort) Type(typ string) ScriptSort {
+	s.typ = typ
+	return s
+}
+
+// Param adds a parameter to the script.
+func (s ScriptSort) Param(name string, value interface{}) ScriptSort {
+	s.params[name] = value
+	return s
+}
+
+// Params sets the parameters of the script, replacing any set via Param.
+func (s ScriptSort) Params(params map[string]interface{}) ScriptSort {
+	s.params = params
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s ScriptSort) Order(ascending bool) ScriptSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s ScriptSort) Asc() ScriptSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s ScriptSort) Desc() ScriptSort {
+	s.ascending = false
+	return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min or max.
+func (s ScriptSort) SortMode(sortMode string) ScriptSort {
+	s.sortMode = &sortMode
+	return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s ScriptSort) NestedFilter(nestedFilter Filter) ScriptSort {
+	s.nestedFilter = nestedFilter
+	return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s ScriptSort) NestedPath(nestedPath string) ScriptSort {
+	s.nestedPath = &nestedPath
+	return s
+}
+
+// Source returns the JSON-serializable data under the "_script" key.
+// Descending order is expressed via the "reverse" flag.
+func (s ScriptSort) Source() interface{} {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_script"] = x
+
+	x["script"] = s.script
+	x["type"] = s.typ
+	if !s.ascending {
+		x["reverse"] = true
+	}
+	if s.lang != "" {
+		x["lang"] = s.lang
+	}
+	if len(s.params) > 0 {
+		x["params"] = s.params
+	}
+	if s.sortMode != nil {
+		x["mode"] = *s.sortMode
+	}
+	if s.nestedFilter != nil {
+		x["nested_filter"] = s.nestedFilter.Source()
+	}
+	if s.nestedPath != nil {
+		x["nested_path"] = *s.nestedPath
+	}
+	return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort_test.go
new file mode 100644
index 00000000..394c8369
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort_test.go
@@ -0,0 +1,174 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+// TestSortInfo checks serialization of the plain SortInfo struct.
+func TestSortInfo(t *testing.T) {
+	builder := SortInfo{Field: "grade", Ascending: false}
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"desc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestScoreSort checks that score sorting is descending by default and
+// serializes to an empty options object.
+func TestScoreSort(t *testing.T) {
+	builder := NewScoreSort()
+	if builder.ascending != false {
+		t.Error("expected score sorter to be ascending by default")
+	}
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// Ascending score order is expressed as "reverse":true, because _score
+// sorts descending by default (see ScoreSort.Source).
+func TestScoreSortOrderAscending(t *testing.T) {
+	builder := NewScoreSort().Asc()
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{"reverse":true}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// Descending is the default for _score, so no options are emitted.
+func TestScoreSortOrderDescending(t *testing.T) {
+	builder := NewScoreSort().Desc()
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// Field sorts default to ascending.
+func TestFieldSort(t *testing.T) {
+	builder := NewFieldSort("grade")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"asc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFieldSortOrderDesc(t *testing.T) {
+	builder := NewFieldSort("grade").Desc()
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"desc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestFieldSortComplex exercises all FieldSort options at once; note that
+// SortMode serializes under the key "mode".
+func TestFieldSortComplex(t *testing.T) {
+	builder := NewFieldSort("price").Desc().
+		SortMode("avg").
+		Missing("_last").
+		UnmappedType("product").
+		NestedFilter(NewTermFilter("product.color", "blue")).
+		NestedPath("variant")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestGeoDistanceSort(t *testing.T) {
+	builder := NewGeoDistanceSort("pin.location").
+		Point(-70, 40).
+		Order(true).
+		Unit("km").
+		SortMode("min").
+		GeoDistance("sloppy_arc")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// Descending geo-distance sorts are expressed via "reverse":true.
+func TestGeoDistanceSortOrderDesc(t *testing.T) {
+	builder := NewGeoDistanceSort("pin.location").
+		Point(-70, 40).
+		Unit("km").
+		SortMode("min").
+		GeoDistance("sloppy_arc").
+		Desc()
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"reverse":true,"unit":"km"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+// TestScriptSort checks serialization of a script-based sort.
+func TestScriptSort(t *testing.T) {
+	builder := NewScriptSort("doc['field_name'].value * factor", "number").
+		Param("factor", 1.1).
+		Order(true)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_script":{"params":{"factor":1.1},"script":"doc['field_name'].value * factor","type":"number"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScriptSortOrderDesc(t *testing.T) {
+	builder := NewScriptSort("doc['field_name'].value * factor", "number").
+		Param("factor", 1.1).
+		Desc()
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_script":{"params":{"factor":1.1},"reverse":true,"script":"doc['field_name'].value * factor","type":"number"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go
new file mode 100644
index 00000000..9b4060c2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go
@@ -0,0 +1,144 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// SuggestService returns suggestions for text via the _suggest endpoint.
+// Configure it with the builder methods below, then call Do.
+type SuggestService struct {
+	client     *Client
+	pretty     bool        // add ?pretty to the request URL
+	routing    string      // optional routing parameter
+	preference string      // optional preference parameter
+	indices    []string    // indices to run the suggest request against
+	suggesters []Suggester // suggesters keyed into the request body by Name()
+}
+
+// NewSuggestService creates a new SuggestService for the given client.
+func NewSuggestService(client *Client) *SuggestService {
+	builder := &SuggestService{
+		client:     client,
+		indices:    make([]string, 0),
+		suggesters: make([]Suggester, 0),
+	}
+	return builder
+}
+
+// Index adds a single index to query.
+func (s *SuggestService) Index(index string) *SuggestService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds several indices to query.
+func (s *SuggestService) Indices(indices ...string) *SuggestService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Pretty enables pretty-printed JSON responses.
+func (s *SuggestService) Pretty(pretty bool) *SuggestService {
+	s.pretty = pretty
+	return s
+}
+
+// Routing sets the routing parameter of the request.
+func (s *SuggestService) Routing(routing string) *SuggestService {
+	s.routing = routing
+	return s
+}
+
+// Preference sets the preference parameter of the request.
+func (s *SuggestService) Preference(preference string) *SuggestService {
+	s.preference = preference
+	return s
+}
+
+// Suggester adds a suggester to the request.
+func (s *SuggestService) Suggester(suggester Suggester) *SuggestService {
+	s.suggesters = append(s.suggesters, suggester)
+	return s
+}
+
+// Do executes the suggest request against the _suggest endpoint and returns
+// the suggestions parsed per suggester name. The "_shards" section of the
+// response is skipped because it cannot be deserialized into []Suggestion.
+func (s *SuggestService) Do() (SuggestResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	path += strings.Join(indexPart, ",")
+
+	// Suggest
+	path += "/_suggest"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.preference != "" {
+		params.Set("preference", s.preference)
+	}
+
+	// Set body: one entry per suggester, keyed by its name.
+	// (Loop variable renamed from "s", which shadowed the receiver.)
+	body := make(map[string]interface{})
+	for _, suggester := range s.suggesters {
+		body[suggester.Name()] = suggester.Source(false)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// There is a _shard object that cannot be deserialized.
+	// So we use json.RawMessage instead.
+	var suggestions map[string]*json.RawMessage
+	if err := json.Unmarshal(res.Body, &suggestions); err != nil {
+		return nil, err
+	}
+
+	ret := make(SuggestResult)
+	for name, result := range suggestions {
+		if name != "_shards" {
+			var sgs []Suggestion
+			if err := json.Unmarshal(*result, &sgs); err != nil {
+				return nil, err
+			}
+			ret[name] = sgs
+		}
+	}
+
+	return ret, nil
+}
+
+// SuggestResult maps each suggester name to its list of suggestions.
+type SuggestResult map[string][]Suggestion
+
+// Suggestion is a single suggest entry for a piece of input text.
+type Suggestion struct {
+	Text    string             `json:"text"`
+	Offset  int                `json:"offset"`
+	Length  int                `json:"length"`
+	Options []suggestionOption `json:"options"`
+}
+
+// suggestionOption is one candidate returned for a Suggestion.
+type suggestionOption struct {
+	Text    string      `json:"text"`
+	Score   float32     `json:"score"`
+	Freq    int         `json:"freq"`
+	Payload interface{} `json:"payload"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go
new file mode 100644
index 00000000..60f94818
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+)
+
+// SuggestField can be used by the caller to specify a suggest field
+// at index time. For a detailed example, see e.g.
+// http://www.elasticsearch.org/blog/you-complete-me/.
+type SuggestField struct {
+	inputs  []string    // input phrases the completion matches on
+	output  *string     // optional text returned instead of the input
+	payload interface{} // optional arbitrary data returned with the option
+	weight  int         // ranking weight; -1 means "unset" (omitted from JSON)
+}
+
+// NewSuggestField creates a SuggestField. The weight is initialized to -1,
+// a sentinel meaning "not set" (see MarshalJSON).
+func NewSuggestField() *SuggestField {
+	return &SuggestField{weight: -1}
+}
+
+// Input adds one or more input phrases.
+func (f *SuggestField) Input(input ...string) *SuggestField {
+	if f.inputs == nil {
+		f.inputs = make([]string, 0)
+	}
+	f.inputs = append(f.inputs, input...)
+	return f
+}
+
+// Output sets the text returned when a suggestion matches.
+func (f *SuggestField) Output(output string) *SuggestField {
+	f.output = &output
+	return f
+}
+
+// Payload attaches arbitrary data to be returned with the suggestion.
+func (f *SuggestField) Payload(payload interface{}) *SuggestField {
+	f.payload = payload
+	return f
+}
+
+// Weight sets the ranking weight. Negative values leave weight unset.
+func (f *SuggestField) Weight(weight int) *SuggestField {
+	f.weight = weight
+	return f
+}
+
+// MarshalJSON encodes SuggestField into JSON.
+// A single input is emitted as a plain string, multiple inputs as an array;
+// unset fields (nil output/payload, negative weight) are omitted.
+func (f *SuggestField) MarshalJSON() ([]byte, error) {
+	source := make(map[string]interface{})
+
+	if f.inputs != nil {
+		switch len(f.inputs) {
+		case 1:
+			source["input"] = f.inputs[0]
+		default:
+			source["input"] = f.inputs
+		}
+	}
+
+	if f.output != nil {
+		source["output"] = *f.output
+	}
+
+	if f.payload != nil {
+		source["payload"] = f.payload
+	}
+
+	if f.weight >= 0 {
+		source["weight"] = f.weight
+	}
+
+	return json.Marshal(source)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_test.go
new file mode 100644
index 00000000..50a4a095
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_test.go
@@ -0,0 +1,131 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ _ "net/http"
+ "testing"
+)
+
+// TestSuggestService runs term, phrase, and completion suggesters in one
+// _suggest request against a live Elasticsearch and checks the parsed
+// results per suggester name.
+func TestSuggestService(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Fixture tweets; weights order the completion options (1 before 0,
+	// unweighted last). tweet3 should not match the "Go" prefix.
+	tweet1 := tweet{
+		User:     "olivere",
+		Message:  "Welcome to Golang and Elasticsearch.",
+		Tags:     []string{"golang", "elasticsearch"},
+		Location: "48.1333,11.5667", // lat,lon
+		Suggest: NewSuggestField().
+			Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch").
+			Output("Golang and Elasticsearch: An introduction.").
+			Weight(0),
+	}
+	tweet2 := tweet{
+		User:     "olivere",
+		Message:  "Another unrelated topic.",
+		Tags:     []string{"golang"},
+		Location: "48.1189,11.4289", // lat,lon
+		Suggest: NewSuggestField().
+			Input("Another unrelated topic.", "Golang topic.").
+			Output("About Golang.").
+			Weight(1),
+	}
+	tweet3 := tweet{
+		User:     "sandrae",
+		Message:  "Cycling is fun.",
+		Tags:     []string{"sports", "cycling"},
+		Location: "47.7167,11.7167", // lat,lon
+		Suggest: NewSuggestField().
+			Input("Cycling is fun.").
+			Output("Cycling is a fun sport."),
+	}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test _suggest endpoint
+	termSuggesterName := "my-term-suggester"
+	termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message")
+	phraseSuggesterName := "my-phrase-suggester"
+	phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message")
+	completionSuggesterName := "my-completion-suggester"
+	completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field")
+
+	result, err := client.Suggest().
+		Index(testIndexName).
+		Suggester(termSuggester).
+		Suggester(phraseSuggester).
+		Suggester(completionSuggester).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if result == nil {
+		t.Errorf("expected result != nil; got nil")
+	}
+	if len(result) != 3 {
+		t.Errorf("expected 3 suggester results; got %d", len(result))
+	}
+
+	termSuggestions, found := result[termSuggesterName]
+	if !found {
+		t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName)
+	}
+	if termSuggestions == nil {
+		t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName)
+	}
+	if len(termSuggestions) != 1 {
+		t.Errorf("expected 1 suggestion; got %d", len(termSuggestions))
+	}
+
+	phraseSuggestions, found := result[phraseSuggesterName]
+	if !found {
+		t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName)
+	}
+	if phraseSuggestions == nil {
+		t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName)
+	}
+	if len(phraseSuggestions) != 1 {
+		t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions))
+	}
+
+	// Completion options are ordered by weight: tweet2 (weight 1) first.
+	completionSuggestions, found := result[completionSuggesterName]
+	if !found {
+		t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName)
+	}
+	if completionSuggestions == nil {
+		t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName)
+	}
+	if len(completionSuggestions) != 1 {
+		t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions))
+	}
+	if len(completionSuggestions[0].Options) != 2 {
+		t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options))
+	}
+	if completionSuggestions[0].Options[0].Text != "About Golang." {
+		t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, "About Golang.", completionSuggestions[0].Options[0].Text)
+	}
+	if completionSuggestions[0].Options[1].Text != "Golang and Elasticsearch: An introduction." {
+		t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, "Golang and Elasticsearch: An introduction.", completionSuggestions[0].Options[1].Text)
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go
new file mode 100644
index 00000000..c83d050c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go
@@ -0,0 +1,15 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Represents the generic suggester interface.
+// A suggester's only purpose is to return the
+// source of the query as a JSON-serializable
+// object. Returning a map[string]interface{}
+// will do.
+type Suggester interface {
+ Name() string
+ Source(includeName bool) interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go
new file mode 100644
index 00000000..e38c38fc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go
@@ -0,0 +1,121 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CompletionSuggester is a fast suggester for e.g. type-ahead completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+// for more details.
+type CompletionSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+}
+
+// Creates a new completion suggester.
+func NewCompletionSuggester(name string) CompletionSuggester {
+ return CompletionSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q CompletionSuggester) Name() string {
+ return q.name
+}
+
+func (q CompletionSuggester) Text(text string) CompletionSuggester {
+ q.text = text
+ return q
+}
+
+func (q CompletionSuggester) Field(field string) CompletionSuggester {
+ q.field = field
+ return q
+}
+
+func (q CompletionSuggester) Analyzer(analyzer string) CompletionSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q CompletionSuggester) Size(size int) CompletionSuggester {
+ q.size = &size
+ return q
+}
+
+func (q CompletionSuggester) ShardSize(shardSize int) CompletionSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q CompletionSuggester) ContextQuery(query SuggesterContextQuery) CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// completionSuggesterRequest is necessary because the order in which
+// the JSON elements are routed to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the completion element.
+type completionSuggesterRequest struct {
+ Text string `json:"text"`
+ Completion interface{} `json:"completion"`
+}
+
+// Creates the source for the completion suggester.
+func (q CompletionSuggester) Source(includeName bool) interface{} {
+ cs := &completionSuggesterRequest{}
+
+ if q.text != "" {
+ cs.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+	// TODO(oe) Add completion-suggester specific parameters here
+
+ if !includeName {
+ return cs
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go
new file mode 100644
index 00000000..3539381b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go
@@ -0,0 +1,171 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
+// completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+// for details, and
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy
+// for details about the fuzzy completion suggester.
+type FuzzyCompletionSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ fuzziness interface{}
+ fuzzyTranspositions *bool
+ fuzzyMinLength *int
+ fuzzyPrefixLength *int
+ unicodeAware *bool
+}
+
+// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester.
+type Fuzziness struct {
+}
+
+// Creates a new completion suggester.
+func NewFuzzyCompletionSuggester(name string) FuzzyCompletionSuggester {
+ return FuzzyCompletionSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q FuzzyCompletionSuggester) Name() string {
+ return q.name
+}
+
+func (q FuzzyCompletionSuggester) Text(text string) FuzzyCompletionSuggester {
+ q.text = text
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Field(field string) FuzzyCompletionSuggester {
+ q.field = field
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Analyzer(analyzer string) FuzzyCompletionSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Size(size int) FuzzyCompletionSuggester {
+ q.size = &size
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ShardSize(shardSize int) FuzzyCompletionSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) FuzzyCompletionSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) FuzzyCompletionSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// Fuzziness defines the strategy used to describe what "fuzzy" actually
+// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness
+// for a detailed description.
+func (q FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) FuzzyCompletionSuggester {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) FuzzyCompletionSuggester {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyMinLength(minLength int) FuzzyCompletionSuggester {
+ q.fuzzyMinLength = &minLength
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) FuzzyCompletionSuggester {
+ q.fuzzyPrefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) FuzzyCompletionSuggester {
+ q.unicodeAware = &unicodeAware
+ return q
+}
+
+// Creates the source for the completion suggester.
+func (q FuzzyCompletionSuggester) Source(includeName bool) interface{} {
+ cs := &completionSuggesterRequest{}
+
+ if q.text != "" {
+ cs.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+ // Fuzzy Completion Suggester fields
+ fuzzy := make(map[string]interface{})
+ suggester["fuzzy"] = fuzzy
+ if q.fuzziness != nil {
+ fuzzy["fuzziness"] = q.fuzziness
+ }
+ if q.fuzzyTranspositions != nil {
+ fuzzy["transpositions"] = *q.fuzzyTranspositions
+ }
+ if q.fuzzyMinLength != nil {
+ fuzzy["min_length"] = *q.fuzzyMinLength
+ }
+ if q.fuzzyPrefixLength != nil {
+ fuzzy["prefix_length"] = *q.fuzzyPrefixLength
+ }
+ if q.unicodeAware != nil {
+ fuzzy["unicode_aware"] = *q.unicodeAware
+ }
+
+ if !includeName {
+ return cs
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy_test.go
new file mode 100644
index 00000000..a7d9afc8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyCompletionSuggesterSource(t *testing.T) {
+ s := NewFuzzyCompletionSuggester("song-suggest").
+ Text("n").
+ Field("suggest").
+ Fuzziness(2)
+ data, err := json.Marshal(s.Source(true))
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) {
+ s := NewFuzzyCompletionSuggester("song-suggest").
+ Text("n").
+ Field("suggest").
+ Fuzziness("1..4")
+ data, err := json.Marshal(s.Source(true))
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_test.go
new file mode 100644
index 00000000..18d87459
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_test.go
@@ -0,0 +1,25 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCompletionSuggesterSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Text("n").
+ Field("suggest")
+ data, err := json.Marshal(s.Source(true))
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go
new file mode 100644
index 00000000..96d6c9ee
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go
@@ -0,0 +1,11 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SuggesterContextQuery is used to define context information within
+// a suggestion request.
+type SuggesterContextQuery interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go
new file mode 100644
index 00000000..1699c7bc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterCategoryMapping --
+
+// SuggesterCategoryMapping provides a mapping for a category context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping.
+type SuggesterCategoryMapping struct {
+ name string
+ fieldName string
+ defaultValues []string
+}
+
+// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping.
+func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping {
+ return &SuggesterCategoryMapping{
+ name: name,
+ defaultValues: make([]string, 0),
+ }
+}
+
+func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping {
+ q.defaultValues = append(q.defaultValues, values...)
+ return q
+}
+
+func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryMapping) Source() interface{} {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "category"
+
+ switch len(q.defaultValues) {
+ case 0:
+ x["default"] = q.defaultValues
+ case 1:
+ x["default"] = q.defaultValues[0]
+ default:
+ x["default"] = q.defaultValues
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source
+}
+
+// -- SuggesterCategoryQuery --
+
+// SuggesterCategoryQuery provides querying a category context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query.
+type SuggesterCategoryQuery struct {
+ name string
+ values []string
+}
+
+// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery.
+func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery {
+ q := &SuggesterCategoryQuery{
+ name: name,
+ values: make([]string, 0),
+ }
+ if len(values) > 0 {
+ q.values = append(q.values, values...)
+ }
+ return q
+}
+
+func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery {
+ q.values = append(q.values, values...)
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryQuery) Source() interface{} {
+ source := make(map[string]interface{})
+
+ switch len(q.values) {
+ case 0:
+ source[q.name] = q.values
+ case 1:
+ source[q.name] = q.values[0]
+ default:
+ source[q.name] = q.values
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category_test.go
new file mode 100644
index 00000000..1d380fba
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSuggesterCategoryMapping(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").
+ DefaultValues("red")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":"red","type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").
+ DefaultValues("red", "orange")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":["red","orange"],"type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryMappingWithFieldName(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").
+ DefaultValues("red", "orange").
+ FieldName("color_field")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryQuery(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":"red"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red", "yellow")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":["red","yellow"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go
new file mode 100644
index 00000000..116fe9e4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterGeoMapping --
+
+// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping.
+type SuggesterGeoMapping struct {
+ name string
+ defaultLocations []*GeoPoint
+ precision []string
+ neighbors *bool
+ fieldName string
+}
+
+// NewSuggesterGeoMapping creates a new SuggesterGeoMapping.
+func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping {
+ return &SuggesterGeoMapping{
+ name: name,
+ defaultLocations: make([]*GeoPoint, 0),
+ precision: make([]string, 0),
+ }
+}
+
+func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping {
+ q.defaultLocations = append(q.defaultLocations, locations...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping {
+ q.neighbors = &neighbors
+ return q
+}
+
+func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoMapping) Source() interface{} {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "geo"
+
+ if len(q.precision) > 0 {
+ x["precision"] = q.precision
+ }
+ if q.neighbors != nil {
+ x["neighbors"] = *q.neighbors
+ }
+
+ switch len(q.defaultLocations) {
+ case 0:
+ case 1:
+ x["default"] = q.defaultLocations[0].Source()
+ default:
+ arr := make([]interface{}, 0)
+ for _, p := range q.defaultLocations {
+ arr = append(arr, p.Source())
+ }
+ x["default"] = arr
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source
+}
+
+// -- SuggesterGeoQuery --
+
+// SuggesterGeoQuery provides querying a geolocation context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query
+type SuggesterGeoQuery struct {
+ name string
+ location *GeoPoint
+ precision []string
+}
+
+// NewSuggesterGeoQuery creates a new SuggesterGeoQuery.
+func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery {
+ return &SuggesterGeoQuery{
+ name: name,
+ location: location,
+ precision: make([]string, 0),
+ }
+}
+
+func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoQuery) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if len(q.precision) == 0 {
+ if q.location != nil {
+ source[q.name] = q.location.Source()
+ }
+ } else {
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ if q.location != nil {
+ x["value"] = q.location.Source()
+ }
+
+ switch len(q.precision) {
+ case 0:
+ case 1:
+ x["precision"] = q.precision[0]
+ default:
+ x["precision"] = q.precision
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo_test.go
new file mode 100644
index 00000000..a6c346c5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSuggesterGeoMapping(t *testing.T) {
+ q := NewSuggesterGeoMapping("location").
+ Precision("1km", "5m").
+ Neighbors(true).
+ FieldName("pin").
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0))
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterGeoQuery(t *testing.T) {
+ q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).
+ Precision("1km")
+ data, err := json.Marshal(q.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go
new file mode 100644
index 00000000..d25c4f73
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go
@@ -0,0 +1,538 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// For more details, see
+// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/
+type PhraseSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ // fields specific to a phrase suggester
+ maxErrors *float32
+ separator *string
+ realWordErrorLikelihood *float32
+ confidence *float32
+ generators map[string][]CandidateGenerator
+ gramSize *int
+ smoothingModel SmoothingModel
+ forceUnigrams *bool
+ tokenLimit *int
+ preTag, postTag *string
+ collateQuery *string
+ collateFilter *string
+ collatePreference *string
+ collateParams map[string]interface{}
+ collatePrune *bool
+}
+
+// Creates a new phrase suggester.
+func NewPhraseSuggester(name string) PhraseSuggester {
+ return PhraseSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ collateParams: make(map[string]interface{}),
+ }
+}
+
+func (q PhraseSuggester) Name() string {
+ return q.name
+}
+
+func (q PhraseSuggester) Text(text string) PhraseSuggester {
+ q.text = text
+ return q
+}
+
+func (q PhraseSuggester) Field(field string) PhraseSuggester {
+ q.field = field
+ return q
+}
+
+func (q PhraseSuggester) Analyzer(analyzer string) PhraseSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q PhraseSuggester) Size(size int) PhraseSuggester {
+ q.size = &size
+ return q
+}
+
+func (q PhraseSuggester) ShardSize(shardSize int) PhraseSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q PhraseSuggester) ContextQuery(query SuggesterContextQuery) PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+func (q PhraseSuggester) GramSize(gramSize int) PhraseSuggester {
+ if gramSize >= 1 {
+ q.gramSize = &gramSize
+ }
+ return q
+}
+
+func (q PhraseSuggester) MaxErrors(maxErrors float32) PhraseSuggester {
+ q.maxErrors = &maxErrors
+ return q
+}
+
+func (q PhraseSuggester) Separator(separator string) PhraseSuggester {
+ q.separator = &separator
+ return q
+}
+
+func (q PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float32) PhraseSuggester {
+ q.realWordErrorLikelihood = &realWordErrorLikelihood
+ return q
+}
+
+func (q PhraseSuggester) Confidence(confidence float32) PhraseSuggester {
+ q.confidence = &confidence
+ return q
+}
+
+func (q PhraseSuggester) CandidateGenerator(generator CandidateGenerator) PhraseSuggester {
+ if q.generators == nil {
+ q.generators = make(map[string][]CandidateGenerator)
+ }
+ typ := generator.Type()
+ if _, found := q.generators[typ]; !found {
+ q.generators[typ] = make([]CandidateGenerator, 0)
+ }
+ q.generators[typ] = append(q.generators[typ], generator)
+ return q
+}
+
+func (q PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) PhraseSuggester {
+ for _, g := range generators {
+ q = q.CandidateGenerator(g)
+ }
+ return q
+}
+
+func (q PhraseSuggester) ClearCandidateGenerator() PhraseSuggester {
+ q.generators = nil
+ return q
+}
+
+func (q PhraseSuggester) ForceUnigrams(forceUnigrams bool) PhraseSuggester {
+ q.forceUnigrams = &forceUnigrams
+ return q
+}
+
+func (q PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) PhraseSuggester {
+ q.smoothingModel = smoothingModel
+ return q
+}
+
+func (q PhraseSuggester) TokenLimit(tokenLimit int) PhraseSuggester {
+ q.tokenLimit = &tokenLimit
+ return q
+}
+
+func (q PhraseSuggester) Highlight(preTag, postTag string) PhraseSuggester {
+ q.preTag = &preTag
+ q.postTag = &postTag
+ return q
+}
+
+func (q PhraseSuggester) CollateQuery(collateQuery string) PhraseSuggester {
+ q.collateQuery = &collateQuery
+ return q
+}
+
+func (q PhraseSuggester) CollateFilter(collateFilter string) PhraseSuggester {
+ q.collateFilter = &collateFilter
+ return q
+}
+
+func (q PhraseSuggester) CollatePreference(collatePreference string) PhraseSuggester {
+ q.collatePreference = &collatePreference
+ return q
+}
+
+func (q PhraseSuggester) CollateParams(collateParams map[string]interface{}) PhraseSuggester {
+ q.collateParams = collateParams
+ return q
+}
+
+func (q PhraseSuggester) CollatePrune(collatePrune bool) PhraseSuggester {
+ q.collatePrune = &collatePrune
+ return q
+}
+
+// phraseSuggesterRequest is necessary because the order in which
+// the JSON elements are routed to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the simple_phrase element.
+type phraseSuggesterRequest struct {
+ Text string `json:"text"`
+ Phrase interface{} `json:"phrase"`
+}
+
+// Creates the source for the phrase suggester.
+func (q PhraseSuggester) Source(includeName bool) interface{} {
+ ps := &phraseSuggesterRequest{}
+
+ if q.text != "" {
+ ps.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ps.Phrase = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+	// Phrase-suggester specific parameters
+ if q.realWordErrorLikelihood != nil {
+ suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+ }
+ if q.confidence != nil {
+ suggester["confidence"] = *q.confidence
+ }
+ if q.separator != nil {
+ suggester["separator"] = *q.separator
+ }
+ if q.maxErrors != nil {
+ suggester["max_errors"] = *q.maxErrors
+ }
+ if q.gramSize != nil {
+ suggester["gram_size"] = *q.gramSize
+ }
+ if q.forceUnigrams != nil {
+ suggester["force_unigrams"] = *q.forceUnigrams
+ }
+ if q.tokenLimit != nil {
+ suggester["token_limit"] = *q.tokenLimit
+ }
+ if q.generators != nil && len(q.generators) > 0 {
+ for typ, generators := range q.generators {
+ arr := make([]interface{}, 0)
+ for _, g := range generators {
+ arr = append(arr, g.Source())
+ }
+ suggester[typ] = arr
+ }
+ }
+ if q.smoothingModel != nil {
+ x := make(map[string]interface{})
+ x[q.smoothingModel.Type()] = q.smoothingModel.Source()
+ suggester["smoothing"] = x
+ }
+ if q.preTag != nil {
+ hl := make(map[string]string)
+ hl["pre_tag"] = *q.preTag
+ if q.postTag != nil {
+ hl["post_tag"] = *q.postTag
+ }
+ suggester["highlight"] = hl
+ }
+ if q.collateQuery != nil || q.collateFilter != nil {
+ collate := make(map[string]interface{})
+ suggester["collate"] = collate
+ if q.collateQuery != nil {
+ collate["query"] = *q.collateQuery
+ }
+ if q.collateFilter != nil {
+ collate["filter"] = *q.collateFilter
+ }
+ if q.collatePreference != nil {
+ collate["preference"] = *q.collatePreference
+ }
+ if len(q.collateParams) > 0 {
+ collate["params"] = q.collateParams
+ }
+ if q.collatePrune != nil {
+ collate["prune"] = *q.collatePrune
+ }
+ }
+
+ if !includeName {
+ return ps
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ps
+ return source
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+ Type() string
+ Source() interface{}
+}
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type StupidBackoffSmoothingModel struct {
+ discount float64
+}
+
+func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
+ return &StupidBackoffSmoothingModel{
+ discount: discount,
+ }
+}
+
+func (sm *StupidBackoffSmoothingModel) Type() string {
+ return "stupid_backoff"
+}
+
+func (sm *StupidBackoffSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["discount"] = sm.discount
+ return source
+}
+
+// --
+
+// LaplaceSmoothingModel implements a laplace smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LaplaceSmoothingModel struct {
+ alpha float64
+}
+
+func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
+ return &LaplaceSmoothingModel{
+ alpha: alpha,
+ }
+}
+
+func (sm *LaplaceSmoothingModel) Type() string {
+ return "laplace"
+}
+
+func (sm *LaplaceSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["alpha"] = sm.alpha
+ return source
+}
+
+// --
+
+// LinearInterpolationSmoothingModel implements a linear interpolation
+// smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LinearInterpolationSmoothingModel struct {
+ trigramLamda float64
+ bigramLambda float64
+ unigramLambda float64
+}
+
+func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
+ return &LinearInterpolationSmoothingModel{
+ trigramLamda: trigramLamda,
+ bigramLambda: bigramLambda,
+ unigramLambda: unigramLambda,
+ }
+}
+
+func (sm *LinearInterpolationSmoothingModel) Type() string {
+ return "linear_interpolation"
+}
+
+func (sm *LinearInterpolationSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["trigram_lambda"] = sm.trigramLamda
+ source["bigram_lambda"] = sm.bigramLambda
+ source["unigram_lambda"] = sm.unigramLambda
+ return source
+}
+
// -- CandidateGenerator --

// CandidateGenerator is implemented by all candidate generators of the
// phrase suggester. Type returns the JSON field name under which the
// generator is serialized (e.g. "direct_generator"); Source returns the
// generator's parameters as a JSON-serializable value.
type CandidateGenerator interface {
	Type() string
	Source() interface{}
}
+
+// DirectCandidateGenerator implements a direct candidate generator.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type DirectCandidateGenerator struct {
+ field string
+ preFilter *string
+ postFilter *string
+ suggestMode *string
+ accuracy *float64
+ size *int
+ sort *string
+ stringDistance *string
+ maxEdits *int
+ maxInspections *int
+ maxTermFreq *float64
+ prefixLength *int
+ minWordLength *int
+ minDocFreq *float64
+}
+
+func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator {
+ return &DirectCandidateGenerator{
+ field: field,
+ }
+}
+
+func (g *DirectCandidateGenerator) Type() string {
+ return "direct_generator"
+}
+
+func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator {
+ g.field = field
+ return g
+}
+
+func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator {
+ g.preFilter = &preFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator {
+ g.postFilter = &postFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator {
+ g.suggestMode = &suggestMode
+ return g
+}
+
+func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator {
+ g.accuracy = &accuracy
+ return g
+}
+
+func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator {
+ g.size = &size
+ return g
+}
+
+func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator {
+ g.sort = &sort
+ return g
+}
+
+func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator {
+ g.stringDistance = &stringDistance
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator {
+ g.maxEdits = &maxEdits
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator {
+ g.maxInspections = &maxInspections
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator {
+ g.maxTermFreq = &maxTermFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator {
+ g.prefixLength = &prefixLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator {
+ g.minWordLength = &minWordLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator {
+ g.minDocFreq = &minDocFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) Source() interface{} {
+ source := make(map[string]interface{})
+ if g.field != "" {
+ source["field"] = g.field
+ }
+ if g.suggestMode != nil {
+ source["suggest_mode"] = *g.suggestMode
+ }
+ if g.accuracy != nil {
+ source["accuracy"] = *g.accuracy
+ }
+ if g.size != nil {
+ source["size"] = *g.size
+ }
+ if g.sort != nil {
+ source["sort"] = *g.sort
+ }
+ if g.stringDistance != nil {
+ source["string_distance"] = *g.stringDistance
+ }
+ if g.maxEdits != nil {
+ source["max_edits"] = *g.maxEdits
+ }
+ if g.maxInspections != nil {
+ source["max_inspections"] = *g.maxInspections
+ }
+ if g.maxTermFreq != nil {
+ source["max_term_freq"] = *g.maxTermFreq
+ }
+ if g.prefixLength != nil {
+ source["prefix_length"] = *g.prefixLength
+ }
+ if g.minWordLength != nil {
+ source["min_word_length"] = *g.minWordLength
+ }
+ if g.minDocFreq != nil {
+ source["min_doc_freq"] = *g.minDocFreq
+ }
+ if g.preFilter != nil {
+ source["pre_filter"] = *g.preFilter
+ }
+ if g.postFilter != nil {
+ source["post_filter"] = *g.postFilter
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase_test.go
new file mode 100644
index 00000000..135c0377
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase_test.go
@@ -0,0 +1,145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
// TestPhraseSuggesterSource checks the JSON serialization of a basic
// phrase suggester with highlighting.
func TestPhraseSuggesterSource(t *testing.T) {
	s := NewPhraseSuggester("name").
		Text("Xor the Got-Jewel").
		Analyzer("body").
		Field("bigram").
		Size(1).
		RealWordErrorLikelihood(0.95).
		MaxErrors(0.5).
		GramSize(2).
		Highlight("", "")
	data, err := json.Marshal(s.Source(true))
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestPhraseSuggesterSourceWithContextQuery checks that a geo mapping
// context query is serialized under the "context" element.
func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) {
	geomapQ := NewSuggesterGeoMapping("location").
		Precision("1km", "5m").
		Neighbors(true).
		FieldName("pin").
		DefaultLocations(GeoPointFromLatLon(0.0, 0.0))

	s := NewPhraseSuggester("name").
		Text("Xor the Got-Jewel").
		Analyzer("body").
		Field("bigram").
		Size(1).
		RealWordErrorLikelihood(0.95).
		MaxErrors(0.5).
		GramSize(2).
		Highlight("", "").
		ContextQuery(geomapQ)
	data, err := json.Marshal(s.Source(true))
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestPhraseSuggesterComplexSource checks serialization of a phrase
// suggester with direct candidate generators and collate settings.
func TestPhraseSuggesterComplexSource(t *testing.T) {
	g1 := NewDirectCandidateGenerator("body").
		SuggestMode("always").
		MinWordLength(1)

	g2 := NewDirectCandidateGenerator("reverse").
		SuggestMode("always").
		MinWordLength(1).
		PreFilter("reverse").
		PostFilter("reverse")

	s := NewPhraseSuggester("simple_phrase").
		Text("Xor the Got-Jewel").
		Analyzer("body").
		Field("bigram").
		Size(4).
		RealWordErrorLikelihood(0.95).
		Confidence(2.0).
		GramSize(2).
		CandidateGenerators(g1, g2).
		CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`).
		CollateParams(map[string]interface{}{"field_name": "title"}).
		CollatePreference("_primary").
		CollatePrune(true)
	data, err := json.Marshal(s.Source(true))
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestPhraseStupidBackoffSmoothingModel checks the serialization of the
// stupid backoff smoothing model.
func TestPhraseStupidBackoffSmoothingModel(t *testing.T) {
	s := NewStupidBackoffSmoothingModel(0.42)
	data, err := json.Marshal(s.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	// The source does NOT include the smoothing model type!
	expected := `{"discount":0.42}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
	if s.Type() != "stupid_backoff" {
		t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type())
	}
}

// TestPhraseLaplaceSmoothingModel checks the serialization of the
// laplace smoothing model.
func TestPhraseLaplaceSmoothingModel(t *testing.T) {
	s := NewLaplaceSmoothingModel(0.63)
	data, err := json.Marshal(s.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	// The source does NOT include the smoothing model type!
	expected := `{"alpha":0.63}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
	if s.Type() != "laplace" {
		t.Errorf("expected %q, got: %q", "laplace", s.Type())
	}
}

// TestLinearInterpolationSmoothingModel checks the serialization of the
// linear interpolation smoothing model.
func TestLinearInterpolationSmoothingModel(t *testing.T) {
	s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05)
	data, err := json.Marshal(s.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	// The source does NOT include the smoothing model type!
	expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
	if s.Type() != "linear_interpolation" {
		t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type())
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go
new file mode 100644
index 00000000..f19484dc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go
@@ -0,0 +1,225 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// TermSuggester implements the term suggester of Elasticsearch.
// For more details, see
// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/
type TermSuggester struct {
	Suggester
	// fields shared with the other suggesters
	name           string
	text           string
	field          string
	analyzer       string
	size           *int
	shardSize      *int
	contextQueries []SuggesterContextQuery

	// fields specific to term suggester
	suggestMode    string
	accuracy       *float32
	sort           string
	stringDistance string
	maxEdits       *int
	maxInspections *int
	maxTermFreq    *float32
	prefixLength   *int
	minWordLength  *int
	minDocFreq     *float32
}
+
+// Creates a new term suggester.
+func NewTermSuggester(name string) TermSuggester {
+ return TermSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
// Name returns the name of this suggester.
func (q TermSuggester) Name() string {
	return q.name
}

// Text sets the text to generate suggestions for.
func (q TermSuggester) Text(text string) TermSuggester {
	q.text = text
	return q
}

// Field sets the field option of the suggester.
func (q TermSuggester) Field(field string) TermSuggester {
	q.field = field
	return q
}

// Analyzer sets the analyzer option of the suggester.
func (q TermSuggester) Analyzer(analyzer string) TermSuggester {
	q.analyzer = analyzer
	return q
}

// Size sets the size option of the suggester.
func (q TermSuggester) Size(size int) TermSuggester {
	q.size = &size
	return q
}

// ShardSize sets the shard_size option of the suggester.
func (q TermSuggester) ShardSize(shardSize int) TermSuggester {
	q.shardSize = &shardSize
	return q
}

// ContextQuery adds a single context query to the suggester.
func (q TermSuggester) ContextQuery(query SuggesterContextQuery) TermSuggester {
	q.contextQueries = append(q.contextQueries, query)
	return q
}

// ContextQueries adds several context queries to the suggester.
func (q TermSuggester) ContextQueries(queries ...SuggesterContextQuery) TermSuggester {
	q.contextQueries = append(q.contextQueries, queries...)
	return q
}

// SuggestMode sets the suggest_mode option of the term suggester.
func (q TermSuggester) SuggestMode(suggestMode string) TermSuggester {
	q.suggestMode = suggestMode
	return q
}

// Accuracy sets the accuracy option of the term suggester.
func (q TermSuggester) Accuracy(accuracy float32) TermSuggester {
	q.accuracy = &accuracy
	return q
}

// Sort sets the sort option of the term suggester.
func (q TermSuggester) Sort(sort string) TermSuggester {
	q.sort = sort
	return q
}

// StringDistance sets the string_distance option of the term suggester.
func (q TermSuggester) StringDistance(stringDistance string) TermSuggester {
	q.stringDistance = stringDistance
	return q
}

// MaxEdits sets the max_edits option of the term suggester.
func (q TermSuggester) MaxEdits(maxEdits int) TermSuggester {
	q.maxEdits = &maxEdits
	return q
}

// MaxInspections sets the max_inspections option of the term suggester.
func (q TermSuggester) MaxInspections(maxInspections int) TermSuggester {
	q.maxInspections = &maxInspections
	return q
}

// MaxTermFreq sets the max_term_freq option of the term suggester.
func (q TermSuggester) MaxTermFreq(maxTermFreq float32) TermSuggester {
	q.maxTermFreq = &maxTermFreq
	return q
}

// PrefixLength sets the prefix_len option of the term suggester.
func (q TermSuggester) PrefixLength(prefixLength int) TermSuggester {
	q.prefixLength = &prefixLength
	return q
}

// MinWordLength sets the min_word_len option of the term suggester.
func (q TermSuggester) MinWordLength(minWordLength int) TermSuggester {
	q.minWordLength = &minWordLength
	return q
}

// MinDocFreq sets the min_doc_freq option of the term suggester.
func (q TermSuggester) MinDocFreq(minDocFreq float32) TermSuggester {
	q.minDocFreq = &minDocFreq
	return q
}
+
// termSuggesterRequest is necessary because the order in which
// the JSON elements are routed to Elasticsearch is relevant.
// We got into trouble when using plain maps because the text element
// needs to go before the term element.
type termSuggesterRequest struct {
	Text string      `json:"text"` // suggest text
	Term interface{} `json:"term"` // term suggester options
}
+
+// Creates the source for the term suggester.
+func (q TermSuggester) Source(includeName bool) interface{} {
+ // "suggest" : {
+ // "my-suggest-1" : {
+ // "text" : "the amsterdma meetpu",
+ // "term" : {
+ // "field" : "body"
+ // }
+ // },
+ // "my-suggest-2" : {
+ // "text" : "the rottredam meetpu",
+ // "term" : {
+ // "field" : "title",
+ // }
+ // }
+ // }
+ ts := &termSuggesterRequest{}
+ if q.text != "" {
+ ts.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ts.Term = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+ // Specific to term suggester
+ if q.suggestMode != "" {
+ suggester["suggest_mode"] = q.suggestMode
+ }
+ if q.accuracy != nil {
+ suggester["accuracy"] = *q.accuracy
+ }
+ if q.sort != "" {
+ suggester["sort"] = q.sort
+ }
+ if q.stringDistance != "" {
+ suggester["string_distance"] = q.stringDistance
+ }
+ if q.maxEdits != nil {
+ suggester["max_edits"] = *q.maxEdits
+ }
+ if q.maxInspections != nil {
+ suggester["max_inspections"] = *q.maxInspections
+ }
+ if q.maxTermFreq != nil {
+ suggester["max_term_freq"] = *q.maxTermFreq
+ }
+ if q.prefixLength != nil {
+ suggester["prefix_len"] = *q.prefixLength
+ }
+ if q.minWordLength != nil {
+ suggester["min_word_len"] = *q.minWordLength
+ }
+ if q.minDocFreq != nil {
+ suggester["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if !includeName {
+ return ts
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ts
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term_test.go
new file mode 100644
index 00000000..6d716292
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term_test.go
@@ -0,0 +1,25 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
// TestTermSuggesterSource checks the JSON serialization of a minimal
// term suggester, including the "text" before "term" ordering.
func TestTermSuggesterSource(t *testing.T) {
	s := NewTermSuggester("name").
		Text("n").
		Field("suggest")
	data, err := json.Marshal(s.Source(true))
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"name":{"text":"n","term":{"field":"suggest"}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go
new file mode 100644
index 00000000..d2595a44
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go
@@ -0,0 +1,342 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
// UpdateResult is the result of updating a document in Elasticsearch.
type UpdateResult struct {
	Index   string `json:"_index"`
	Type    string `json:"_type"`
	Id      string `json:"_id"`
	Version int    `json:"_version"`
	Created bool   `json:"created"`
	// GetResult is populated from the "get" element of the response,
	// if present.
	GetResult *GetResult `json:"get"`
}
+
// UpdateService updates a document in Elasticsearch.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html
// for details.
type UpdateService struct {
	client *Client
	// document coordinates
	index   string
	typ     string
	id      string
	routing string
	parent  string
	// script-based update settings
	script       string
	scriptId     string
	scriptFile   string
	scriptType   string
	scriptLang   string
	scriptParams map[string]interface{}
	// request options
	fields           []string
	version          *int64
	versionType      string
	retryOnConflict  *int
	refresh          *bool
	replicationType  string
	consistencyLevel string
	// upsert / partial-document settings
	upsert         interface{}
	scriptedUpsert *bool
	docAsUpsert    *bool
	detectNoop     *bool
	doc            interface{}
	timeout        string
	pretty         bool
}
+
+// NewUpdateService creates the service to update documents in Elasticsearch.
+func NewUpdateService(client *Client) *UpdateService {
+ builder := &UpdateService{
+ client: client,
+ scriptParams: make(map[string]interface{}),
+ fields: make([]string, 0),
+ }
+ return builder
+}
+
// Index is the name of the Elasticsearch index (required).
func (b *UpdateService) Index(name string) *UpdateService {
	b.index = name
	return b
}

// Type is the type of the document (required).
func (b *UpdateService) Type(typ string) *UpdateService {
	b.typ = typ
	return b
}

// Id is the identifier of the document to update (required).
func (b *UpdateService) Id(id string) *UpdateService {
	b.id = id
	return b
}

// Routing specifies a specific routing value.
func (b *UpdateService) Routing(routing string) *UpdateService {
	b.routing = routing
	return b
}

// Parent sets the id of the parent document.
func (b *UpdateService) Parent(parent string) *UpdateService {
	b.parent = parent
	return b
}

// Script is the URL-encoded script definition.
func (b *UpdateService) Script(script string) *UpdateService {
	b.script = script
	return b
}

// ScriptId is the id of a stored script.
func (b *UpdateService) ScriptId(scriptId string) *UpdateService {
	b.scriptId = scriptId
	return b
}

// ScriptFile is the file name of a stored script.
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html for details.
func (b *UpdateService) ScriptFile(scriptFile string) *UpdateService {
	b.scriptFile = scriptFile
	return b
}

// ScriptType sets the type of the script.
func (b *UpdateService) ScriptType(scriptType string) *UpdateService {
	b.scriptType = scriptType
	return b
}

// ScriptLang defines the scripting language (default: groovy).
func (b *UpdateService) ScriptLang(scriptLang string) *UpdateService {
	b.scriptLang = scriptLang
	return b
}

// ScriptParams sets the parameters passed to the script.
func (b *UpdateService) ScriptParams(params map[string]interface{}) *UpdateService {
	b.scriptParams = params
	return b
}

// RetryOnConflict specifies how many times the operation should be retried
// when a conflict occurs (default: 0).
func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
	b.retryOnConflict = &retryOnConflict
	return b
}

// Fields is a list of fields to return in the response.
func (b *UpdateService) Fields(fields ...string) *UpdateService {
	b.fields = make([]string, 0, len(fields))
	b.fields = append(b.fields, fields...)
	return b
}

// Version defines the explicit version number for concurrency control.
func (b *UpdateService) Version(version int64) *UpdateService {
	b.version = &version
	return b
}

// VersionType is one of "internal" or "force".
func (b *UpdateService) VersionType(versionType string) *UpdateService {
	b.versionType = versionType
	return b
}

// Refresh the index after performing the update.
func (b *UpdateService) Refresh(refresh bool) *UpdateService {
	b.refresh = &refresh
	return b
}

// ReplicationType is one of "sync" or "async".
func (b *UpdateService) ReplicationType(replicationType string) *UpdateService {
	b.replicationType = replicationType
	return b
}

// ConsistencyLevel is one of "one", "quorum", or "all".
// It sets the write consistency setting for the update operation.
func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService {
	b.consistencyLevel = consistencyLevel
	return b
}

// Doc allows for updating a partial document.
func (b *UpdateService) Doc(doc interface{}) *UpdateService {
	b.doc = doc
	return b
}

// Upsert can be used to index the document when it doesn't exist yet.
// Use this e.g. to initialize a document with a default value.
func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
	b.upsert = doc
	return b
}

// DocAsUpsert can be used to insert the document if it doesn't already exist.
func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
	b.docAsUpsert = &docAsUpsert
	return b
}

// DetectNoop will instruct Elasticsearch to check if changes will occur
// when updating via Doc. If there aren't any changes, the request will
// turn into a no-op.
func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
	b.detectNoop = &detectNoop
	return b
}

// ScriptedUpsert should be set to true if the referenced script
// (defined in Script or ScriptId) should be called to perform an insert.
// The default is false.
func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
	b.scriptedUpsert = &scriptedUpsert
	return b
}

// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
func (b *UpdateService) Timeout(timeout string) *UpdateService {
	b.timeout = timeout
	return b
}

// Pretty instructs to return human readable, prettified JSON.
func (b *UpdateService) Pretty(pretty bool) *UpdateService {
	b.pretty = pretty
	return b
}
+
+// url returns the URL part of the document request.
+func (b *UpdateService) url() (string, url.Values, error) {
+ // Build url
+ path := "/{index}/{type}/{id}/_update"
+ path, err := uritemplates.Expand(path, map[string]string{
+ "index": b.index,
+ "type": b.typ,
+ "id": b.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.routing != "" {
+ params.Set("routing", b.routing)
+ }
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+ if b.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+ if b.replicationType != "" {
+ params.Set("replication", b.replicationType)
+ }
+ if b.consistencyLevel != "" {
+ params.Set("consistency", b.consistencyLevel)
+ }
+ if len(b.fields) > 0 {
+ params.Set("fields", strings.Join(b.fields, ","))
+ }
+ if b.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Set("version_type", b.versionType)
+ }
+ if b.retryOnConflict != nil {
+ params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict))
+ }
+
+ return path, params, nil
+}
+
+// body returns the body part of the document request.
+func (b *UpdateService) body() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if b.script != "" {
+ source["script"] = b.script
+ }
+ if b.scriptId != "" {
+ source["script_id"] = b.scriptId
+ }
+ if b.scriptFile != "" {
+ source["script_file"] = b.scriptFile
+ }
+ if b.scriptLang != "" {
+ source["lang"] = b.scriptLang
+ }
+ if len(b.scriptParams) > 0 {
+ source["params"] = b.scriptParams
+ }
+ if b.scriptedUpsert != nil {
+ source["scripted_upsert"] = *b.scriptedUpsert
+ }
+
+ if b.upsert != nil {
+ source["upsert"] = b.upsert
+ }
+
+ if b.doc != nil {
+ source["doc"] = b.doc
+ }
+ if b.docAsUpsert != nil {
+ source["doc_as_upsert"] = *b.docAsUpsert
+ }
+ if b.detectNoop != nil {
+ source["detect_noop"] = *b.detectNoop
+ }
+
+ return source, nil
+}
+
// Do executes the update operation. It returns the parsed response as
// an UpdateResult, or an error if building or performing the request
// failed.
func (b *UpdateService) Do() (*UpdateResult, error) {
	// Build URL path and query string parameters.
	path, params, err := b.url()
	if err != nil {
		return nil, err
	}

	// Get body of the request
	body, err := b.body()
	if err != nil {
		return nil, err
	}

	// Get response
	res, err := b.client.PerformRequest("POST", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return result
	ret := new(UpdateResult)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update_test.go
new file mode 100644
index 00000000..eb648eb0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update_test.go
@@ -0,0 +1,313 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "net/url"
+ "testing"
+)
+
// TestUpdateViaScript checks URL and body generation for a script-based
// update.
func TestUpdateViaScript(t *testing.T) {
	client := setupTestClient(t)
	update := client.Update().
		Index("test").Type("type1").Id("1").
		Script("ctx._source.tags += tag").
		ScriptParams(map[string]interface{}{"tag": "blue"}).
		ScriptLang("groovy")
	path, params, err := update.url()
	if err != nil {
		t.Fatalf("expected to return URL, got: %v", err)
	}
	expectedPath := `/test/type1/1/_update`
	if expectedPath != path {
		t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
	}
	expectedParams := url.Values{}
	if expectedParams.Encode() != params.Encode() {
		t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
	}
	body, err := update.body()
	if err != nil {
		t.Fatalf("expected to return body, got: %v", err)
	}
	data, err := json.Marshal(body)
	if err != nil {
		t.Fatalf("expected to marshal body as JSON, got: %v", err)
	}
	got := string(data)
	expected := `{"lang":"groovy","params":{"tag":"blue"},"script":"ctx._source.tags += tag"}`
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}
}

// TestUpdateViaScriptId checks URL and body generation for an update
// via a stored script id with scripted upsert.
func TestUpdateViaScriptId(t *testing.T) {
	client := setupTestClient(t)

	scriptParams := map[string]interface{}{
		"pageViewEvent": map[string]interface{}{
			"url":      "foo.com/bar",
			"response": 404,
			"time":     "2014-01-01 12:32",
		},
	}

	update := client.Update().
		Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
		ScriptId("my_web_session_summariser").
		ScriptedUpsert(true).
		ScriptParams(scriptParams).
		Upsert(map[string]interface{}{})
	path, params, err := update.url()
	if err != nil {
		t.Fatalf("expected to return URL, got: %v", err)
	}
	expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update`
	if expectedPath != path {
		t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
	}
	expectedParams := url.Values{}
	if expectedParams.Encode() != params.Encode() {
		t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
	}
	body, err := update.body()
	if err != nil {
		t.Fatalf("expected to return body, got: %v", err)
	}
	data, err := json.Marshal(body)
	if err != nil {
		t.Fatalf("expected to marshal body as JSON, got: %v", err)
	}
	got := string(data)
	expected := `{"params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}},"script_id":"my_web_session_summariser","scripted_upsert":true,"upsert":{}}`
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}
}

// TestUpdateViaScriptFile checks URL and body generation for an update
// via a script file with scripted upsert.
func TestUpdateViaScriptFile(t *testing.T) {
	client := setupTestClient(t)

	scriptParams := map[string]interface{}{
		"pageViewEvent": map[string]interface{}{
			"url":      "foo.com/bar",
			"response": 404,
			"time":     "2014-01-01 12:32",
		},
	}

	update := client.Update().
		Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
		ScriptFile("update_script").
		ScriptedUpsert(true).
		ScriptParams(scriptParams).
		Upsert(map[string]interface{}{})
	path, params, err := update.url()
	if err != nil {
		t.Fatalf("expected to return URL, got: %v", err)
	}
	expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update`
	if expectedPath != path {
		t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
	}
	expectedParams := url.Values{}
	if expectedParams.Encode() != params.Encode() {
		t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
	}
	body, err := update.body()
	if err != nil {
		t.Fatalf("expected to return body, got: %v", err)
	}
	data, err := json.Marshal(body)
	if err != nil {
		t.Fatalf("expected to marshal body as JSON, got: %v", err)
	}
	got := string(data)
	expected := `{"params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}},"script_file":"update_script","scripted_upsert":true,"upsert":{}}`
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}
}

// TestUpdateViaScriptAndUpsert checks URL and body generation for a
// script-based update with an upsert document.
func TestUpdateViaScriptAndUpsert(t *testing.T) {
	client := setupTestClient(t)
	update := client.Update().
		Index("test").Type("type1").Id("1").
		Script("ctx._source.counter += count").
		ScriptParams(map[string]interface{}{"count": 4}).
		Upsert(map[string]interface{}{"counter": 1})
	path, params, err := update.url()
	if err != nil {
		t.Fatalf("expected to return URL, got: %v", err)
	}
	expectedPath := `/test/type1/1/_update`
	if expectedPath != path {
		t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
	}
	expectedParams := url.Values{}
	if expectedParams.Encode() != params.Encode() {
		t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
	}
	body, err := update.body()
	if err != nil {
		t.Fatalf("expected to return body, got: %v", err)
	}
	data, err := json.Marshal(body)
	if err != nil {
		t.Fatalf("expected to marshal body as JSON, got: %v", err)
	}
	got := string(data)
	expected := `{"params":{"count":4},"script":"ctx._source.counter += count","upsert":{"counter":1}}`
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}
}

// TestUpdateViaDoc checks URL and body generation for a partial-document
// update with noop detection.
func TestUpdateViaDoc(t *testing.T) {
	client := setupTestClient(t)
	update := client.Update().
		Index("test").Type("type1").Id("1").
		Doc(map[string]interface{}{"name": "new_name"}).
		DetectNoop(true)
	path, params, err := update.url()
	if err != nil {
		t.Fatalf("expected to return URL, got: %v", err)
	}
	expectedPath := `/test/type1/1/_update`
	if expectedPath != path {
		t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
	}
	expectedParams := url.Values{}
	if expectedParams.Encode() != params.Encode() {
		t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
	}
	body, err := update.body()
	if err != nil {
		t.Fatalf("expected to return body, got: %v", err)
	}
	data, err := json.Marshal(body)
	if err != nil {
		t.Fatalf("expected to marshal body as JSON, got: %v", err)
	}
	got := string(data)
	expected := `{"detect_noop":true,"doc":{"name":"new_name"}}`
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}
}
+
+func TestUpdateViaDocAndUpsert(t *testing.T) {
+ client := setupTestClient(t)
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Doc(map[string]interface{}{"name": "new_name"}).
+ DocAsUpsert(true).
+ Timeout("1s").
+ Refresh(true)
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
// TestUpdateViaScriptIntegration runs a full update round-trip against a
// live Elasticsearch instance: index a document, increment a field via a
// groovy script update, then fetch the document back and verify the
// incremented value and the bumped version.
func TestUpdateViaScriptIntegration(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	esversion, err := client.ElasticsearchVersion(DefaultURL)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): versions are compared lexicographically as strings;
	// this matches the intent for 1.x version numbers only.
	if esversion >= "1.4.3" || (esversion < "1.4.0" && esversion >= "1.3.8") {
		t.Skip("groovy scripting has been disabled as for [1.3.8,1.4.0) and 1.4.3+")
		return
	}

	tweet1 := tweet{User: "olivere", Retweets: 10, Message: "Welcome to Golang and Elasticsearch."}

	// Add a document
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		BodyJson(&tweet1).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}

	// Update number of retweets via a server-side script.
	increment := 1
	update, err := client.Update().Index(testIndexName).Type("tweet").Id("1").
		Script("ctx._source.retweets += num").
		ScriptParams(map[string]interface{}{"num": increment}).
		ScriptLang("groovy"). // Use "groovy" as default language as 1.3 uses MVEL by default
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if update == nil {
		t.Errorf("expected update to be != nil; got %v", update)
	}
	// A successful update must bump the document version by exactly one.
	if update.Version != indexResult.Version+1 {
		t.Errorf("expected version to be %d; got %d", indexResult.Version+1, update.Version)
	}

	// Get document
	getResult, err := client.Get().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if getResult.Index != testIndexName {
		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
	}
	if getResult.Type != "tweet" {
		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
	}
	if getResult.Id != "1" {
		t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
	}
	if getResult.Source == nil {
		t.Errorf("expected GetResult.Source to be != nil; got nil")
	}

	// Decode the Source field and check the script's effect.
	var tweetGot tweet
	err = json.Unmarshal(*getResult.Source, &tweetGot)
	if err != nil {
		t.Fatal(err)
	}
	if tweetGot.Retweets != tweet1.Retweets+increment {
		t.Errorf("expected Tweet.Retweets to be %d; got %d", tweet1.Retweets+increment, tweetGot.Retweets)
	}
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE
new file mode 100644
index 00000000..de9c88cb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go
new file mode 100644
index 00000000..8a84813f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.ExpandString(values)
+// fmt.Printf(expanded)
+//
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
var (
	// unreserved matches every byte that must be percent-encoded when
	// reserved URI characters are NOT allowed (complement of the
	// RFC 6570 "unreserved" set).
	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
	// reserved matches every byte that must be percent-encoded when
	// reserved URI characters ARE allowed (complement of the unreserved
	// plus reserved sets).
	reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
	// validname matches legal template variable names: alphanumerics,
	// underscore, dot, or percent-encoded triplets.
	validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
	// hex holds the uppercase hexadecimal digits used by pctEncode.
	hex = []byte("0123456789ABCDEF")
)
+
// pctEncode percent-encodes every byte of src, producing three output
// bytes ("%XY", uppercase hex) per input byte.
func pctEncode(src []byte) []byte {
	const digits = "0123456789ABCDEF"
	dst := make([]byte, 0, len(src)*3)
	for _, b := range src {
		dst = append(dst, '%', digits[b>>4], digits[b&0x0F])
	}
	return dst
}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
+
// A UriTemplate is a parsed representation of a URI template.
type UriTemplate struct {
	raw   string         // the original template string, as passed to Parse
	parts []templatePart // alternating literal and expression segments
}
+
// Parse parses a URI template string into a UriTemplate object.
// It returns a nil template and a non-nil error when rawtemplate is
// malformed (stray or unbalanced braces, or an invalid expression).
func Parse(rawtemplate string) (template *UriTemplate, err error) {
	template = new(UriTemplate)
	template.raw = rawtemplate
	// Splitting on "{" yields [literal, "expr}literal", "expr}literal", ...].
	// parts is laid out alternately: even indices hold literals, odd
	// indices hold parsed expressions — hence len(split)*2-1 entries.
	split := strings.Split(rawtemplate, "{")
	template.parts = make([]templatePart, len(split)*2-1)
	for i, s := range split {
		if i == 0 {
			// Text before the first "{" must not contain a stray "}".
			if strings.Contains(s, "}") {
				err = errors.New("unexpected }")
				break
			}
			template.parts[i].raw = s
		} else {
			// Every later chunk must be exactly "expression}literal";
			// any other count of "}" means unbalanced or nested braces.
			subsplit := strings.Split(s, "}")
			if len(subsplit) != 2 {
				err = errors.New("malformed template")
				break
			}
			expression := subsplit[0]
			template.parts[i*2-1], err = parseExpression(expression)
			if err != nil {
				break
			}
			template.parts[i*2].raw = subsplit[1]
		}
	}
	if err != nil {
		template = nil
	}
	return template, err
}
+
// templatePart is one alternating segment of a parsed template: either a
// literal (raw != "") or an expression with its operator-derived settings.
type templatePart struct {
	raw           string         // literal text; empty for expression parts
	terms         []templateTerm // variables of an expression part
	first         string         // prefix written before the first defined term
	sep           string         // separator between defined terms
	named         bool           // whether terms are written as name=value
	ifemp         string         // suffix after the name when the value is empty
	allowReserved bool           // whether reserved URI characters pass unescaped
}
+
// templateTerm is a single variable reference inside an expression.
type templateTerm struct {
	name     string // variable name
	explode  bool   // "*" modifier: expand composite values element-wise
	truncate int    // ":n" prefix modifier: maximum length, 0 = unlimited
}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
// expand writes the expansion of this template part into buf using values.
// A literal part is copied verbatim; for an expression part each term that
// has a value is expanded, separated by the part's separator.
func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
	// Literal part: copy through unchanged.
	if len(self.raw) > 0 {
		buf.WriteString(self.raw)
		return nil
	}
	// Remember where this part begins so it can be rolled back entirely
	// if no term ends up producing output.
	var zeroLen = buf.Len()
	buf.WriteString(self.first)
	var firstLen = buf.Len()
	for _, term := range self.terms {
		value, exists := values[term.name]
		if !exists {
			continue
		}
		// Write the separator before every term after the first one
		// that actually produced output.
		if buf.Len() != firstLen {
			buf.WriteString(self.sep)
		}
		switch v := value.(type) {
		case string:
			self.expandString(buf, term, v)
		case []interface{}:
			self.expandArray(buf, term, v)
		case map[string]interface{}:
			// RFC 6570 forbids the prefix modifier on composite values.
			if term.truncate > 0 {
				return errors.New("cannot truncate a map expansion")
			}
			self.expandMap(buf, term, v)
		default:
			// Fallback: structs (or struct pointers) expand like maps;
			// anything else is formatted with %v and expanded as a string.
			if m, ismap := struct2map(value); ismap {
				if term.truncate > 0 {
					return errors.New("cannot truncate a map expansion")
				}
				self.expandMap(buf, term, m)
			} else {
				str := fmt.Sprintf("%v", value)
				self.expandString(buf, term, str)
			}
		}
	}
	// No term was defined: drop the prefix (self.first) written above by
	// restoring the buffer to its state at the start of this part.
	if buf.Len() == firstLen {
		original := buf.Bytes()[:zeroLen]
		buf.Reset()
		buf.Write(original)
	}
	return nil
}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
// expandArray expands an array value for term t. With the explode
// modifier each element is separated by the part's separator (and, for
// named parts, each element carries its own name prefix); otherwise the
// elements form one comma-joined list after a single name prefix.
func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
	if len(a) == 0 {
		return
	} else if !t.explode {
		self.expandName(buf, t.name, false)
	}
	for i, value := range a {
		// Separator before every element after the first: the part's
		// separator when exploded, a plain comma otherwise.
		if t.explode && i > 0 {
			buf.WriteString(self.sep)
		} else if i > 0 {
			buf.WriteString(",")
		}
		var s string
		switch v := value.(type) {
		case string:
			s = v
		default:
			// Non-string elements are formatted with %v.
			s = fmt.Sprintf("%v", v)
		}
		// Apply the prefix (truncation) modifier, if any.
		if len(s) > t.truncate && t.truncate > 0 {
			s = s[:t.truncate]
		}
		// Exploded named forms repeat the name before each element.
		if self.named && t.explode {
			self.expandName(buf, t.name, len(s) == 0)
		}
		buf.WriteString(escape(s, self.allowReserved))
	}
}
+
// expandMap expands a map value for term t. With the explode modifier
// pairs are written as key=value joined by the part's separator;
// otherwise as key,value pairs in one comma-joined list after a single
// name prefix.
// NOTE(review): Go map iteration order is randomized, so maps with more
// than one entry expand their pairs in nondeterministic order.
func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
	if len(m) == 0 {
		return
	}
	if !t.explode {
		// len(m) == 0 is always false here (guarded above), so the name
		// is never written with its if-empty suffix.
		self.expandName(buf, t.name, len(m) == 0)
	}
	var firstLen = buf.Len()
	for k, value := range m {
		// Separator before every pair after the first.
		if firstLen != buf.Len() {
			if t.explode {
				buf.WriteString(self.sep)
			} else {
				buf.WriteString(",")
			}
		}
		var s string
		switch v := value.(type) {
		case string:
			s = v
		default:
			// Non-string values are formatted with %v.
			s = fmt.Sprintf("%v", v)
		}
		// Exploded pairs use "=", non-exploded pairs use ",".
		if t.explode {
			buf.WriteString(escape(k, self.allowReserved))
			buf.WriteRune('=')
			buf.WriteString(escape(s, self.allowReserved))
		} else {
			buf.WriteString(escape(k, self.allowReserved))
			buf.WriteRune(',')
			buf.WriteString(escape(s, self.allowReserved))
		}
	}
}
+
// struct2map converts a struct (or pointer to struct) into a
// map[string]interface{} keyed by the `uri` struct tag when present,
// by the raw tag text otherwise, falling back to the field name.
// It reports false when v is not a struct or pointer to one.
func struct2map(v interface{}) (map[string]interface{}, bool) {
	value := reflect.ValueOf(v)
	// Guard: reflect.ValueOf(nil) yields an invalid Value whose Type()
	// call would panic.
	if !value.IsValid() {
		return nil, false
	}
	switch value.Type().Kind() {
	case reflect.Ptr:
		// Guard: dereferencing a nil pointer would panic.
		if value.IsNil() {
			return nil, false
		}
		return struct2map(value.Elem().Interface())
	case reflect.Struct:
		m := make(map[string]interface{}, value.NumField())
		for i := 0; i < value.NumField(); i++ {
			field := value.Type().Field(i)
			// Skip unexported fields: Interface() on them panics.
			if field.PkgPath != "" {
				continue
			}
			tag := field.Tag
			var name string
			if strings.Contains(string(tag), ":") {
				// Conventional tag syntax: use the `uri` key.
				name = tag.Get("uri")
			} else {
				// Bare tag text is treated as the name itself.
				name = strings.TrimSpace(string(tag))
			}
			if len(name) == 0 {
				name = field.Name
			}
			m[name] = value.Field(i).Interface()
		}
		return m, true
	}
	return nil, false
}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go
new file mode 100644
index 00000000..399ef462
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go
@@ -0,0 +1,13 @@
+package uritemplates
+
+func Expand(path string, expansions map[string]string) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ values := make(map[string]interface{})
+ for k, v := range expansions {
+ values[k] = v
+ }
+ return template.Expand(values)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils_test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils_test.go
new file mode 100644
index 00000000..633949b6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils_test.go
@@ -0,0 +1,105 @@
+package uritemplates
+
+import (
+ "testing"
+)
+
// ExpandTest is one table entry for TestExpand: a template, the values
// to expand it with, and the expected result ("" when parsing fails).
type ExpandTest struct {
	in         string
	expansions map[string]string
	want       string
}
+
// expandTests covers literal pass-through, escaping, missing values,
// multiple expansions, UTF-8 input, malformed templates, and the "+"
// reserved-expansion operator.
var expandTests = []ExpandTest{
	// #0: no expansions
	{
		"http://www.golang.org/",
		map[string]string{},
		"http://www.golang.org/",
	},
	// #1: one expansion, no escaping
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red",
		},
		"http://www.golang.org/red/delete",
	},
	// #2: one expansion, with hex escapes
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red/blue",
		},
		"http://www.golang.org/red%2Fblue/delete",
	},
	// #3: one expansion, with space
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red or blue",
		},
		"http://www.golang.org/red%20or%20blue/delete",
	},
	// #4: expansion not found
	{
		"http://www.golang.org/{object}/delete",
		map[string]string{
			"bucket": "red or blue",
		},
		"http://www.golang.org//delete",
	},
	// #5: multiple expansions
	{
		"http://www.golang.org/{one}/{two}/{three}/get",
		map[string]string{
			"one":   "ONE",
			"two":   "TWO",
			"three": "THREE",
		},
		"http://www.golang.org/ONE/TWO/THREE/get",
	},
	// #6: utf-8 characters
	{
		"http://www.golang.org/{bucket}/get",
		map[string]string{
			"bucket": "£100",
		},
		"http://www.golang.org/%C2%A3100/get",
	},
	// #7: punctuations
	{
		"http://www.golang.org/{bucket}/get",
		map[string]string{
			"bucket": `/\@:,.*~`,
		},
		"http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get",
	},
	// #8: mis-matched brackets (parse error, expect empty result)
	{
		"http://www.golang.org/{bucket/get",
		map[string]string{
			"bucket": "red",
		},
		"",
	},
	// #9: "+" prefix for suppressing escape
	// See also: http://tools.ietf.org/html/rfc6570#section-3.2.3
	{
		"http://www.golang.org/{+topic}",
		map[string]string{
			"topic": "/topics/myproject/mytopic",
		},
		// The double slashes here look weird, but it's intentional
		"http://www.golang.org//topics/myproject/mytopic",
	},
}
+
+func TestExpand(t *testing.T) {
+ for i, test := range expandTests {
+ got, _ := Expand(test.in, test.expansions)
+ if got != test.want {
+ t.Errorf("got %q expected %q in test %d", got, test.want, i)
+ }
+ }
+}
diff --git a/storage/elasticsearch/elasticsearch.go b/storage/elasticsearch/elasticsearch.go
new file mode 100644
index 00000000..b7f0ec11
--- /dev/null
+++ b/storage/elasticsearch/elasticsearch.go
@@ -0,0 +1,122 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elasticsearch
+
+import (
+ "fmt"
+ "sync"
+
+ info "github.com/google/cadvisor/info/v1"
+ storage "github.com/google/cadvisor/storage"
+ "gopkg.in/olivere/elastic.v2"
+)
+
// elasticStorage is a cadvisor storage driver that writes container
// stats samples to an Elasticsearch index.
type elasticStorage struct {
	client      *elastic.Client
	machineName string     // identifies the host this cAdvisor instance runs on
	indexName   string     // target Elasticsearch index
	typeName    string     // target Elasticsearch document type
	lock        sync.Mutex // serializes writes from concurrent AddStats calls
}
+
// detailSpec is the JSON document indexed into Elasticsearch for each
// stats sample.
type detailSpec struct {
	// Timestamp is microseconds since the Unix epoch (UnixNano / 1e3),
	// as computed by containerStatsAndDefaultValues.
	Timestamp   int64  `json:"timestamp"`
	MachineName string `json:"machine_name,omitempty"`
	// NOTE(review): the capital N in "container_Name" is inconsistent
	// with the other snake_case keys — confirm no consumer relies on it
	// before renaming, since changing the tag changes the indexed schema.
	ContainerName  string               `json:"container_Name,omitempty"`
	ContainerStats *info.ContainerStats `json:"container_stats,omitempty"`
}
+
+func (self *elasticStorage) containerStatsAndDefaultValues(
+ ref info.ContainerReference, stats *info.ContainerStats) *detailSpec {
+ timestamp := stats.Timestamp.UnixNano() / 1E3
+ var containerName string
+ if len(ref.Aliases) > 0 {
+ containerName = ref.Aliases[0]
+ } else {
+ containerName = ref.Name
+ }
+ detail := &detailSpec{
+ Timestamp: timestamp,
+ MachineName: self.machineName,
+ ContainerName: containerName,
+ ContainerStats: stats,
+ }
+ return detail
+}
+
+func (self *elasticStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
+ if stats == nil {
+ return nil
+ }
+ func() {
+ // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
+ self.lock.Lock()
+ defer self.lock.Unlock()
+ // Add some default params based on ContainerStats
+ detail := self.containerStatsAndDefaultValues(ref, stats)
+ // Index a cadvisor (using JSON serialization)
+ put, err := self.client.Index().
+ Index(self.indexName).
+ Type(self.typeName).
+ BodyJson(detail).
+ Do()
+ if err != nil {
+ // Handle error
+ panic(fmt.Errorf("failed to write stats to ElasticSearch- %s", err))
+ }
+ fmt.Printf("Indexed tweet %s to index %s, type %s\n", put.Id, put.Index, put.Type)
+ }()
+ return nil
+}
+
// Close releases the Elasticsearch client reference. No further writes
// should be issued through this driver afterwards.
func (self *elasticStorage) Close() error {
	self.client = nil
	return nil
}
+
+// machineName: A unique identifier to identify the host that current cAdvisor
+// instance is running on.
+// ElasticHost: The host which runs ElasticSearch.
+func New(machineName,
+ indexName,
+ typeName,
+ elasticHost string,
+) (storage.StorageDriver, error) {
+ // Obtain a client and connect to the default Elasticsearch installation
+ // on 127.0.0.1:9200. Of course you can configure your client to connect
+ // to other hosts and configure it in various other ways.
+ client, err := elastic.NewClient(
+ elastic.SetURL(elasticHost))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Ping the Elasticsearch server to get e.g. the version number
+ info, code, err := client.Ping().Do()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
+
+ ret := &elasticStorage{
+ client: client,
+ machineName: machineName,
+ indexName: indexName,
+ typeName: typeName,
+ }
+ return ret, nil
+}
diff --git a/storagedriver.go b/storagedriver.go
index 7f71a93f..b177131c 100644
--- a/storagedriver.go
+++ b/storagedriver.go
@@ -24,6 +24,7 @@ import (
"github.com/google/cadvisor/cache/memory"
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/storage/bigquery"
+ "github.com/google/cadvisor/storage/elasticsearch"
"github.com/google/cadvisor/storage/influxdb"
"github.com/google/cadvisor/storage/redis"
"github.com/google/cadvisor/storage/statsd"
@@ -38,6 +39,9 @@ var argDbTable = flag.String("storage_driver_table", "stats", "table name")
var argDbIsSecure = flag.Bool("storage_driver_secure", false, "use secure connection with database")
var argDbBufferDuration = flag.Duration("storage_driver_buffer_duration", 60*time.Second, "Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction")
var storageDuration = flag.Duration("storage_duration", 2*time.Minute, "How long to keep data stored (Default: 2min).")
+var argElasticHost = flag.String("storage_driver_es_host", "http://localhost:9200", "database host:port")
+var argIndexName = flag.String("storage_driver_index", "cadvisor", "index name")
+var argTypeName = flag.String("storage_driver_type", "stats", "type name")
// Creates a memory storage with an optional backend storage option.
func NewMemoryStorage(backendStorageName string) (*memory.InMemoryCache, error) {
@@ -90,6 +94,21 @@ func NewMemoryStorage(backendStorageName string) (*memory.InMemoryCache, error)
*argDbHost,
*argDbBufferDuration,
)
+ case "elasticsearch":
+ //argIndexName: the index for elasticsearch
+ //argTypeName: the type for index
+ //argElasticHost: the elasticsearch's server host
+ var machineName string
+ machineName, err = os.Hostname()
+ if err != nil {
+ return nil, err
+ }
+ backendStorage, err = elasticsearch.New(
+ machineName,
+ *argIndexName,
+ *argTypeName,
+ *argElasticHost,
+ )
case "statsd":
backendStorage, err = statsd.New(
*argDbName,