From 49d5ce9c73edce9fcf1453a97fae137b80e17faf Mon Sep 17 00:00:00 2001
From: Tom Wilkie <tom.wilkie@gmail.com>
Date: Mon, 16 Apr 2018 09:40:08 +0100
Subject: [PATCH] Update vendor.
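
Vendor github.com/Shopify/sarama v1.16.0, github.com/apache/thrift 0.11.0
(lib/go/thrift), and github.com/openzipkin/zipkin-go-opentracing v0.3.3 with
its generated scribe/zipkincore bindings, plus davecgh/go-spew, the eapache
packages (go-resiliency/breaker, go-xerial-snappy, queue), pierrec/lz4,
pierrec/xxHash/xxHash32, rcrowley/go-metrics, sercand/kuberesolver,
opentracing-contrib/go-observer and go-stdlib/nethttp, the
weaveworks-experiments/loki client, the weaveworks/common httpgrpc/server,
server and signals packages, and the cortex pkg/util/wire package. Gopkg.toml
now lists pkg/util/wire as required and pins kuberesolver via an override.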

---
 Gopkg.lock                                    |   99 +-
 Gopkg.toml                                    |    6 +
 vendor/github.com/Shopify/sarama/.gitignore   |   26 +
 vendor/github.com/Shopify/sarama/.travis.yml  |   35 +
 vendor/github.com/Shopify/sarama/CHANGELOG.md |  503 +++++++
 vendor/github.com/Shopify/sarama/LICENSE      |   20 +
 vendor/github.com/Shopify/sarama/Makefile     |   29 +
 vendor/github.com/Shopify/sarama/README.md    |   39 +
 vendor/github.com/Shopify/sarama/Vagrantfile  |   20 +
 .../github.com/Shopify/sarama/acl_bindings.go |  119 ++
 .../Shopify/sarama/acl_create_request.go      |   76 +
 .../Shopify/sarama/acl_create_response.go     |   88 ++
 .../Shopify/sarama/acl_delete_request.go      |   48 +
 .../Shopify/sarama/acl_delete_response.go     |  155 ++
 .../Shopify/sarama/acl_describe_request.go    |   25 +
 .../Shopify/sarama/acl_describe_response.go   |   80 +
 .../github.com/Shopify/sarama/acl_filter.go   |   61 +
 vendor/github.com/Shopify/sarama/acl_types.go |   42 +
 .../sarama/add_offsets_to_txn_request.go      |   52 +
 .../sarama/add_offsets_to_txn_response.go     |   44 +
 .../sarama/add_partitions_to_txn_request.go   |   76 +
 .../sarama/add_partitions_to_txn_response.go  |  108 ++
 .../Shopify/sarama/alter_configs_request.go   |  120 ++
 .../Shopify/sarama/alter_configs_response.go  |   95 ++
 .../Shopify/sarama/api_versions_request.go    |   24 +
 .../Shopify/sarama/api_versions_response.go   |   87 ++
 .../Shopify/sarama/async_producer.go          |  921 ++++++++++++
 vendor/github.com/Shopify/sarama/broker.go    |  823 ++++++++++
 vendor/github.com/Shopify/sarama/client.go    |  794 ++++++++++
 vendor/github.com/Shopify/sarama/config.go    |  442 ++++++
 .../Shopify/sarama/config_resource_type.go    |   15 +
 vendor/github.com/Shopify/sarama/consumer.go  |  833 ++++++++++
 .../Shopify/sarama/consumer_group_members.go  |   94 ++
 .../sarama/consumer_metadata_request.go       |   26 +
 .../sarama/consumer_metadata_response.go      |   85 ++
 .../github.com/Shopify/sarama/crc32_field.go  |   69 +
 .../sarama/create_partitions_request.go       |  121 ++
 .../sarama/create_partitions_response.go      |   94 ++
 .../Shopify/sarama/create_topics_request.go   |  174 +++
 .../Shopify/sarama/create_topics_response.go  |  112 ++
 .../Shopify/sarama/delete_topics_request.go   |   41 +
 .../Shopify/sarama/delete_topics_response.go  |   78 +
 .../sarama/describe_configs_request.go        |   91 ++
 .../sarama/describe_configs_response.go       |  188 +++
 .../Shopify/sarama/describe_groups_request.go |   30 +
 .../sarama/describe_groups_response.go        |  187 +++
 vendor/github.com/Shopify/sarama/dev.yml      |   10 +
 .../Shopify/sarama/encoder_decoder.go         |   89 ++
 .../Shopify/sarama/end_txn_request.go         |   50 +
 .../Shopify/sarama/end_txn_response.go        |   44 +
 vendor/github.com/Shopify/sarama/errors.go    |  273 ++++
 .../Shopify/sarama/fetch_request.go           |  170 +++
 .../Shopify/sarama/fetch_response.go          |  385 +++++
 .../Shopify/sarama/heartbeat_request.go       |   47 +
 .../Shopify/sarama/heartbeat_response.go      |   32 +
 .../sarama/init_producer_id_request.go        |   43 +
 .../sarama/init_producer_id_response.go       |   55 +
 .../Shopify/sarama/join_group_request.go      |  143 ++
 .../Shopify/sarama/join_group_response.go     |  115 ++
 .../Shopify/sarama/leave_group_request.go     |   40 +
 .../Shopify/sarama/leave_group_response.go    |   32 +
 .../github.com/Shopify/sarama/length_field.go |   69 +
 .../Shopify/sarama/list_groups_request.go     |   24 +
 .../Shopify/sarama/list_groups_response.go    |   69 +
 vendor/github.com/Shopify/sarama/message.go   |  200 +++
 .../github.com/Shopify/sarama/message_set.go  |  102 ++
 .../Shopify/sarama/metadata_request.go        |   52 +
 .../Shopify/sarama/metadata_response.go       |  239 +++
 vendor/github.com/Shopify/sarama/metrics.go   |   51 +
 .../github.com/Shopify/sarama/mockbroker.go   |  330 ++++
 .../Shopify/sarama/mockresponses.go           |  477 ++++++
 .../Shopify/sarama/offset_commit_request.go   |  190 +++
 .../Shopify/sarama/offset_commit_response.go  |   85 ++
 .../Shopify/sarama/offset_fetch_request.go    |   81 +
 .../Shopify/sarama/offset_fetch_response.go   |  143 ++
 .../Shopify/sarama/offset_manager.go          |  560 +++++++
 .../Shopify/sarama/offset_request.go          |  132 ++
 .../Shopify/sarama/offset_response.go         |  174 +++
 .../Shopify/sarama/packet_decoder.go          |   60 +
 .../Shopify/sarama/packet_encoder.go          |   65 +
 .../github.com/Shopify/sarama/partitioner.go  |  135 ++
 .../github.com/Shopify/sarama/prep_encoder.go |  153 ++
 .../Shopify/sarama/produce_request.go         |  252 ++++
 .../Shopify/sarama/produce_response.go        |  183 +++
 .../github.com/Shopify/sarama/produce_set.go  |  243 +++
 .../github.com/Shopify/sarama/real_decoder.go |  324 ++++
 .../github.com/Shopify/sarama/real_encoder.go |  156 ++
 vendor/github.com/Shopify/sarama/record.go    |  113 ++
 .../github.com/Shopify/sarama/record_batch.go |  259 ++++
 vendor/github.com/Shopify/sarama/records.go   |  173 +++
 vendor/github.com/Shopify/sarama/request.go   |  145 ++
 .../Shopify/sarama/response_header.go         |   21 +
 vendor/github.com/Shopify/sarama/sarama.go    |   99 ++
 .../Shopify/sarama/sasl_handshake_request.go  |   33 +
 .../Shopify/sarama/sasl_handshake_response.go |   38 +
 .../Shopify/sarama/sync_group_request.go      |  100 ++
 .../Shopify/sarama/sync_group_response.go     |   41 +
 .../Shopify/sarama/sync_producer.go           |  164 ++
 vendor/github.com/Shopify/sarama/timestamp.go |   40 +
 .../sarama/txn_offset_commit_request.go       |  126 ++
 .../sarama/txn_offset_commit_response.go      |   83 +
 vendor/github.com/Shopify/sarama/utils.go     |  184 +++
 vendor/github.com/apache/thrift/LICENSE       |  239 +++
 vendor/github.com/apache/thrift/NOTICE        |    5 +
 .../apache/thrift/contrib/fb303/LICENSE       |   16 +
 .../github.com/apache/thrift/debian/copyright |  129 ++
 .../apache/thrift/lib/dart/LICENSE_HEADER     |   16 +
 .../lib/go/thrift/application_exception.go    |  164 ++
 .../thrift/lib/go/thrift/binary_protocol.go   |  514 +++++++
 .../lib/go/thrift/buffered_transport.go       |   91 ++
 .../apache/thrift/lib/go/thrift/client.go     |   78 +
 .../thrift/lib/go/thrift/client_go17.go       |   13 +
 .../thrift/lib/go/thrift/client_pre_go17.go   |   13 +
 .../thrift/lib/go/thrift/common_test_go17.go  |   32 +
 .../lib/go/thrift/common_test_pre_go17.go     |   32 +
 .../thrift/lib/go/thrift/compact_protocol.go  |  815 ++++++++++
 .../thrift/lib/go/thrift/debug_protocol.go    |  269 ++++
 .../thrift/lib/go/thrift/deserializer.go      |   58 +
 .../apache/thrift/lib/go/thrift/exception.go  |   44 +
 .../apache/thrift/lib/go/thrift/field.go      |   79 +
 .../thrift/lib/go/thrift/framed_transport.go  |  172 +++
 .../apache/thrift/lib/go/thrift/go17.go       |   26 +
 .../thrift/lib/go/thrift/http_client.go       |  238 +++
 .../thrift/lib/go/thrift/http_transport.go    |   51 +
 .../lib/go/thrift/http_transport_go17.go      |   38 +
 .../lib/go/thrift/http_transport_pre_go17.go  |   40 +
 .../lib/go/thrift/iostream_transport.go       |  213 +++
 .../thrift/lib/go/thrift/json_protocol.go     |  583 +++++++
 .../thrift/lib/go/thrift/memory_buffer.go     |   79 +
 .../thrift/lib/go/thrift/messagetype.go       |   31 +
 .../lib/go/thrift/multiplexed_protocol.go     |  139 ++
 .../go/thrift/multiplexed_protocol_go17.go    |   53 +
 .../thrift/multiplexed_protocol_pre_go17.go   |   54 +
 .../apache/thrift/lib/go/thrift/numeric.go    |  164 ++
 .../apache/thrift/lib/go/thrift/pointerize.go |   50 +
 .../apache/thrift/lib/go/thrift/pre_go17.go   |   26 +
 .../apache/thrift/lib/go/thrift/processor.go  |   34 +
 .../thrift/lib/go/thrift/processor_factory.go |   58 +
 .../thrift/lib/go/thrift/processor_go17.go    |   34 +
 .../apache/thrift/lib/go/thrift/protocol.go   |  178 +++
 .../lib/go/thrift/protocol_exception.go       |   77 +
 .../thrift/lib/go/thrift/protocol_factory.go  |   25 +
 .../thrift/lib/go/thrift/rich_transport.go    |   68 +
 .../apache/thrift/lib/go/thrift/serializer.go |   75 +
 .../apache/thrift/lib/go/thrift/server.go     |   35 +
 .../thrift/lib/go/thrift/server_socket.go     |  134 ++
 .../thrift/lib/go/thrift/server_transport.go  |   34 +
 .../lib/go/thrift/simple_json_protocol.go     | 1337 +++++++++++++++++
 .../thrift/lib/go/thrift/simple_server.go     |  215 +++
 .../apache/thrift/lib/go/thrift/socket.go     |  165 ++
 .../thrift/lib/go/thrift/ssl_server_socket.go |  112 ++
 .../apache/thrift/lib/go/thrift/ssl_socket.go |  175 +++
 .../apache/thrift/lib/go/thrift/transport.go  |   65 +
 .../lib/go/thrift/transport_exception.go      |   90 ++
 .../thrift/lib/go/thrift/transport_factory.go |   39 +
 .../apache/thrift/lib/go/thrift/type.go       |   69 +
 .../thrift/lib/go/thrift/zlib_transport.go    |  131 ++
 .../github.com/apache/thrift/lib/hs/LICENSE   |  202 +++
 .../apache/thrift/tutorial/erl/client.sh      |    1 +
 .../apache/thrift/tutorial/hs/LICENSE         |  239 +++
 vendor/github.com/davecgh/go-spew/LICENSE     |   15 +
 .../github.com/davecgh/go-spew/spew/bypass.go |  152 ++
 .../davecgh/go-spew/spew/bypasssafe.go        |   38 +
 .../github.com/davecgh/go-spew/spew/common.go |  341 +++++
 .../github.com/davecgh/go-spew/spew/config.go |  306 ++++
 vendor/github.com/davecgh/go-spew/spew/doc.go |  211 +++
 .../github.com/davecgh/go-spew/spew/dump.go   |  509 +++++++
 .../github.com/davecgh/go-spew/spew/format.go |  419 ++++++
 .../github.com/davecgh/go-spew/spew/spew.go   |  148 ++
 .../github.com/eapache/go-resiliency/LICENSE  |   22 +
 .../eapache/go-resiliency/breaker/README.md   |   34 +
 .../eapache/go-resiliency/breaker/breaker.go  |  161 ++
 .../eapache/go-xerial-snappy/.gitignore       |   24 +
 .../eapache/go-xerial-snappy/.travis.yml      |    7 +
 .../eapache/go-xerial-snappy/LICENSE          |   21 +
 .../eapache/go-xerial-snappy/README.md        |   13 +
 .../eapache/go-xerial-snappy/snappy.go        |   43 +
 vendor/github.com/eapache/queue/.gitignore    |   23 +
 vendor/github.com/eapache/queue/.travis.yml   |    7 +
 vendor/github.com/eapache/queue/LICENSE       |   21 +
 vendor/github.com/eapache/queue/README.md     |   16 +
 vendor/github.com/eapache/queue/queue.go      |  102 ++
 .../golang/protobuf/ptypes/empty/empty.proto  |   52 +
 .../go-observer/.gitignore                    |   14 +
 .../opentracing-contrib/go-observer/LICENSE   |  201 +++
 .../opentracing-contrib/go-observer/README.md |   64 +
 .../go-observer/observer.go                   |   39 +
 .../opentracing-contrib/go-stdlib/LICENSE     |   27 +
 .../go-stdlib/nethttp/client.go               |  301 ++++
 .../go-stdlib/nethttp/doc.go                  |    3 +
 .../go-stdlib/nethttp/server.go               |  110 ++
 .../zipkin-go-opentracing/.gitignore          |    4 +
 .../zipkin-go-opentracing/.travis.yml         |   12 +
 .../openzipkin/zipkin-go-opentracing/LICENSE  |   22 +
 .../openzipkin/zipkin-go-opentracing/Makefile |   26 +
 .../zipkin-go-opentracing/README.md           |   29 +
 .../zipkin-go-opentracing/circle.yml          |   10 +
 .../zipkin-go-opentracing/collector-http.go   |  234 +++
 .../zipkin-go-opentracing/collector-kafka.go  |   95 ++
 .../zipkin-go-opentracing/collector-scribe.go |  235 +++
 .../zipkin-go-opentracing/collector.go        |   77 +
 .../zipkin-go-opentracing/context.go          |   61 +
 .../openzipkin/zipkin-go-opentracing/debug.go |   78 +
 .../openzipkin/zipkin-go-opentracing/event.go |   62 +
 .../zipkin-go-opentracing/flag/flags.go       |   39 +
 .../log-materializers.go                      |  113 ++
 .../zipkin-go-opentracing/logger.go           |   64 +
 .../zipkin-go-opentracing/observer.go         |   52 +
 .../zipkin-go-opentracing/propagation.go      |   68 +
 .../zipkin-go-opentracing/propagation_ot.go   |  257 ++++
 .../openzipkin/zipkin-go-opentracing/raw.go   |   30 +
 .../zipkin-go-opentracing/recorder.go         |   60 +
 .../zipkin-go-opentracing/sample.go           |   99 ++
 .../openzipkin/zipkin-go-opentracing/span.go  |  290 ++++
 .../gen-go/scribe/GoUnusedProtection__.go     |    7 +
 .../thrift/gen-go/scribe/scribe-consts.go     |   24 +
 .../thrift/gen-go/scribe/scribe.go            |  550 +++++++
 .../gen-go/zipkincore/GoUnusedProtection__.go |    7 +
 .../gen-go/zipkincore/zipkinCore-consts.go    |   45 +
 .../thrift/gen-go/zipkincore/zipkinCore.go    | 1285 ++++++++++++++++
 .../zipkin-go-opentracing/tracer.go           |  440 ++++++
 .../zipkin-go-opentracing/types/traceid.go    |   38 +
 .../openzipkin/zipkin-go-opentracing/util.go  |   25 +
 .../zipkin-go-opentracing/wire/carrier.go     |   65 +
 .../zipkin-go-opentracing/wire/gen.go         |    6 +
 .../zipkin-go-opentracing/wire/wire.proto     |   13 +
 .../zipkin-go-opentracing/zipkin-endpoint.go  |   72 +
 .../zipkin-go-opentracing/zipkin-recorder.go  |  218 +++
 vendor/github.com/pierrec/lz4/.gitignore      |   31 +
 vendor/github.com/pierrec/lz4/.travis.yml     |    8 +
 vendor/github.com/pierrec/lz4/LICENSE         |   28 +
 vendor/github.com/pierrec/lz4/README.md       |   31 +
 vendor/github.com/pierrec/lz4/block.go        |  454 ++++++
 vendor/github.com/pierrec/lz4/lz4.go          |  105 ++
 vendor/github.com/pierrec/lz4/reader.go       |  364 +++++
 vendor/github.com/pierrec/lz4/writer.go       |  377 +++++
 vendor/github.com/pierrec/xxHash/LICENSE      |   28 +
 .../pierrec/xxHash/xxHash32/xxHash32.go       |  205 +++
 .../github.com/rcrowley/go-metrics/.gitignore |    9 +
 .../rcrowley/go-metrics/.travis.yml           |   17 +
 vendor/github.com/rcrowley/go-metrics/LICENSE |   29 +
 .../github.com/rcrowley/go-metrics/README.md  |  167 ++
 .../github.com/rcrowley/go-metrics/counter.go |  112 ++
 .../github.com/rcrowley/go-metrics/debug.go   |   76 +
 vendor/github.com/rcrowley/go-metrics/ewma.go |  138 ++
 .../github.com/rcrowley/go-metrics/gauge.go   |  120 ++
 .../rcrowley/go-metrics/gauge_float64.go      |  125 ++
 .../rcrowley/go-metrics/graphite.go           |  113 ++
 .../rcrowley/go-metrics/healthcheck.go        |   61 +
 .../rcrowley/go-metrics/histogram.go          |  202 +++
 vendor/github.com/rcrowley/go-metrics/json.go |   31 +
 vendor/github.com/rcrowley/go-metrics/log.go  |   80 +
 .../github.com/rcrowley/go-metrics/memory.md  |  285 ++++
 .../github.com/rcrowley/go-metrics/meter.go   |  257 ++++
 .../github.com/rcrowley/go-metrics/metrics.go |   13 +
 .../rcrowley/go-metrics/opentsdb.go           |  119 ++
 .../rcrowley/go-metrics/registry.go           |  363 +++++
 .../github.com/rcrowley/go-metrics/runtime.go |  212 +++
 .../rcrowley/go-metrics/runtime_cgo.go        |   10 +
 .../go-metrics/runtime_gccpufraction.go       |    9 +
 .../rcrowley/go-metrics/runtime_no_cgo.go     |    7 +
 .../go-metrics/runtime_no_gccpufraction.go    |    9 +
 .../github.com/rcrowley/go-metrics/sample.go  |  616 ++++++++
 .../github.com/rcrowley/go-metrics/syslog.go  |   78 +
 .../github.com/rcrowley/go-metrics/timer.go   |  329 ++++
 .../rcrowley/go-metrics/validate.sh           |   10 +
 .../github.com/rcrowley/go-metrics/writer.go  |  100 ++
 .../sercand/kuberesolver/.gitignore           |    2 +
 .../github.com/sercand/kuberesolver/README.md |   18 +
 .../sercand/kuberesolver/balancer.go          |  135 ++
 .../sercand/kuberesolver/kubernetes.go        |   71 +
 .../github.com/sercand/kuberesolver/models.go |   42 +
 .../sercand/kuberesolver/resolver.go          |   86 ++
 .../github.com/sercand/kuberesolver/stream.go |  105 ++
 .../github.com/sercand/kuberesolver/util.go   |   35 +
 .../sercand/kuberesolver/watcher.go           |   95 ++
 .../loki/pkg/client/collector.go              |  150 ++
 .../loki/pkg/client/tracer.go                 |   27 +
 .../common/httpgrpc/server/server.go          |  183 +++
 .../common/server/fake_server.proto           |   17 +
 .../weaveworks/common/server/server.go        |  189 +++
 .../weaveworks/common/signals/signals.go      |   75 +
 .../cortex/pkg/util/wire/BUILD.bazel          |    8 +
 .../weaveworks/cortex/pkg/util/wire/bytes.go  |   39 +
 284 files changed, 38006 insertions(+), 2 deletions(-)
 create mode 100644 vendor/github.com/Shopify/sarama/.gitignore
 create mode 100644 vendor/github.com/Shopify/sarama/.travis.yml
 create mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md
 create mode 100644 vendor/github.com/Shopify/sarama/LICENSE
 create mode 100644 vendor/github.com/Shopify/sarama/Makefile
 create mode 100644 vendor/github.com/Shopify/sarama/README.md
 create mode 100644 vendor/github.com/Shopify/sarama/Vagrantfile
 create mode 100644 vendor/github.com/Shopify/sarama/acl_bindings.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_create_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_create_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_filter.go
 create mode 100644 vendor/github.com/Shopify/sarama/acl_types.go
 create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/async_producer.go
 create mode 100644 vendor/github.com/Shopify/sarama/broker.go
 create mode 100644 vendor/github.com/Shopify/sarama/client.go
 create mode 100644 vendor/github.com/Shopify/sarama/config.go
 create mode 100644 vendor/github.com/Shopify/sarama/config_resource_type.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go
 create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/create_topics_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/create_topics_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/dev.yml
 create mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/end_txn_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/end_txn_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/errors.go
 create mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/length_field.go
 create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/message.go
 create mode 100644 vendor/github.com/Shopify/sarama/message_set.go
 create mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/metrics.go
 create mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go
 create mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/partitioner.go
 create mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_set.go
 create mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go
 create mode 100644 vendor/github.com/Shopify/sarama/record.go
 create mode 100644 vendor/github.com/Shopify/sarama/record_batch.go
 create mode 100644 vendor/github.com/Shopify/sarama/records.go
 create mode 100644 vendor/github.com/Shopify/sarama/request.go
 create mode 100644 vendor/github.com/Shopify/sarama/response_header.go
 create mode 100644 vendor/github.com/Shopify/sarama/sarama.go
 create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go
 create mode 100644 vendor/github.com/Shopify/sarama/timestamp.go
 create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/utils.go
 create mode 100644 vendor/github.com/apache/thrift/LICENSE
 create mode 100644 vendor/github.com/apache/thrift/NOTICE
 create mode 100644 vendor/github.com/apache/thrift/contrib/fb303/LICENSE
 create mode 100644 vendor/github.com/apache/thrift/debian/copyright
 create mode 100644 vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/client.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/exception.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/field.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_client.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/serializer.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/type.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go
 create mode 100644 vendor/github.com/apache/thrift/lib/hs/LICENSE
 create mode 120000 vendor/github.com/apache/thrift/tutorial/erl/client.sh
 create mode 100644 vendor/github.com/apache/thrift/tutorial/hs/LICENSE
 create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
 create mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE
 create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md
 create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go
 create mode 100644 vendor/github.com/eapache/queue/.gitignore
 create mode 100644 vendor/github.com/eapache/queue/.travis.yml
 create mode 100644 vendor/github.com/eapache/queue/LICENSE
 create mode 100644 vendor/github.com/eapache/queue/README.md
 create mode 100644 vendor/github.com/eapache/queue/queue.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
 create mode 100644 vendor/github.com/opentracing-contrib/go-observer/.gitignore
 create mode 100644 vendor/github.com/opentracing-contrib/go-observer/LICENSE
 create mode 100644 vendor/github.com/opentracing-contrib/go-observer/README.md
 create mode 100644 vendor/github.com/opentracing-contrib/go-observer/observer.go
 create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/LICENSE
 create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
 create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go
 create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/README.md
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/context.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/event.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/span.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/util.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go
 create mode 100644 vendor/github.com/pierrec/lz4/.gitignore
 create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml
 create mode 100644 vendor/github.com/pierrec/lz4/LICENSE
 create mode 100644 vendor/github.com/pierrec/lz4/README.md
 create mode 100644 vendor/github.com/pierrec/lz4/block.go
 create mode 100644 vendor/github.com/pierrec/lz4/lz4.go
 create mode 100644 vendor/github.com/pierrec/lz4/reader.go
 create mode 100644 vendor/github.com/pierrec/lz4/writer.go
 create mode 100644 vendor/github.com/pierrec/xxHash/LICENSE
 create mode 100644 vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore
 create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml
 create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE
 create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md
 create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md
 create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go
 create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go
 create mode 100755 vendor/github.com/rcrowley/go-metrics/validate.sh
 create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/.gitignore
 create mode 100644 vendor/github.com/sercand/kuberesolver/README.md
 create mode 100644 vendor/github.com/sercand/kuberesolver/balancer.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/kubernetes.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/models.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/resolver.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/stream.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/util.go
 create mode 100644 vendor/github.com/sercand/kuberesolver/watcher.go
 create mode 100644 vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go
 create mode 100644 vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go
 create mode 100644 vendor/github.com/weaveworks/common/httpgrpc/server/server.go
 create mode 100644 vendor/github.com/weaveworks/common/server/fake_server.proto
 create mode 100644 vendor/github.com/weaveworks/common/server/server.go
 create mode 100644 vendor/github.com/weaveworks/common/signals/signals.go
 create mode 100644 vendor/github.com/weaveworks/cortex/pkg/util/wire/BUILD.bazel
 create mode 100644 vendor/github.com/weaveworks/cortex/pkg/util/wire/bytes.go
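
The bulk of the new vendor tree is the Zipkin/OpenTracing stack:
zipkin-go-opentracing's collector-kafka.go ships spans to Kafka through
Shopify/sarama (which in turn pulls in the eapache packages, lz4, xxHash32
and go-metrics), and its scribe/zipkincore bindings are generated Thrift
code, hence apache/thrift's lib/go/thrift. Below is a minimal sketch, not
part of this patch, of how these pieces are typically wired together,
assuming the v0.3.3 API vendored here; the broker address and service name
are placeholders.

```go
package main

import (
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	// The Kafka collector ships encoded spans via a sarama producer.
	collector, err := zipkintracer.NewKafkaCollector([]string{"kafka:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer collector.Close()

	// The recorder stamps spans with this service's endpoint metadata.
	recorder := zipkintracer.NewRecorder(collector, false, "localhost:0", "example-svc")

	tracer, err := zipkintracer.NewTracer(recorder)
	if err != nil {
		log.Fatal(err)
	}
	opentracing.SetGlobalTracer(tracer)
}
```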

diff --git a/Gopkg.lock b/Gopkg.lock
index 9ee765fb..1b83e0e5 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,6 +1,18 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
 
 
+[[projects]]
+  name = "github.com/Shopify/sarama"
+  packages = ["."]
+  revision = "f7be6aa2bc7b2e38edf816b08b582782194a1c02"
+  version = "v1.16.0"
+
+[[projects]]
+  name = "github.com/apache/thrift"
+  packages = ["lib/go/thrift"]
+  revision = "327ebb6c2b6df8bf075da02ef45a2a034e9b79ba"
+  version = "0.11.0"
+
 [[projects]]
   branch = "master"
   name = "github.com/beorn7/perks"
@@ -19,6 +31,30 @@
   revision = "5c37fe3735342a2e0d01c87a907579987c8936cc"
   version = "v1.0.0"
 
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/eapache/go-resiliency"
+  packages = ["breaker"]
+  revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/eapache/go-xerial-snappy"
+  packages = ["."]
+  revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
+
+[[projects]]
+  name = "github.com/eapache/queue"
+  packages = ["."]
+  revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
+  version = "v1.1.0"
+
 [[projects]]
   name = "github.com/fluent/fluent-logger-golang"
   packages = ["fluent"]
@@ -65,6 +101,7 @@
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
+    "ptypes/empty",
     "ptypes/timestamp"
   ]
   revision = "925541529c1fa6821df4e44ce2723319eb2be768"
@@ -154,6 +191,18 @@
   revision = "d311cb43c92434ec4072dfbbda3400741d0a6337"
   version = "v0.3.0"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/opentracing-contrib/go-observer"
+  packages = ["."]
+  revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/opentracing-contrib/go-stdlib"
+  packages = ["nethttp"]
+  revision = "36723135187404d2f4002f4f189938565e64cc5c"
+
 [[projects]]
   name = "github.com/opentracing/opentracing-go"
   packages = [
@@ -164,12 +213,37 @@
   revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
   version = "v1.0.2"
 
+[[projects]]
+  name = "github.com/openzipkin/zipkin-go-opentracing"
+  packages = [
+    ".",
+    "flag",
+    "thrift/gen-go/scribe",
+    "thrift/gen-go/zipkincore",
+    "types",
+    "wire"
+  ]
+  revision = "4c9fbcbd6d73a644fd17214fe475296780c68fb5"
+  version = "v0.3.3"
+
 [[projects]]
   name = "github.com/philhofer/fwd"
   packages = ["."]
   revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136"
   version = "v1.0.0"
 
+[[projects]]
+  name = "github.com/pierrec/lz4"
+  packages = ["."]
+  revision = "2fcda4cb7018ce05a25959d2fe08c83e3329f169"
+  version = "v1.1"
+
+[[projects]]
+  name = "github.com/pierrec/xxHash"
+  packages = ["xxHash32"]
+  revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
+  version = "v0.1.1"
+
 [[projects]]
   name = "github.com/pkg/errors"
   packages = ["."]
@@ -240,6 +314,17 @@
   ]
   revision = "def6e5a57439cffe7b44a619c05bce4ac513a63e"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/rcrowley/go-metrics"
+  packages = ["."]
+  revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
+
+[[projects]]
+  name = "github.com/sercand/kuberesolver"
+  packages = ["."]
+  revision = "aa801ca262949d887bbe0bae3f6f731ac82c26f6"
+
 [[projects]]
   name = "github.com/sirupsen/logrus"
   packages = ["."]
@@ -252,6 +337,12 @@
   revision = "b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1"
   version = "1.0.2"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/weaveworks-experiments/loki"
+  packages = ["pkg/client"]
+  revision = "80bb2c795b3f88e2d2ce5dad6b26bd790806f2c8"
+
 [[projects]]
   branch = "master"
   name = "github.com/weaveworks/billing-client"
@@ -264,9 +355,12 @@
   packages = [
     "errors",
     "httpgrpc",
+    "httpgrpc/server",
     "instrument",
     "logging",
     "middleware",
+    "server",
+    "signals",
     "user"
   ]
   revision = "ebab3a78900a09cebce8d0b37f2a69d474796bf4"
@@ -279,7 +373,8 @@
     "pkg/ingester/client",
     "pkg/prom1/storage/metric",
     "pkg/ring",
-    "pkg/util"
+    "pkg/util",
+    "pkg/util/wire"
   ]
   revision = "189025f74e60a8a53940ff098ec73eb8268eb82c"
   source = "github.com/tomwilkie/cortex"
@@ -393,6 +488,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "f507e61940941f1d84caef431c22e590e4c334b8c63384a7f502e88763b834d5"
+  inputs-digest = "3e3b00e44c091332ddc2822d7dd4eb4c42fdeb8c056edcb82c0a3bdb5d0ed59e"
   solver-name = "gps-cdcl"
   solver-version = 1
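
The lock also pins opentracing-contrib/go-stdlib, whose nethttp package
wraps an http.Handler so that each inbound request carries a span. A hedged
sketch of its typical use (the route and port are illustrative, not part of
this patch):

```go
package main

import (
	"log"
	"net/http"

	"github.com/opentracing-contrib/go-stdlib/nethttp"
	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong")) // trivial handler to trace
	})
	// Middleware opens and finishes a span per request with the given tracer.
	log.Fatal(http.ListenAndServe(":8080", nethttp.Middleware(opentracing.GlobalTracer(), mux)))
}
```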
diff --git a/Gopkg.toml b/Gopkg.toml
index b5a61e44..250f5dab 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -24,11 +24,17 @@
 #   go-tests = true
 #   unused-packages = true
 
+required = ["github.com/weaveworks/cortex/pkg/util/wire"]
+
 [[constraint]]
   name = "github.com/weaveworks/cortex"
   branch = "factorings"
   source = "github.com/tomwilkie/cortex"
 
+[[override]]
+  name = "github.com/sercand/kuberesolver"
+  revision = "aa801ca262949d887bbe0bae3f6f731ac82c26f6"
+
 [prune]
   go-tests = true
   unused-packages = true
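
The required list is needed because dep prunes packages that nothing in the
tree imports directly; presumably pkg/util/wire is only referenced from
generated code, so the manifest entry keeps it vendored. The effect is
roughly equivalent to carrying a blank import, as in this illustrative
snippet (no such file is added by this patch):

```go
// Package tools exists only to anchor imports; dep's "required" entry
// achieves the same thing without an extra file in the tree.
package tools

import _ "github.com/weaveworks/cortex/pkg/util/wire"
```

The [[override]] pins kuberesolver, which the lock records by bare revision
rather than a version tag, to a fixed commit for the whole dependency graph.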
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 00000000..c6c482dc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+coverage.txt
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 00000000..cc38769f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,35 @@
+language: go
+go:
+- 1.8.x
+- 1.9.x
+
+env:
+  global:
+  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+  - TOXIPROXY_ADDR=http://localhost:8474
+  - KAFKA_INSTALL_ROOT=/home/travis/kafka
+  - KAFKA_HOSTNAME=localhost
+  - DEBUG=true
+  matrix:
+  - KAFKA_VERSION=0.10.2.1
+  - KAFKA_VERSION=0.11.0.2
+  - KAFKA_VERSION=1.0.0
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+
+install: make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- make fmt
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+
+after_script: vagrant/halt_cluster.sh
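
This CI configuration drives sarama's functional tests against a
five-broker Vagrant cluster addressed via KAFKA_PEERS, with Toxiproxy
available for fault injection. A hedged sketch of how a harness might
consume that environment (assumed, not taken verbatim from sarama's tests):

```go
package main

import (
	"log"
	"os"
	"strings"

	"github.com/Shopify/sarama"
)

func main() {
	// KAFKA_PEERS lists the brokers, comma-separated, as set in .travis.yml.
	peers := strings.Split(os.Getenv("KAFKA_PEERS"), ",")

	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_2_0 // matches one entry of the KAFKA_VERSION matrix

	client, err := sarama.NewClient(peers, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	log.Printf("connected to %d brokers", len(peers))
}
```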
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 00000000..83684165
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,503 @@
+# Changelog
+
+#### Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/Shopify/sarama/pull/1007),
+    [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/Shopify/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB,
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/Shopify/sarama/pull/1002),
+    [#1015](https://github.com/Shopify/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/Shopify/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/Shopify/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/Shopify/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/Shopify/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/Shopify/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/Shopify/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+   version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+   that you still need to specify the Kafka version in the Sarama configuration
+   for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+   active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+   in-sync broker IDs for the given partition, now that the Kafka versions for
+   which this was misleading are no longer in our supported set
+   ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+   partitioner with a custom hash method in case the default (FNV-1a) is not
+   suitable
+   ([#837](https://github.com/Shopify/sarama/pull/837),
+    [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+   correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+   transparent if you're using the `AddGroupProtocol` or
+   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+   ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+   ([#701](https://github.com/Shopify/sarama/pull/701),
+    [#746](https://github.com/Shopify/sarama/pull/746),
+    [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+   ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+   ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+   `HighWaterMarkOffset` values of its child topic/partitions
+   ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+   ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+   ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+   ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+   ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+   ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+   ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+   ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+   in v1.10.0. Go's timers are finicky
+   ([#730](https://github.com/Shopify/sarama/pull/730),
+    [#733](https://github.com/Shopify/sarama/pull/733),
+    [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+   ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
+
+New Features:
+ - Support for Kafka 0.10
+   ([#672](https://github.com/Shopify/sarama/pull/672),
+    [#678](https://github.com/Shopify/sarama/pull/678),
+    [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+   ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+   ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+   ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+   ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+   misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+   ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+   ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+   ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+   ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+   ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+   ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+   ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/Shopify/sarama/pull/648)).
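+
+Enabling it is a matter of setting the `Net.SASL` fields on the `Config` (a
+sketch; the credentials are placeholders):
+
+```go
+config := sarama.NewConfig()
+config.Net.SASL.Enable = true
+config.Net.SASL.User = "username"
+config.Net.SASL.Password = "secret"
+```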
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/Shopify/sarama/pull/605),
+    [#621](https://github.com/Shopify/sarama/pull/621),
+    [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+   ([#586](https://github.com/Shopify/sarama/pull/586),
+   [#588](https://github.com/Shopify/sarama/pull/588),
+   [#590](https://github.com/Shopify/sarama/pull/590)).
+   - Verified that TLS support works
+   ([#581](https://github.com/Shopify/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+   ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/Shopify/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/Shopify/sarama/pull/549),
+   [#550](https://github.com/Shopify/sarama/pull/550),
+   [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+   ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+   to change when Kafka releases built-in TLS support, but for now this is
+   enough to work with TLS-terminating proxies
+   ([#154](https://github.com/Shopify/sarama/pull/154)).
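+
+A sketch of enabling it (assumes `crypto/tls` is imported; real certificate
+and CA setup is elided):
+
+```go
+config := sarama.NewConfig()
+config.Net.TLS.Enable = true
+config.Net.TLS.Config = &tls.Config{} // supply certificates/CAs here
+```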
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+   all other partitions will continue to consume normally
+   ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+   ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+   future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+   ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+   circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - When abandoning a broker in the producer, don't retry its messages until
+   the broker is actually closed ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go; it has moved again and the API has
+   changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go; it has moved from Google Code to GitHub
+   ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+   ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+   ([#450](https://github.com/Shopify/sarama/pull/450),
+   [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+   users to dynamically choose what topics/partitions to consume without
+   instantiating a full client (see the sketch after this list)
+   ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+   ([#439](https://github.com/Shopify/sarama/pull/439),
+   [#442](https://github.com/Shopify/sarama/pull/442)).
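+
+A sketch of the new discovery methods (the broker address is a placeholder):
+
+```go
+consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
+if err != nil {
+	panic(err)
+}
+defer consumer.Close()
+
+topics, err := consumer.Topics() // every topic in the cluster
+if err != nil {
+	panic(err)
+}
+partitions, err := consumer.Partitions(topics[0]) // partitions of one topic
+if err != nil {
+	panic(err)
+}
+_ = partitions
+```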
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+   useful, and slightly less verbose
+   ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+   thundering herd on the first broker in the list
+   ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+   shutting down, fixing several instances of confusing behaviour and at least
+   one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+   making it much more resilient to specific user code ordering
+   ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+   ConsumerMetadataRequests similar to how it tracks partition leadership using
+   regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+   This adds two methods to the client API:
+   - `Coordinator(consumerGroup string) (*Broker, error)`
+   - `RefreshCoordinator(consumerGroup string) error`
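+
+A sketch of a lookup with the new methods (`client` is an existing `Client`;
+the group name is a placeholder and `fmt` is assumed to be imported):
+
+```go
+coordinator, err := client.Coordinator("example-group")
+if err != nil {
+	// a stale cache entry can be refreshed with client.RefreshCoordinator("example-group")
+	panic(err)
+}
+fmt.Println("coordinator:", coordinator.Addr())
+```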
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+   ID/address/port combination for the Coordinator; accessing the fields
+   individually has been deprecated
+   ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+   Consumers will fail to start if the provided offset is out of range
+   ([#418](https://github.com/Shopify/sarama/pull/418))
+   and they will automatically shut down if the offset falls out of range
+   ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+   ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+   it happens to be activated while the client is being closed
+   ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+   ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+   messages due to an improved queue implementation
+   ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+   retry once in the event of stale information or similar
+   ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+   ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+   [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+   broken topics don't choke throughput
+   ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+   ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+   ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences from previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking (see the sketch after this list).
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
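+
+A sketch of the `SyncProducer` described above (the broker address and topic
+are placeholders and `fmt` is assumed to be imported):
+
+```go
+producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
+if err != nil {
+	panic(err)
+}
+defer producer.Close()
+
+partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+	Topic: "example-topic",
+	Value: sarama.StringEncoder("hello"),
+})
+if err != nil {
+	panic(err)
+}
+fmt.Printf("stored at partition %d, offset %d\n", partition, offset)
+```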
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 00000000..8121b63b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 00000000..58a39e4f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,29 @@
+default: fmt vet errcheck test
+
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
+test:
+	echo "" > coverage.txt
+	for d in `go list ./... | grep -v vendor`; do \
+		go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
+		if [ -f profile.out ]; then \
+			cat profile.out >> coverage.txt; \
+			rm profile.out; \
+		fi \
+	done
+
+vet:
+	go vet ./...
+
+errcheck:
+	errcheck github.com/Shopify/sarama/...
+
+fmt:
+	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck get
+
+install_errcheck:
+	go get github.com/kisielk/errcheck
+
+get:
+	go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 00000000..28431f13
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,39 @@
+sarama
+======
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
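+
+As a minimal taste of the API, here is a consumer sketch (the broker address
+and topic are placeholders, `fmt` is assumed to be imported; the godoc linked
+above has complete examples):
+
+```go
+consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
+if err != nil {
+	panic(err)
+}
+defer consumer.Close()
+
+pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
+if err != nil {
+	panic(err)
+}
+for msg := range pc.Messages() {
+	fmt.Printf("offset %d: %s\n", msg.Offset, string(msg.Value))
+}
+```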
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+  technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+  contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 00000000..f4b848a3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  config.vm.provision :shell, path: "vagrant/provision.sh"
+
+  config.vm.network "private_network", ip: "192.168.100.67"
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = MEMORY
+  end
+end
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 00000000..51517359
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,119 @@
+package sarama
+
+type Resource struct {
+	ResourceType AclResourceType
+	ResourceName string
+}
+
+func (r *Resource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(r.ResourceType))
+
+	if err := pe.putString(r.ResourceName); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.ResourceType = AclResourceType(resourceType)
+
+	if r.ResourceName, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type Acl struct {
+	Principal      string
+	Host           string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+	if err := pe.putString(a.Principal); err != nil {
+		return err
+	}
+
+	if err := pe.putString(a.Host); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+	if a.Principal, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
+
+type ResourceAcls struct {
+	Resource
+	Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder) error {
+	if err := r.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Acls)); err != nil {
+		return err
+	}
+	for _, acl := range r.Acls {
+		if err := acl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+	if err := r.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Acls = make([]*Acl, n)
+	for i := 0; i < n; i++ {
+		r.Acls[i] = new(Acl)
+		if err := r.Acls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 00000000..0b6ecbec
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type CreateAclsRequest struct {
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsRequest) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder) error {
+	if err := a.Resource.encode(pe); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 00000000..8a56f357
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,88 @@
+package sarama
+
+import "time"
+
+type CreateAclsResponse struct {
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsResponse) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(a.Err))
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
new file mode 100644
index 00000000..4133dcea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+type DeleteAclsRequest struct {
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
new file mode 100644
index 00000000..b5e1c45e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -0,0 +1,155 @@
+package sarama
+
+import "time"
+
+type DeleteAclsResponse struct {
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range a.FilterResponses {
+		if err := filterResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	a.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		a.FilterResponses[i] = new(FilterResponse)
+		if err := a.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(f.Err))
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(kerr)
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder) error {
+	pe.putInt16(int16(m.Err))
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	m.Err = KError(kerr)
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 00000000..02a5a1f0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,25 @@
+package sarama
+
+type DescribeAclsRequest struct {
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
new file mode 100644
index 00000000..5bc9497f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -0,0 +1,80 @@
+package sarama
+
+import "time"
+
+type DescribeAclsResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(d.Err))
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	d.Err = KError(kerr)
+
+	// ErrMsg is encoded as a nullable string, so decode it the same way.
+	if d.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
new file mode 100644
index 00000000..97063542
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -0,0 +1,61 @@
+package sarama
+
+type AclFilter struct {
+	ResourceType   AclResourceType
+	ResourceName   *string
+	Principal      *string
+	Host           *string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *AclFilter) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.ResourceType))
+	if err := pe.putNullableString(a.ResourceName); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Principal); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Host); err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.ResourceType = AclResourceType(resourceType)
+
+	if a.ResourceName, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Principal, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 00000000..19da6f2f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,42 @@
+package sarama
+
+type AclOperation int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown         AclOperation = 0
+	AclOperationAny             AclOperation = 1
+	AclOperationAll             AclOperation = 2
+	AclOperationRead            AclOperation = 3
+	AclOperationWrite           AclOperation = 4
+	AclOperationCreate          AclOperation = 5
+	AclOperationDelete          AclOperation = 6
+	AclOperationAlter           AclOperation = 7
+	AclOperationDescribe        AclOperation = 8
+	AclOperationClusterAction   AclOperation = 9
+	AclOperationDescribeConfigs AclOperation = 10
+	AclOperationAlterConfigs    AclOperation = 11
+	AclOperationIdempotentWrite AclOperation = 12
+)
+
+type AclPermissionType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = 0
+	AclPermissionAny     AclPermissionType = 1
+	AclPermissionDeny    AclPermissionType = 2
+	AclPermissionAllow   AclPermissionType = 3
+)
+
+type AclResourceType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown         AclResourceType = 0
+	AclResourceAny             AclResourceType = 1
+	AclResourceTopic           AclResourceType = 2
+	AclResourceGroup           AclResourceType = 3
+	AclResourceCluster         AclResourceType = 4
+	AclResourceTransactionalID AclResourceType = 5
+)
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 00000000..6da166c6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type AddOffsetsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 00000000..3a46151a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 00000000..a8a59225
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 00000000..581c556c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddPartitionsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt16(int16(p.Err))
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	p.Err = KError(kerr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 00000000..48c44ead
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,120 @@
+package sarama
+
+type AlterConfigsRequest struct {
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(acr.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range acr.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(acr.ValidateOnly)
+	return nil
+}
+
+func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range acr.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		acr.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	acr.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(ac.Type))
+
+	if err := pe.putString(ac.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range ac.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	ac.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	ac.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		ac.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (acr *AlterConfigsRequest) key() int16 {
+	return 33
+}
+
+func (acr *AlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 00000000..29b09e1f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,95 @@
+package sarama
+
+import "time"
+
+type AlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(ct.Resources)); err != nil {
+		return err
+	}
+
+	for i := range ct.Resources {
+		pe.putInt16(ct.Resources[i].ErrorCode)
+		err := pe.putString(ct.Resources[i].ErrorMsg)
+		if err != nil {
+			return err
+		}
+		pe.putInt8(int8(ct.Resources[i].Type))
+		err = pe.putString(ct.Resources[i].Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range acr.Resources {
+		acr.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (r *AlterConfigsResponse) key() int16 {
+	return 32
+}
+
+func (r *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 00000000..ab65f01c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 00000000..23bc326e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+	ApiKey     int16
+	MinVersion int16
+	MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+	pe.putInt16(b.ApiKey)
+	pe.putInt16(b.MinVersion)
+	pe.putInt16(b.MaxVersion)
+	return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+	var err error
+
+	if b.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type ApiVersionsResponse struct {
+	Err         KError
+	ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+		return err
+	}
+	for _, apiVersion := range r.ApiVersions {
+		if err := apiVersion.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	numBlocks, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+	for i := 0; i < numBlocks; i++ {
+		block := new(ApiVersionsResponseBlock)
+		if err := block.decode(pd); err != nil {
+			return err
+		}
+		r.ApiVersions[i] = block
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 00000000..1eff81cb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,921 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the input channel for the user to write messages to that they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+	Errors() <-chan *ProducerError
+}
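+
+// A minimal usage sketch (the broker address is a placeholder). The Errors()
+// channel is drained in a goroutine, as required to avoid the deadlock
+// described above:
+//
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	go func() {
+//		for perr := range producer.Errors() {
+//			Logger.Println(perr)
+//		}
+//	}()
+//	producer.Input() <- &ProducerMessage{Topic: "example-topic", Value: StringEncoder("hello")}
+//	// ... once all messages are sent:
+//	producer.AsyncClose()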
+
+type asyncProducer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]chan<- *ProducerMessage
+	brokerRefs map[chan<- *ProducerMessage]int
+	brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	p.(*asyncProducer).ownClient = true
+	return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	p := &asyncProducer{
+		client:     client,
+		conf:       client.Config(),
+		errors:     make(chan *ProducerError),
+		input:      make(chan *ProducerMessage),
+		successes:  make(chan *ProducerMessage),
+		retries:    make(chan *ProducerMessage),
+		brokers:    make(map[*Broker]chan<- *ProducerMessage),
+		brokerRefs: make(map[chan<- *ProducerMessage]int),
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                          // final message from partitionProducer to brokerProducer and back
+	shutdown                     // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp is the timestamp assigned to the message by the broker. This
+	// is only guaranteed to be defined if the message was successfully
+	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+	// least version 0.10.0.
+	Timestamp time.Time
+
+	retries int
+	flags   flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
+	if m.Key != nil {
+		size += m.Key.Length()
+	}
+	if m.Value != nil {
+		size += m.Value.Length()
+	}
+	return size
+}
+
+func (m *ProducerMessage) clear() {
+	m.flags = 0
+	m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+	Msg *ProducerMessage
+	Err error
+}
+
+func (pe ProducerError) Error() string {
+	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var errors ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			errors = append(errors, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		} else if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ErrMessageSizeTooLarge)
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		if tp.partitioner.RequiresConsistency() {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
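+
+// Editor's note: a sketch, not part of the sarama source, of steering the
+// choice made above. NewManualPartitioner reports RequiresConsistency() as
+// true and returns msg.Partition verbatim, so the caller picks the partition
+// (which must be a valid partition for the topic).
+//
+//	config := sarama.NewConfig()
+//	config.Producer.Partitioner = sarama.NewManualPartitioner
+//	// ...build the producer with this config, then:
+//	producer.Input() <- &sarama.ProducerMessage{
+//		Topic:     "events",
+//		Partition: 3,
+//		Value:     sarama.StringEncoder("payload"),
+//	}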
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader  *Broker
+	breaker *breaker.Breaker
+	output  chan<- *ProducerMessage
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush).
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
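+
+// Editor's note: an illustrative sketch, not part of the sarama source; the
+// depth of the retryState slice above comes directly from these settings.
+//
+//	config := sarama.NewConfig()
+//	config.Producer.Retry.Max = 5                          // retryState gets Max+1 levels
+//	config.Producer.Retry.Backoff = 250 * time.Millisecond // pause taken between retry levels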
+
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.output = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	for msg := range pp.input {
+		if msg.retries > pp.highWatermark {
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+
+		if pp.output == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnError(msg, err)
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+				continue
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		pp.output <- msg
+	}
+
+	if pp.output != nil {
+		pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+	pp.output = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.output == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.output <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.output = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		for set := range bridge {
+			request := set.buildRequest()
+
+			response, err := broker.Produce(request)
+
+			responses <- &brokerProducerResponse{
+				set: set,
+				err: err,
+				res: response,
+			}
+		}
+		close(responses)
+	})
+
+	return input
+}
+
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     <-chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+
+	buffer     *produceSet
+	timer      <-chan time.Time
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg := <-bp.input:
+			if msg == nil {
+				bp.shutdown()
+				return
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				if err := bp.waitForSpace(msg); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+			}
+		case <-bp.timer:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		}
+
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
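+
+// Editor's note: a sketch, not part of the sarama source, of the settings the
+// loop above reacts to; any one trigger firing makes the buffer eligible to
+// flush on the next pass.
+//
+//	config := sarama.NewConfig()
+//	config.Producer.Flush.Frequency = 100 * time.Millisecond // arms bp.timer above
+//	config.Producer.Flush.Messages = 64                      // rough message-count trigger
+//	config.Producer.Flush.Bytes = 1 << 20                    // rough byte-size trigger
+//	config.Producer.MaxMessageBytes = 1000000                // per-message cap checked by the dispatcher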
+
+func (bp *brokerProducer) shutdown() {
+	for !bp.buffer.empty() {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+		}
+	}
+	close(bp.output)
+	for response := range bp.responses {
+		bp.handleResponse(response)
+	}
+
+	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+	if bp.closing != nil {
+		return bp.closing
+	}
+
+	return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+	for {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+			// handling a response can change our state, so re-check some things
+			if reason := bp.needsRetry(msg); reason != nil {
+				return reason
+			} else if !bp.buffer.wouldOverflow(msg) {
+				return nil
+			}
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+			return nil
+		}
+	}
+}
+
+func (bp *brokerProducer) rollOver() {
+	bp.timer = nil
+	bp.timerFired = false
+	bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+	if response.err != nil {
+		bp.handleError(response.set, response.err)
+	} else {
+		bp.handleSuccess(response.set, response.res)
+	}
+
+	if bp.buffer.empty() {
+		bp.rollOver() // this can happen if the response invalidated our buffer
+	}
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+			Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+				bp.broker.ID(), topic, partition, block.Err)
+			bp.currentRetries[topic][partition] = block.Err
+			bp.parent.retryMessages(msgs, block.Err)
+			bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+		// Other non-retriable errors
+		default:
+			bp.parent.returnErrors(msgs, block.Err)
+		}
+	})
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	switch err.(type) {
+	case PacketEncodingError:
+		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+			bp.parent.returnErrors(msgs, err)
+		})
+	default:
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+			bp.parent.retryMessages(msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+			bp.parent.retryMessages(msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				buf.Remove()
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+	}
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	if p.ownClient {
+		err := p.client.Close()
+		if err != nil {
+			Logger.Println("producer/shutdown failed to close the embedded client:", err)
+		}
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 00000000..b759f8f7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,823 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	id   int32
+	addr string
+
+	conf          *Config
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        int32
+
+	responses chan responsePromise
+	done      chan bool
+
+	incomingByteRate       metrics.Meter
+	requestRate            metrics.Meter
+	requestSize            metrics.Histogram
+	requestLatency         metrics.Histogram
+	outgoingByteRate       metrics.Meter
+	responseRate           metrics.Meter
+	responseSize           metrics.Histogram
+	brokerIncomingByteRate metrics.Meter
+	brokerRequestRate      metrics.Meter
+	brokerRequestSize      metrics.Histogram
+	brokerRequestLatency   metrics.Histogram
+	brokerOutgoingByteRate metrics.Meter
+	brokerResponseRate     metrics.Meter
+	brokerResponseSize     metrics.Histogram
+}
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	packets       chan []byte
+	errors        chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// It does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+		return ErrAlreadyConnected
+	}
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	err := conf.Validate()
+	if err != nil {
+		return err
+	}
+
+	b.lock.Lock()
+
+	go withRecover(func() {
+		defer b.lock.Unlock()
+
+		dialer := net.Dialer{
+			Timeout:   conf.Net.DialTimeout,
+			KeepAlive: conf.Net.KeepAlive,
+		}
+
+		if conf.Net.TLS.Enable {
+			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+		} else {
+			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+		}
+		if b.connErr != nil {
+			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+			b.conn = nil
+			atomic.StoreInt32(&b.opened, 0)
+			return
+		}
+		b.conn = newBufConn(b.conn)
+
+		b.conf = conf
+
+		// Create or reuse the global metrics shared between brokers
+		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
+		// the same id (-1) and are already exposed through the global metrics above
+		if b.id >= 0 {
+			b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
+			b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
+			b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
+			b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
+			b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
+			b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
+			b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+		}
+
+		if conf.Net.SASL.Enable {
+			b.connErr = b.sendAndReceiveSASLPlainAuth()
+			if b.connErr != nil {
+				err = b.conn.Close()
+				if err == nil {
+					Logger.Printf("Closed connection to broker %s\n", b.addr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+				}
+				b.conn = nil
+				atomic.StoreInt32(&b.opened, 0)
+				return
+			}
+		}
+
+		b.done = make(chan bool)
+		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+		if b.id >= 0 {
+			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+		} else {
+			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+		}
+		go withRecover(b.responseReceiver)
+	})
+
+	return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.conn != nil, b.connErr
+}
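+
+// Editor's note: a minimal sketch, not part of the sarama source, of driving a
+// Broker by hand, using Connected() to make Open effectively synchronous; the
+// address is hypothetical.
+//
+//	broker := sarama.NewBroker("localhost:9092")
+//	if err := broker.Open(sarama.NewConfig()); err != nil {
+//		log.Fatal(err)
+//	}
+//	if ok, err := broker.Connected(); !ok {
+//		log.Fatal(err) // the dial goroutine recorded why it failed
+//	}
+//	meta, err := broker.GetMetadata(&sarama.MetadataRequest{}) // an empty Topics list requests all topics
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("cluster has %d topics across %d brokers", len(meta.Topics), len(meta.Brokers))
+//	_ = broker.Close()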
+
+func (b *Broker) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return ErrNotConnected
+	}
+
+	close(b.responses)
+	<-b.done
+
+	err := b.conn.Close()
+
+	b.conn = nil
+	b.connErr = nil
+	b.done = nil
+	b.responses = nil
+
+	if b.id >= 0 {
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
+	}
+
+	if err == nil {
+		Logger.Printf("Closed connection to broker %s\n", b.addr)
+	} else {
+		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+	}
+
+	atomic.StoreInt32(&b.opened, 0)
+
+	return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+	return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var response *ProduceResponse
+	var err error
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		if b.connErr != nil {
+			return nil, b.connErr
+		}
+		return nil, ErrNotConnected
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.conn.Write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		return nil, err
+	}
+	b.correlationID++
+
+	if !promiseResponse {
+		// Record request latency without the response
+		b.updateRequestLatencyMetrics(time.Since(requestTime))
+		return nil, nil
+	}
+
+	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+	b.responses <- promise
+
+	return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+	promise, err := b.send(req, res != nil)
+
+	if err != nil {
+		return err
+	}
+
+	if promise == nil {
+		return nil
+	}
+
+	select {
+	case buf := <-promise.packets:
+		return versionedDecode(buf, res, req.version())
+	case err = <-promise.errors:
+		return err
+	}
+}
+
+func (b *Broker) decode(pd packetDecoder) (err error) {
+	b.id, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	host, err := pd.getString()
+	if err != nil {
+		return err
+	}
+
+	port, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+	if _, _, err := net.SplitHostPort(b.addr); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *Broker) encode(pe packetEncoder) (err error) {
+	host, portstr, err := net.SplitHostPort(b.addr)
+	if err != nil {
+		return err
+	}
+	port, err := strconv.Atoi(portstr)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(b.id)
+
+	err = pe.putString(host)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(int32(port))
+
+	return nil
+}
+
+func (b *Broker) responseReceiver() {
+	var dead error
+	header := make([]byte, 8)
+	for response := range b.responses {
+		if dead != nil {
+			response.errors <- dead
+			continue
+		}
+
+		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+		if err != nil {
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		bytesReadHeader, err := io.ReadFull(b.conn, header)
+		requestLatency := time.Since(response.requestTime)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		decodedHeader := responseHeader{}
+		err = decode(header, &decodedHeader)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+		if decodedHeader.correlationID != response.correlationID {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			// TODO if decoded ID < cur ID, discard until we catch up
+			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+			response.errors <- dead
+			continue
+		}
+
+		buf := make([]byte, decodedHeader.length-4)
+		bytesReadBody, err := io.ReadFull(b.conn, buf)
+		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+		if err != nil {
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		response.packets <- buf
+	}
+	close(b.done)
+}
+
+func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
+	rb := &SaslHandshakeRequest{"PLAIN"}
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return err
+	}
+
+	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		return err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.conn.Write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+		return err
+	}
+	b.correlationID++
+	// wait for the response
+	header := make([]byte, 8) // response header
+	_, err = io.ReadFull(b.conn, header)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+		return err
+	}
+	length := binary.BigEndian.Uint32(header[:4])
+	payload := make([]byte, length-4)
+	n, err := io.ReadFull(b.conn, payload)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+		return err
+	}
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &SaslHandshakeResponse{}
+	err = versionedDecode(payload, res, 0)
+	if err != nil {
+		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+		return err
+	}
+	if res.Err != ErrNoError {
+		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+		return res.Err
+	}
+	Logger.Print("Successful SASL handshake")
+	return nil
+}
+
+// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
+// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
+//   authcid   = 1*SAFE ; MUST accept up to 255 octets
+//   authzid   = 1*SAFE ; MUST accept up to 255 octets
+//   passwd    = 1*SAFE ; MUST accept up to 255 octets
+//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
+//
+//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
+//                  ;; any UTF-8 encoded Unicode character except NUL
+//
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
+// of responding to bad credentials, but that's how it's done today.
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	if b.conf.Net.SASL.Handshake {
+		handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+			return handshakeErr
+		}
+	}
+	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) //4 byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	requestTime := time.Now()
+	bytesWritten, err := b.conn.Write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := io.ReadFull(b.conn, header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
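+
+// Editor's note: an illustrative sketch, not part of the sarama source, of the
+// configuration that routes a connection through the handshake and auth above;
+// the credentials are placeholders.
+//
+//	config := sarama.NewConfig()
+//	config.Net.SASL.Enable = true
+//	config.Net.SASL.User = "alice"
+//	config.Net.SASL.Password = "secret"
+//	config.Net.SASL.Handshake = true // set false to skip the handshake for pre-0.10 brokers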
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyMetrics(requestLatency)
+	b.responseRate.Mark(1)
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 00000000..3dbfc4b0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,794 @@
+package sarama
+
+import (
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+	// Config returns the Config struct of the client. This struct should not be
+	// altered after it has been created.
+	Config() *Config
+
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
+	// Topics returns the set of available topics as retrieved from cluster metadata.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	Partitions(topic string) ([]int32, error)
+
+	// WritablePartitions returns the sorted list of all writable partition IDs for
+	// the given topic, where "writable" means "having a valid leader accepting
+	// writes".
+	WritablePartitions(topic string) ([]int32, error)
+
+	// Leader returns the broker object that is the leader of the current
+	// topic/partition, as determined by querying the cluster metadata.
+	Leader(topic string, partitionID int32) (*Broker, error)
+
+	// Replicas returns the set of all replica IDs for the given partition.
+	Replicas(topic string, partitionID int32) ([]int32, error)
+
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
+	// available metadata for those topics. If no topics are provided, it will refresh
+	// metadata for all topics.
+	RefreshMetadata(topics ...string) error
+
+	// GetOffset queries the cluster to get the most recent available offset at the
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a
+	// timestamp in milliseconds.
+	GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.8.2 and higher.
+	Coordinator(consumerGroup string) (*Broker, error)
+
+	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+	// in local cache. This function only works on Kafka 0.8.2 and higher.
+	RefreshCoordinator(consumerGroup string) error
+
+	// Close shuts down all broker connections managed by this client. It is required
+	// to call this function before a client object passes out of scope, as it will
+	// otherwise leak memory. You must close any Producers or Consumers using a client
+	// before you close the client.
+	Close() error
+
+	// Closed returns true if the client has already had Close called on it
+	Closed() bool
+}
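+
+// Editor's note: a minimal sketch, not part of the sarama source, of the
+// interface above as seen from an importing package; the broker address and
+// topic are hypothetical. The same client can then back a producer via
+// NewAsyncProducerFromClient.
+//
+//	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close()
+//
+//	partitions, err := client.Partitions("web-logs")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("web-logs has %d partitions", len(partitions))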
+
+const (
+	// OffsetNewest stands for the log head offset, i.e. the offset that will be
+	// assigned to the next message that will be produced to the partition. You
+	// can send this to a client's GetOffset method to get this offset, or when
+	// calling ConsumePartition to start consuming new messages.
+	OffsetNewest int64 = -1
+	// OffsetOldest stands for the oldest offset available on the broker for a
+	// partition. You can send this to a client's GetOffset method to get this
+	// offset, or when calling ConsumePartition to start consuming from the
+	// oldest offset that is still available on the broker.
+	OffsetOldest int64 = -2
+)
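+
+// Editor's note: a sketch, not part of the sarama source, of the two sentinel
+// values in action; the topic and partition are hypothetical.
+//
+//	oldest, err := client.GetOffset("web-logs", 0, sarama.OffsetOldest)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	newest, err := client.GetOffset("web-logs", 0, sarama.OffsetNewest)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("partition 0 currently holds %d messages", newest-oldest)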
+
+type client struct {
+	conf           *Config
+	closer, closed chan none // for shutting down background metadata updater
+
+	// the broker addresses given to us through the constructor are not guaranteed to be returned in
+	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+	// so we store them separately
+	seedBrokers []*Broker
+	deadSeeds   []*Broker
+
+	brokers      map[int32]*Broker                       // maps broker ids to brokers
+	metadata     map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+	coordinators map[string]int32                        // Maps consumer group names to coordinating broker IDs
+
+	// If the number of partitions is large, we can get some churn calling cachedPartitions,
+	// so the result is cached. It is important to update this value whenever metadata is changed
+	cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+	lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+	Logger.Println("Initializing new client")
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+
+	if len(addrs) < 1 {
+		return nil, ConfigurationError("You must provide at least one broker address")
+	}
+
+	client := &client{
+		conf:                    conf,
+		closer:                  make(chan none),
+		closed:                  make(chan none),
+		brokers:                 make(map[int32]*Broker),
+		metadata:                make(map[string]map[int32]*PartitionMetadata),
+		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+		coordinators:            make(map[string]int32),
+	}
+
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
+		err := client.RefreshMetadata()
+		switch err {
+		case nil:
+			break
+		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
+			Logger.Println(err)
+		default:
+			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+			_ = client.Close()
+			return nil, err
+		}
+	}
+	go withRecover(client.backgroundMetadataUpdater)
+
+	Logger.Println("Successfully initialized new client")
+
+	return client, nil
+}
+
+func (client *client) Config() *Config {
+	return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0)
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
+func (client *client) Close() error {
+	if client.Closed() {
+		// Chances are this is being called from a defer() and the error will go unobserved,
+		// so we go ahead and log the event in this case.
+		Logger.Printf("Close() called on already closed client")
+		return ErrClosedClient
+	}
+
+	// shutdown and wait for the background thread before we take the lock, to avoid races
+	close(client.closer)
+	<-client.closed
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	Logger.Println("Closing Client")
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	client.brokers = nil
+	client.metadata = nil
+
+	return nil
+}
+
+func (client *client) Closed() bool {
+	return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadata))
+	for topic := range client.metadata {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, allPartitions)
+
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, allPartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, writablePartitions)
+
+	// len==0 catches both the nil case (no such topic) and the odd case where every single
+	// partition is undergoing leader election simultaneously. Callers must be able to handle
+	// this function returning an empty slice (which is a valid return value), but catching it
+	// here on the first pass (note we *don't* catch it below, where we return
+	// ErrUnknownTopicOrPartition) triggers a metadata refresh as a nicety, so callers can just
+	// try again without manually triggering a refresh (otherwise they'd keep getting a stale cached copy).
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, writablePartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Replicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Isr), metadata.Err
+	}
+	return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	leader, err := client.cachedLeader(topic, partitionID)
+
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		leader, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning a proper
+	// error. This handles that case by returning an error instead of sending it
+	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+	for _, topic := range topics {
+		if len(topic) == 0 {
+			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+		}
+	}
+
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, time)
+
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, time)
+	}
+
+	return offset, err
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
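+
+// Editor's note: an illustrative sketch, not part of the sarama source; it
+// requires a 0.8.2+ cluster and uses a hypothetical group name.
+//
+//	coordinator, err := client.Coordinator("my-consumer-group")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("group coordinator is broker #%d at %s", coordinator.ID(), coordinator.Addr())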
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered under a different
+// address, it is replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker moves the broker to the dead-seeds list if it is the current seed
+// broker; otherwise it removes the broker from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	} else {
+		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+		// but we really shouldn't have to; once that loop is made better this case can be
+		// removed, and the function generally can be renamed from `deregisterBroker` to
+		// `nextSeedBroker` or something
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	// not guaranteed to be random *or* deterministic
+	for _, broker := range client.brokers {
+		_ = broker.Open(client.conf)
+		return broker
+	}
+
+	return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in updateMetadata()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if metadata.Err == ErrLeaderNotAvailable {
+				return nil, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, nil
+		}
+	}
+
+	return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := &OffsetRequest{}
+	if client.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 1
+	}
+	request.AddBlock(topic, partitionID, time, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if block.Err != ErrNoError {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			topics := []string{}
+			if !client.conf.Metadata.Full {
+				if specificTopics, err := client.Topics(); err != nil {
+					Logger.Println("Client background metadata topic load:", err)
+					break
+				} else if len(specificTopics) == 0 {
+					Logger.Println("Client background metadata update: no specific topics to update")
+					break
+				} else {
+					topics = specificTopics
+				}
+			}
+
+			if err := client.RefreshMetadata(topics...); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
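+
+// exampleDisableBackgroundRefresh is a hedged sketch, not part of the upstream
+// sarama source: setting Metadata.RefreshFrequency to zero makes
+// backgroundMetadataUpdater return immediately, so no periodic metadata
+// refresh runs.
+func exampleDisableBackgroundRefresh() *Config {
+	conf := NewConfig()
+	conf.Metadata.RefreshFrequency = 0 // documented as "set to 0 to disable"
+	return conf
+}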
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(client.conf.Metadata.Retry.Backoff)
+			return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+		}
+		return err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		if len(topics) > 0 {
+			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+		response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+		switch err.(type) {
+		case nil:
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+
+		case PacketEncodingError:
+			// didn't even send, return the error
+			return err
+		default:
+			// some other error, remove that broker and try again
+			Logger.Println("client/metadata got error from broker while fetching metadata:", err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// updateMetadata caches the given metadata. It reports whether the request should be
+// retried (e.g. some partitions were leaderless due to ErrLeaderNotAvailable) and
+// returns any topic-level error encountered.
+func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	// - if it is a new ID, save it
+	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - otherwise ignore it, replacing our existing one would just bounce the connection
+	for _, broker := range data.Brokers {
+		client.registerBroker(broker)
+	}
+
+	for _, topic := range data.Topics {
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			break
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+			break
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if partition.Err == ErrLeaderNotAvailable {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+	retry := func(err error) (*ConsumerMetadataResponse, error) {
+		if attemptsRemaining > 0 {
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(client.conf.Metadata.Retry.Backoff)
+			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+		}
+		return nil, err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+		request := new(ConsumerMetadataRequest)
+		request.ConsumerGroup = consumerGroup
+
+		response, err := broker.GetConsumerMetadata(request)
+
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			switch err.(type) {
+			case PacketEncodingError:
+				return nil, err
+			default:
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		switch response.Err {
+		case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+
+		case ErrConsumerCoordinatorNotAvailable:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+		// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 00000000..29ea5c2b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,442 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL-based authentication with the broker. While there are multiple SASL
+		// authentication methods, the current implementation is limited to
+		// plaintext (SASL/PLAIN) authentication.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// User and Password are the credentials for SASL/PLAIN authentication.
+			User     string
+			Password string
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection.
+		// If zero, keep-alives are disabled. (default is 0: disabled).
+		KeepAlive time.Duration
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+		}
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	//
+	// Note that Sarama's Consumer type does not currently support automatic
+	// consumer-group rebalancing and offset tracking.  For Zookeeper-based
+	// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+	// library builds on Sarama to add this support. For Kafka-based tracking
+	// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+	// builds on Sarama to add this support.
+	Consumer struct {
+		Retry struct {
+			// How long to wait after a failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// How frequently to commit updated offsets. Defaults to 1s.
+			CommitInterval time.Duration
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used).  Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+		}
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See the Examples for how to use the metrics registry.
+	MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+
+	c.Producer.MaxMessageBytes = 1000000
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 250 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.CommitInterval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.Version = minVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
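+
+// exampleConfig is a hedged usage sketch, not part of the upstream sarama
+// source: start from NewConfig's defaults, override what the application
+// needs, and validate before use. The client ID and version below are
+// illustrative placeholders.
+func exampleConfig() (*Config, error) {
+	conf := NewConfig()
+	conf.ClientID = "my-application" // see the ClientID docs above
+	conf.Version = V0_10_0_0         // assume brokers run Kafka >= 0.10
+	conf.Producer.Return.Successes = true
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+	return conf, nil
+}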
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+	// some configuration values should be warned on but not fail completely, do those first
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if !c.Net.SASL.Enable {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.KeepAlive < 0:
+		return ConfigurationError("Net.KeepAlive must be >= 0")
+	case c.Net.SASL.Enable && c.Net.SASL.User == "":
+		return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+	case c.Net.SASL.Enable && c.Net.SASL.Password == "":
+		return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+	}
+
+	// validate the Metadata values
+	switch {
+	case c.Metadata.Retry.Max < 0:
+		return ConfigurationError("Metadata.Retry.Max must be >= 0")
+	case c.Metadata.Retry.Backoff < 0:
+		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+	case c.Metadata.RefreshFrequency < 0:
+		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+	}
+
+	// validate the Producer values
+	switch {
+	case c.Producer.MaxMessageBytes <= 0:
+		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+	case c.Producer.RequiredAcks < -1:
+		return ConfigurationError("Producer.RequiredAcks must be >= -1")
+	case c.Producer.Timeout <= 0:
+		return ConfigurationError("Producer.Timeout must be > 0")
+	case c.Producer.Partitioner == nil:
+		return ConfigurationError("Producer.Partitioner must not be nil")
+	case c.Producer.Flush.Bytes < 0:
+		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+	case c.Producer.Flush.Messages < 0:
+		return ConfigurationError("Producer.Flush.Messages must be >= 0")
+	case c.Producer.Flush.Frequency < 0:
+		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+	case c.Producer.Flush.MaxMessages < 0:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+	case c.Producer.Retry.Max < 0:
+		return ConfigurationError("Producer.Retry.Max must be >= 0")
+	case c.Producer.Retry.Backoff < 0:
+		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+	}
+
+	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+	}
+
+	// validate the Consumer values
+	switch {
+	case c.Consumer.Fetch.Min <= 0:
+		return ConfigurationError("Consumer.Fetch.Min must be > 0")
+	case c.Consumer.Fetch.Default <= 0:
+		return ConfigurationError("Consumer.Fetch.Default must be > 0")
+	case c.Consumer.Fetch.Max < 0:
+		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+	case c.Consumer.MaxProcessingTime <= 0:
+		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+	case c.Consumer.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+	case c.Consumer.Offsets.CommitInterval <= 0:
+		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+	}
+
+	// validate misc shared values
+	switch {
+	case c.ChannelBufferSize < 0:
+		return ConfigurationError("ChannelBufferSize must be >= 0")
+	case !validID.MatchString(c.ClientID):
+		return ConfigurationError("ClientID is invalid")
+	}
+
+	return nil
+}
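+
+// exampleValidate is a hedged sketch, not part of the upstream sarama source:
+// Validate reports the first problem it finds as a ConfigurationError.
+func exampleValidate() error {
+	conf := NewConfig()
+	conf.Metadata.Retry.Max = -1 // deliberately invalid
+	return conf.Validate()       // returns "Metadata.Retry.Max must be >= 0"
+}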
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
new file mode 100644
index 00000000..848cc9c9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -0,0 +1,15 @@
+package sarama
+
+// ConfigResourceType identifies the type of Kafka resource a configuration
+// entry applies to. Values taken from:
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+type ConfigResourceType int8
+
+const (
+	UnknownResource ConfigResourceType = 0
+	AnyResource     ConfigResourceType = 1
+	TopicResource   ConfigResourceType = 2
+	GroupResource   ConfigResourceType = 3
+	ClusterResource ConfigResourceType = 4
+	BrokerResource  ConfigResourceType = 5
+)
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 00000000..48d231cf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,833 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Key, Value     []byte
+	Topic          string
+	Partition      int32
+	Offset         int64
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+}
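+
+// exampleConsume is a hedged usage sketch, not part of the upstream sarama
+// source, of the Consumer interface above. The broker address, topic and
+// partition are illustrative placeholders.
+func exampleConsume() error {
+	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+	if err != nil {
+		return err
+	}
+	defer func() { _ = consumer.Close() }() // runs after the PartitionConsumer is closed
+
+	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = pc.Close() }()
+
+	// Loops until the PartitionConsumer is closed and its channel drained.
+	for msg := range pc.Messages() {
+		Logger.Printf("partition %d offset %d: %s", msg.Partition, msg.Offset, msg.Value)
+	}
+	return nil
+}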
+
+type consumer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	lock            sync.Mutex
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	c.(*consumer).ownClient = true
+	return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	c := &consumer{
+		client:          client,
+		conf:            client.Config(),
+		children:        make(map[string]map[int32]*partitionConsumer),
+		brokerConsumers: make(map[*Broker]*brokerConsumer),
+	}
+
+	return c, nil
+}
+
+func (c *consumer) Close() error {
+	if c.ownClient {
+		return c.client.Close()
+	}
+	return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+	return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+	return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+	child := &partitionConsumer{
+		consumer:  c,
+		conf:      c.conf,
+		topic:     topic,
+		partition: partition,
+		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
+		feeder:    make(chan *FetchResponse, 1),
+		trigger:   make(chan none, 1),
+		dying:     make(chan none),
+		fetchSize: c.conf.Consumer.Fetch.Default,
+	}
+
+	if err := child.chooseStartingOffset(offset); err != nil {
+		return nil, err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+		return nil, err
+	}
+
+	if err := c.addChild(child); err != nil {
+		return nil, err
+	}
+
+	go withRecover(child.dispatcher)
+	go withRecover(child.responseFeeder)
+
+	child.broker = c.refBrokerConsumer(leader)
+	child.broker.input <- child
+
+	return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	hwms := make(map[string]map[int32]int64)
+	for topic, p := range c.children {
+		hwm := make(map[int32]int64, len(p))
+		for partition, pc := range p {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	topicChildren := c.children[child.topic]
+	if topicChildren == nil {
+		topicChildren = make(map[int32]*partitionConsumer)
+		c.children[child.topic] = topicChildren
+	}
+
+	if topicChildren[child.partition] != nil {
+		return ConfigurationError("That topic/partition is already being consumed")
+	}
+
+	topicChildren[child.partition] = child
+	return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	bc := c.brokerConsumers[broker]
+	if bc == nil {
+		bc = c.newBrokerConsumer(broker)
+		c.brokerConsumers[broker] = bc
+	}
+
+	bc.refs++
+
+	return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	brokerWorker.refs--
+
+	if brokerWorker.refs == 0 {
+		close(brokerWorker.input)
+		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+			delete(c.brokerConsumers, brokerWorker.broker)
+		}
+	}
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+}
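+
+// exampleAsyncClose is a hedged sketch, not part of the upstream sarama
+// source, of the tear-down pattern described above: after AsyncClose, keep
+// servicing both channels until the teardown closes them.
+func exampleAsyncClose(pc PartitionConsumer) {
+	pc.AsyncClose()
+	messages, errs := pc.Messages(), pc.Errors()
+	for messages != nil || errs != nil {
+		select {
+		case msg, ok := <-messages:
+			if !ok {
+				messages = nil // channel closed, stop selecting on it
+				continue
+			}
+			Logger.Println("drained message at offset", msg.Offset)
+		case err, ok := <-errs:
+			if !ok {
+				errs = nil
+				continue
+			}
+			Logger.Println("drained error:", err)
+		}
+	}
+}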
+
+type partitionConsumer struct {
+	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	consumer            *consumer
+	conf                *Config
+	topic               string
+	partition           int32
+
+	broker   *brokerConsumer
+	messages chan *ConsumerMessage
+	errors   chan *ConsumerError
+	feeder   chan *FetchResponse
+
+	trigger, dying chan none
+	responseResult error
+
+	fetchSize int32
+	offset    int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+	cErr := &ConsumerError{
+		Topic:     child.topic,
+		Partition: child.partition,
+		Err:       err,
+	}
+
+	if child.conf.Consumer.Return.Errors {
+		child.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (child *partitionConsumer) dispatcher() {
+	for range child.trigger {
+		select {
+		case <-child.dying:
+			close(child.trigger)
+		case <-time.After(child.conf.Consumer.Retry.Backoff):
+			if child.broker != nil {
+				child.consumer.unrefBrokerConsumer(child.broker)
+				child.broker = nil
+			}
+
+			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+			if err := child.dispatch(); err != nil {
+				child.sendError(err)
+				child.trigger <- none{}
+			}
+		}
+	}
+
+	if child.broker != nil {
+		child.consumer.unrefBrokerConsumer(child.broker)
+	}
+	child.consumer.removeChild(child)
+	close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+		return err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+		return err
+	}
+
+	child.broker = child.consumer.refBrokerConsumer(leader)
+
+	child.broker.input <- child
+
+	return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+	if err != nil {
+		return err
+	}
+	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case offset == OffsetNewest:
+		child.offset = newestOffset
+	case offset == OffsetOldest:
+		child.offset = oldestOffset
+	case offset >= oldestOffset && offset <= newestOffset:
+		child.offset = offset
+	default:
+		return ErrOffsetOutOfRange
+	}
+
+	return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+	return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+	return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
+	// also just close itself)
+	close(child.dying)
+}
+
+func (child *partitionConsumer) Close() error {
+	child.AsyncClose()
+
+	go withRecover(func() {
+		for range child.messages {
+			// drain
+		}
+	})
+
+	var errors ConsumerErrors
+	for err := range child.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
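+
+// exampleLag is a hedged sketch, not part of the upstream sarama source: the
+// difference between the high water mark and the next offset the application
+// will read approximates how far behind consumption is, as described in the
+// HighWaterMarkOffset docs above. nextOffset is supplied by the caller.
+func exampleLag(pc PartitionConsumer, nextOffset int64) int64 {
+	return pc.HighWaterMarkOffset() - nextOffset
+}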
+
+func (child *partitionConsumer) responseFeeder() {
+	var msgs []*ConsumerMessage
+	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+	firstAttempt := true
+
+feederLoop:
+	for response := range child.feeder {
+		msgs, child.responseResult = child.parseResponse(response)
+
+		for i, msg := range msgs {
+		messageSelect:
+			select {
+			case child.messages <- msg:
+				firstAttempt = true
+			case <-expiryTicker.C:
+				if !firstAttempt {
+					child.responseResult = errTimedOut
+					child.broker.acks.Done()
+					for _, msg = range msgs[i:] {
+						child.messages <- msg
+					}
+					child.broker.input <- child
+					expiryTicker.Stop()
+					continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					firstAttempt = false
+					goto messageSelect
+				}
+			}
+		}
+
+		child.broker.acks.Done()
+	}
+
+	expiryTicker.Stop()
+	close(child.messages)
+	close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	var incomplete bool
+	prelude := true
+
+	for _, msgBlock := range msgSet.Messages {
+		for _, msg := range msgBlock.Messages() {
+			offset := msg.Offset
+			if msg.Msg.Version >= 1 {
+				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+				offset += baseOffset
+			}
+			if prelude && offset < child.offset {
+				continue
+			}
+			prelude = false
+
+			if offset >= child.offset {
+				messages = append(messages, &ConsumerMessage{
+					Topic:          child.topic,
+					Partition:      child.partition,
+					Key:            msg.Msg.Key,
+					Value:          msg.Msg.Value,
+					Offset:         offset,
+					Timestamp:      msg.Msg.Timestamp,
+					BlockTimestamp: msgBlock.Msg.Timestamp,
+				})
+				child.offset = offset + 1
+			} else {
+				incomplete = true
+			}
+		}
+	}
+
+	if incomplete || len(messages) == 0 {
+		return nil, ErrIncompleteResponse
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	var incomplete bool
+	prelude := true
+	originalOffset := child.offset
+
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if prelude && offset < child.offset {
+			continue
+		}
+		prelude = false
+
+		if offset >= child.offset {
+			messages = append(messages, &ConsumerMessage{
+				Topic:     child.topic,
+				Partition: child.partition,
+				Key:       rec.Key,
+				Value:     rec.Value,
+				Offset:    offset,
+				Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
+				Headers:   rec.Headers,
+			})
+			child.offset = offset + 1
+		} else {
+			incomplete = true
+		}
+	}
+
+	if incomplete {
+		return nil, ErrIncompleteResponse
+	}
+
+	child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1
+	if child.offset <= originalOffset {
+		return nil, ErrConsumerOffsetNotAdvanced
+	}
+
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if block.Err != ErrNoError {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+	messages := []*ConsumerMessage{}
+	for _, records := range block.RecordsSet {
+		if control, err := records.isControl(); err != nil || control {
+			continue
+		}
+
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.msgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			recordBatchMessages, err := child.parseRecords(records.recordBatch)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	wait             chan none
+	subscriptions    map[*partitionConsumer]none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		wait:             make(chan none),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+	var buffer []*partitionConsumer
+
+	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+	// so the main goroutine can block waiting for work if it has none.
+	for {
+		if len(buffer) > 0 {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- buffer:
+				buffer = nil
+			case bc.wait <- none{}:
+			}
+		} else {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- nil:
+			}
+		}
+	}
+
+done:
+	close(bc.wait)
+	if len(buffer) > 0 {
+		bc.newSubscriptions <- buffer
+	}
+	close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+	<-bc.wait // wait for our first piece of work
+
+	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Either way, the signal just hasn't propagated to our goroutine yet.
+			<-bc.wait
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			break
+		}
+	}
+}
+
+func (bc *brokerConsumer) handleResponses() {
+	// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		switch result {
+		case nil:
+			break
+		case errTimedOut:
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+			// so shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		default:
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			<-bc.wait
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+		request.Isolation = ReadUncommitted // we don't yet support transactions
+	}
+
+	for child := range bc.subscriptions {
+		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+	}
+
+	return bc.broker.Fetch(request)
+}
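For reference, the FetchRequest version chosen in fetchNewMessages above is driven entirely by Config.Version on the client side. A minimal sketch of the configuration that selects the version-4 request path (the broker address is illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// V0_11_0_0 or newer selects the version-4 FetchRequest,
	// which carries MaxBytes and an isolation level.
	config.Version = sarama.V0_11_0_0
	config.Consumer.Fetch.Min = 1                        // becomes MinBytes
	config.Consumer.MaxWaitTime = 250 * time.Millisecond // becomes MaxWaitTime

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}
```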
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 00000000..9d92d350
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,94 @@
+package sarama
+
+type ConsumerGroupMemberMetadata struct {
+	Version  int16
+	Topics   []string
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 00000000..483be335
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,26 @@
+package sarama
+
+type ConsumerMetadataRequest struct {
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	return pe.putString(r.ConsumerGroup)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.ConsumerGroup, err = pd.getString()
+	return err
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 00000000..6b9632bb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+type ConsumerMetadataResponse struct {
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(tmp)
+
+	coordinator := new(Broker)
+	if err := coordinator.decode(pd); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	r.Coordinator = coordinator
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if r.Coordinator != nil {
+		host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+		if err != nil {
+			return err
+		}
+		port, err := strconv.ParseInt(portstr, 10, 32)
+		if err != nil {
+			return err
+		}
+		pe.putInt32(r.Coordinator.ID())
+		if err := pe.putString(host); err != nil {
+			return err
+		}
+		pe.putInt32(int32(port))
+		return nil
+	}
+	pe.putInt32(r.CoordinatorID)
+	if err := pe.putString(r.CoordinatorHost); err != nil {
+		return err
+	}
+	pe.putInt32(r.CoordinatorPort)
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
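This request/response pair is usually issued indirectly: in this version of the library, Client.Coordinator sends it and caches the resulting broker. A sketch, with the address and group name as stand-ins:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Coordinator issues a ConsumerMetadataRequest under the hood and
	// returns the broker that coordinates the given group.
	coordinator, err := client.Coordinator("example-group")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("group coordinator: broker %d at %s", coordinator.ID(), coordinator.Addr())
}
```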
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 00000000..1f144431
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,69 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+)
+
+type crcPolynomial int8
+
+const (
+	crcIEEE crcPolynomial = iota
+	crcCastagnoli
+)
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+	startOffset int
+	polynomial  crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+	c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+	return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+	return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+	return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+
+	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+	if crc != expected {
+		return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+	}
+
+	return nil
+}
+
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+	var tab *crc32.Table
+	switch c.polynomial {
+	case crcIEEE:
+		tab = crc32.IEEETable
+	case crcCastagnoli:
+		tab = castagnoliTable
+	default:
+		return 0, PacketDecodingError{"invalid CRC type"}
+	}
+	return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
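The two polynomials correspond to Kafka's two on-wire formats: legacy message sets are checksummed with the IEEE table, while v2 record batches use Castagnoli (CRC-32C). A standalone illustration of the difference using only the standard library:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("example record data")

	// Legacy message sets: IEEE polynomial.
	ieee := crc32.ChecksumIEEE(payload)

	// v2 record batches: Castagnoli (CRC-32C), as crcCastagnoli selects above.
	castagnoli := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))

	fmt.Printf("IEEE:       %#x\nCastagnoli: %#x\n", ieee, castagnoli)
}
```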
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
new file mode 100644
index 00000000..af321e99
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -0,0 +1,121 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partition := range c.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	pe.putBool(c.ValidateOnly)
+
+	return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	c.TopicPartitions = make(map[string]*TopicPartition, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitions[topic] = new(TopicPartition)
+		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if c.ValidateOnly, err = pd.getBool(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartition struct {
+	Count      int32
+	Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+	pe.putInt32(t.Count)
+
+	if len(t.Assignment) == 0 {
+		pe.putInt32(-1)
+		return nil
+	}
+
+	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+		return err
+	}
+
+	for _, assign := range t.Assignment {
+		if err := pe.putInt32Array(assign); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+	if t.Count, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	n, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if n <= 0 {
+		return nil
+	}
+	t.Assignment = make([][]int32, n)
+
+	for i := 0; i < int(n); i++ {
+		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
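Two wire details worth noting: Count is the desired total partition count for the topic, not an increment, and a nil Assignment is encoded as -1 so the broker places the new replicas itself. A sketch of building the request (topic name and counts are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	request := &sarama.CreatePartitionsRequest{
		Timeout: 15 * time.Second,
		TopicPartitions: map[string]*sarama.TopicPartition{
			"events": {
				Count: 12, // desired total for the topic, not a delta
				// Assignment left nil: encode writes -1 and the broker
				// chooses placement for the added partitions itself.
			},
		},
	}
	fmt.Printf("requesting %d total partitions for %q\n",
		request.TopicPartitions["events"].Count, "events")
}
```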
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
new file mode 100644
index 00000000..abd621c6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsResponse struct {
+	ThrottleTime         time.Duration
+	TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+		return err
+	}
+
+	for topic, partitionError := range c.TopicPartitionErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partitionError.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartitionError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+	pe.putInt16(int16(t.Err))
+
+	if err := pe.putNullableString(t.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kerr)
+
+	if t.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
new file mode 100644
index 00000000..709c0a44
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -0,0 +1,174 @@
+package sarama
+
+import (
+	"time"
+)
+
+type CreateTopicsRequest struct {
+	Version int16
+
+	TopicDetails map[string]*TopicDetail
+	Timeout      time.Duration
+	ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+		return err
+	}
+	for topic, detail := range c.TopicDetails {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := detail.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	if c.Version >= 1 {
+		pe.putBool(c.ValidateOnly)
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicDetails = make(map[string]*TopicDetail, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicDetails[topic] = new(TopicDetail)
+		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if version >= 1 {
+		c.ValidateOnly, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		c.Version = version
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
+type TopicDetail struct {
+	NumPartitions     int32
+	ReplicationFactor int16
+	ReplicaAssignment map[int32][]int32
+	ConfigEntries     map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+	pe.putInt32(t.NumPartitions)
+	pe.putInt16(t.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+		return err
+	}
+	for partition, assignment := range t.ReplicaAssignment {
+		pe.putInt32(partition)
+		if err := pe.putInt32Array(assignment); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range t.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+	if t.NumPartitions, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ReplicaAssignment = make(map[int32][]int32, n)
+		for i := 0; i < n; i++ {
+			replica, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+				return err
+			}
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
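A sketch of filling in TopicDetail; setting Version to 1 enables the ValidateOnly dry-run flag and, per requiredVersion above, needs a 0.11 broker. Topic name and settings are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	retention := "604800000" // config values are nullable strings on the wire

	request := &sarama.CreateTopicsRequest{
		Version: 1,
		Timeout: 15 * time.Second,
		TopicDetails: map[string]*sarama.TopicDetail{
			"events": {
				NumPartitions:     6,
				ReplicationFactor: 3,
				ConfigEntries: map[string]*string{
					"retention.ms": &retention,
				},
			},
		},
		ValidateOnly: true, // broker checks the request without creating anything
	}
	fmt.Printf("would create %d topic(s)\n", len(request.TopicDetails))
}
```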
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
new file mode 100644
index 00000000..66207e00
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -0,0 +1,112 @@
+package sarama
+
+import "time"
+
+type CreateTopicsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	TopicErrors  map[string]*TopicError
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+	if c.Version >= 2 {
+		pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+		return err
+	}
+	for topic, topicError := range c.TopicErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := topicError.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+
+	if version >= 2 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicErrors = make(map[string]*TopicError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicErrors[topic] = new(TopicError)
+		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
+type TopicError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(t.Err))
+
+	if version >= 1 {
+		if err := pe.putNullableString(t.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	if version >= 1 {
+		if t.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
new file mode 100644
index 00000000..ed9089ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -0,0 +1,41 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+	Topics  []string
+	Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(d.Topics); err != nil {
+		return err
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	if d.Topics, err = pd.getStringArray(); err != nil {
+		return err
+	}
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+	return nil
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+	return V0_10_1_0
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
new file mode 100644
index 00000000..34225460
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+	if d.Version >= 1 {
+		pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+		return err
+	}
+	for topic, errorCode := range d.TopicErrorCodes {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+		d.Version = version
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.TopicErrorCodes = make(map[string]KError, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		d.TopicErrorCodes[topic] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
new file mode 100644
index 00000000..7a7cffc3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -0,0 +1,91 @@
+package sarama
+
+type ConfigResource struct {
+	Type        ConfigResourceType
+	Name        string
+	ConfigNames []string
+}
+
+type DescribeConfigsRequest struct {
+	Resources []*ConfigResource
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		pe.putInt8(int8(c.Type))
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+
+		if len(c.ConfigNames) == 0 {
+			pe.putInt32(-1)
+			continue
+		}
+		if err := pe.putStringArray(c.ConfigNames); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ConfigResource, n)
+
+	for i := 0; i < n; i++ {
+		r.Resources[i] = &ConfigResource{}
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Type = ConfigResourceType(t)
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Name = name
+
+		confLength, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if confLength == -1 {
+			continue
+		}
+
+		cfnames := make([]string, confLength)
+		for i := 0; i < confLength; i++ {
+			s, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			cfnames[i] = s
+		}
+		r.Resources[i].ConfigNames = cfnames
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+	return 0
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
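An empty ConfigNames slice is encoded as -1, which asks the broker for every config on the resource. A sketch naming two topic configs explicitly and fetching all configs of a broker (resource names are illustrative; the TopicResource/BrokerResource constants come from config_resource_type.go elsewhere in this patch):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	request := &sarama.DescribeConfigsRequest{
		Resources: []*sarama.ConfigResource{
			{
				Type:        sarama.TopicResource,
				Name:        "events",
				ConfigNames: []string{"retention.ms", "cleanup.policy"},
			},
			{
				Type: sarama.BrokerResource,
				Name: "1", // broker resources are named by broker ID
				// ConfigNames left nil: encoded as -1, i.e. "all configs"
			},
		},
	}
	fmt.Printf("describing %d resource(s)\n", len(request.Resources))
}
```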
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
new file mode 100644
index 00000000..6e5d30e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -0,0 +1,188 @@
+package sarama
+
+import "time"
+
+type DescribeConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*ResourceResponse
+}
+
+type ResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+	Configs   []*ConfigEntry
+}
+
+type ConfigEntry struct {
+	Name      string
+	Value     string
+	ReadOnly  bool
+	Default   bool
+	Sensitive bool
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	if err = pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		if err = c.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ResourceResponse, n)
+	for i := 0; i < n; i++ {
+		rr := &ResourceResponse{}
+		if err := rr.decode(pd, version); err != nil {
+			return err
+		}
+		r.Resources[i] = rr
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+	return 0
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+	pe.putBool(r.Default)
+	pe.putBool(r.Sensitive)
+	return nil
+}
+
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	de, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Default = de
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 00000000..1fb35677
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 00000000..542b3a97
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, groupDescription := range r.Groups {
+		if err := groupDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Groups = make([]*GroupDescription, n)
+	for i := 0; i < n; i++ {
+		r.Groups[i] = new(GroupDescription)
+		if err := r.Groups[i].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+type GroupDescription struct {
+	Err          KError
+	GroupId      string
+	State        string
+	ProtocolType string
+	Protocol     string
+	Members      map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+	pe.putInt16(int16(gd.Err))
+
+	if err := pe.putString(gd.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.State); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.ProtocolType); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.Protocol); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(gd.Members)); err != nil {
+		return err
+	}
+
+	for memberId, groupMemberDescription := range gd.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := groupMemberDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	gd.Err = KError(kerr)
+
+	if gd.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.State, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.Protocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	gd.Members = make(map[string]*GroupMemberDescription)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		gd.Members[memberId] = new(GroupMemberDescription)
+		if err := gd.Members[memberId].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type GroupMemberDescription struct {
+	ClientId         string
+	ClientHost       string
+	MemberMetadata   []byte
+	MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+	if err := pe.putString(gmd.ClientId); err != nil {
+		return err
+	}
+	if err := pe.putString(gmd.ClientHost); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+	if gmd.ClientId, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.ClientHost, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+		return
+	}
+	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(gmd.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+	metadata := new(ConsumerGroupMemberMetadata)
+	err := decode(gmd.MemberMetadata, metadata)
+	return metadata, err
+}
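MemberMetadata and MemberAssignment arrive as opaque byte slices; the two Get helpers above run them through the package decoder. A sketch that walks a response (the response would normally come from Broker.DescribeGroups; an empty one keeps the sketch self-contained):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

// dumpAssignments prints each member's partition assignment from a
// DescribeGroups response obtained elsewhere.
func dumpAssignments(response *sarama.DescribeGroupsResponse) {
	for _, group := range response.Groups {
		if group.Err != sarama.ErrNoError {
			log.Printf("group %s: %s", group.GroupId, group.Err)
			continue
		}
		for memberID, member := range group.Members {
			assignment, err := member.GetMemberAssignment()
			if err != nil {
				log.Printf("member %s: undecodable assignment: %v", memberID, err)
				continue
			}
			for topic, partitions := range assignment.Topics {
				fmt.Printf("%s/%s: %s %v\n", group.GroupId, memberID, topic, partitions)
			}
		}
	}
}

func main() {
	dumpAssignments(&sarama.DescribeGroupsResponse{})
}
```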
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 00000000..294fcdb4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+  - go:
+      version: '1.9'
+
+commands:
+  test:
+    run: make test
+    desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 00000000..7ce3bc0f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+	"fmt"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// encoder is the interface that wraps the basic encode method.
+// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+	encode(pe packetEncoder) error
+}
+
+// encode takes an encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+	if e == nil {
+		return nil, nil
+	}
+
+	var prepEnc prepEncoder
+	var realEnc realEncoder
+
+	err := e.encode(&prepEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+	}
+
+	realEnc.raw = make([]byte, prepEnc.length)
+	realEnc.registry = metricRegistry
+	err = e.encode(&realEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	return realEnc.raw, nil
+}
+
+// decoder is the interface that wraps the basic decode method.
+// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+	decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+	decode(pd packetDecoder, version int16) error
+}
+
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper, version)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
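encode runs every message through two passes: prepEncoder counts bytes so that exactly one buffer allocation is made, then realEncoder fills it. The types below are illustrative stand-ins, not sarama's, but show the same shape:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// countingWriter mirrors prepEncoder: it only tallies how many bytes
// each primitive would take (Kafka strings are int16-length-prefixed).
type countingWriter struct{ n int }

func (w *countingWriter) putInt32(int32)     { w.n += 4 }
func (w *countingWriter) putString(s string) { w.n += 2 + len(s) }

// realWriter mirrors realEncoder: it writes big-endian into a
// preallocated buffer of exactly the measured size.
type realWriter struct {
	buf []byte
	off int
}

func (w *realWriter) putInt32(v int32) {
	binary.BigEndian.PutUint32(w.buf[w.off:], uint32(v))
	w.off += 4
}

func (w *realWriter) putString(s string) {
	binary.BigEndian.PutUint16(w.buf[w.off:], uint16(len(s)))
	w.off += 2
	w.off += copy(w.buf[w.off:], s)
}

func main() {
	// Pass 1: measure.
	c := &countingWriter{}
	c.putString("events")
	c.putInt32(42)

	// Pass 2: allocate once and fill.
	r := &realWriter{buf: make([]byte, c.n)}
	r.putString("events")
	r.putInt32(42)

	fmt.Printf("% x\n", r.buf)
}
```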
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
new file mode 100644
index 00000000..2cd9b506
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -0,0 +1,50 @@
+package sarama
+
+type EndTxnRequest struct {
+	TransactionalID   string
+	ProducerID        int64
+	ProducerEpoch     int16
+	TransactionResult bool
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	pe.putBool(a.TransactionResult)
+
+	return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.TransactionResult, err = pd.getBool(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+	return 26
+}
+
+func (a *EndTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
new file mode 100644
index 00000000..33b27e33
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type EndTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(e.Err))
+	return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	e.Err = KError(kerr)
+
+	return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+	return 25
+}
+
+func (e *EndTxnResponse) version() int16 {
+	return 0
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 00000000..54f431a4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,273 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+	ErrNoError                            KError = 0
+	ErrUnknown                            KError = -1
+	ErrOffsetOutOfRange                   KError = 1
+	ErrInvalidMessage                     KError = 2
+	ErrUnknownTopicOrPartition            KError = 3
+	ErrInvalidMessageSize                 KError = 4
+	ErrLeaderNotAvailable                 KError = 5
+	ErrNotLeaderForPartition              KError = 6
+	ErrRequestTimedOut                    KError = 7
+	ErrBrokerNotAvailable                 KError = 8
+	ErrReplicaNotAvailable                KError = 9
+	ErrMessageSizeTooLarge                KError = 10
+	ErrStaleControllerEpochCode           KError = 11
+	ErrOffsetMetadataTooLarge             KError = 12
+	ErrNetworkException                   KError = 13
+	ErrOffsetsLoadInProgress              KError = 14
+	ErrConsumerCoordinatorNotAvailable    KError = 15
+	ErrNotCoordinatorForConsumer          KError = 16
+	ErrInvalidTopic                       KError = 17
+	ErrMessageSetSizeTooLarge             KError = 18
+	ErrNotEnoughReplicas                  KError = 19
+	ErrNotEnoughReplicasAfterAppend       KError = 20
+	ErrInvalidRequiredAcks                KError = 21
+	ErrIllegalGeneration                  KError = 22
+	ErrInconsistentGroupProtocol          KError = 23
+	ErrInvalidGroupId                     KError = 24
+	ErrUnknownMemberId                    KError = 25
+	ErrInvalidSessionTimeout              KError = 26
+	ErrRebalanceInProgress                KError = 27
+	ErrInvalidCommitOffsetSize            KError = 28
+	ErrTopicAuthorizationFailed           KError = 29
+	ErrGroupAuthorizationFailed           KError = 30
+	ErrClusterAuthorizationFailed         KError = 31
+	ErrInvalidTimestamp                   KError = 32
+	ErrUnsupportedSASLMechanism           KError = 33
+	ErrIllegalSASLState                   KError = 34
+	ErrUnsupportedVersion                 KError = 35
+	ErrTopicAlreadyExists                 KError = 36
+	ErrInvalidPartitions                  KError = 37
+	ErrInvalidReplicationFactor           KError = 38
+	ErrInvalidReplicaAssignment           KError = 39
+	ErrInvalidConfig                      KError = 40
+	ErrNotController                      KError = 41
+	ErrInvalidRequest                     KError = 42
+	ErrUnsupportedForMessageFormat        KError = 43
+	ErrPolicyViolation                    KError = 44
+	ErrOutOfOrderSequenceNumber           KError = 45
+	ErrDuplicateSequenceNumber            KError = 46
+	ErrInvalidProducerEpoch               KError = 47
+	ErrInvalidTxnState                    KError = 48
+	ErrInvalidProducerIDMapping           KError = 49
+	ErrInvalidTransactionTimeout          KError = 50
+	ErrConcurrentTransactions             KError = 51
+	ErrTransactionCoordinatorFenced       KError = 52
+	ErrTransactionalIDAuthorizationFailed KError = 53
+	ErrSecurityDisabled                   KError = 54
+	ErrOperationNotAttempted              KError = 55
+	ErrKafkaStorageError                  KError = 56
+	ErrLogDirNotFound                     KError = 57
+	ErrSASLAuthenticationFailed           KError = 58
+	ErrUnknownProducerID                  KError = 59
+	ErrReassignmentInProgress             KError = 60
+)
+
+func (err KError) Error() string {
+	// Error messages stolen/adapted from
+	// https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error."
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+	case ErrInvalidMessage:
+		return "kafka server: Message contents does not match its CRC."
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker."
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size."
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request."
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down."
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid allocation error."
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provider group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range."
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large."
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic."
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group."
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type."
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range."
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism."
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state."
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of API is not supported."
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists."
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid."
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid."
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid."
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid."
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster."
+	case ErrInvalidRequest:
+		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+	case ErrUnsupportedForMessageFormat:
+		return "kafka server: The requested operation is not supported by the message format version."
+	case ErrPolicyViolation:
+		return "kafka server: Request parameters do not satisfy the configured policy."
+	case ErrOutOfOrderSequenceNumber:
+		return "kafka server: The broker received an out of order sequence number."
+	case ErrDuplicateSequenceNumber:
+		return "kafka server: The broker received a duplicate sequence number."
+	case ErrInvalidProducerEpoch:
+		return "kafka server: Producer attempted an operation with an old epoch."
+	case ErrInvalidTxnState:
+		return "kafka server: The producer attempted a transactional operation in an invalid state."
+	case ErrInvalidProducerIDMapping:
+		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
+	case ErrInvalidTransactionTimeout:
+		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
+	case ErrConcurrentTransactions:
+		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
+	case ErrTransactionCoordinatorFenced:
+		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
+	case ErrTransactionalIDAuthorizationFailed:
+		return "kafka server: Transactional ID authorization failed."
+	case ErrSecurityDisabled:
+		return "kafka server: Security features are disabled."
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation."
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk."
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config."
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed."
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress."
+	}
+
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
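Since KError is a plain int16 with an Error method, broker return codes can be matched with an ordinary switch. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func describe(err sarama.KError) string {
	switch err {
	case sarama.ErrNoError:
		return "ok"
	case sarama.ErrTopicAlreadyExists:
		return "topic already exists (often safe to ignore on create)"
	default:
		return fmt.Sprintf("code %d: %s", int16(err), err)
	}
}

func main() {
	fmt.Println(describe(sarama.ErrNoError))
	fmt.Println(describe(sarama.ErrTopicAlreadyExists))
	fmt.Println(describe(sarama.ErrNotLeaderForPartition))
}
```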
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 00000000..8c8e3a5a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,170 @@
+package sarama
+
+type fetchRequestBlock struct {
+	fetchOffset int64
+	maxBytes    int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+	pe.putInt64(b.fetchOffset)
+	pe.putInt32(b.maxBytes)
+	return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+	if b.fetchOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if b.maxBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+	MaxWaitTime int32
+	MinBytes    int32
+	MaxBytes    int32
+	Version     int16
+	Isolation   IsolationLevel
+	blocks      map[string]map[int32]*fetchRequestBlock
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = 0
+	ReadCommitted   IsolationLevel = 1
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+	pe.putInt32(-1) // replica ID is always -1 for clients
+	pe.putInt32(r.MaxWaitTime)
+	pe.putInt32(r.MinBytes)
+	if r.Version >= 3 {
+		pe.putInt32(r.MaxBytes)
+	}
+	if r.Version >= 4 {
+		pe.putInt8(int8(r.Isolation))
+	}
+	err = pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, blocks := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(blocks))
+		if err != nil {
+			return err
+		}
+		for partition, block := range blocks {
+			pe.putInt32(partition)
+			err = block.encode(pe)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if _, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MinBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.MaxBytes, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 4 {
+		isolation, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Isolation = IsolationLevel(isolation)
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			fetchBlock := &fetchRequestBlock{}
+			if err = fetchBlock.decode(pd); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = fetchBlock
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) key() int16 {
+	return 1
+}
+
+func (r *FetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+	}
+
+	tmp := new(fetchRequestBlock)
+	tmp.maxBytes = maxBytes
+	tmp.fetchOffset = fetchOffset
+
+	r.blocks[topic][partitionID] = tmp
+}
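AddBlock creates the nested maps lazily, so a request can be assembled one partition at a time. A sketch of a version-4 fetch (topic, offset, and sizes are illustrative):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	request := &sarama.FetchRequest{
		Version:     4,                    // 0.11+ wire format
		MinBytes:    1,                    // respond as soon as any data exists
		MaxBytes:    1 << 20,              // v3+: cap on the whole response
		MaxWaitTime: 250,                  // milliseconds the broker may block
		Isolation:   sarama.ReadCommitted, // v4+: hide open transactions
	}
	// topic, partition, fetch offset, per-partition byte limit
	request.AddBlock("events", 0, 1000, 64<<10)

	fmt.Printf("fetch request v%d ready\n", request.Version)
}
```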
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 00000000..0e81ad89
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,385 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AbortedTransaction struct {
+	ProducerID  int64
+	FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if t.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+	pe.putInt64(t.ProducerID)
+	pe.putInt64(t.FirstOffset)
+
+	return nil
+}
+
+type FetchResponseBlock struct {
+	Err                 KError
+	HighWaterMarkOffset int64
+	LastStableOffset    int64
+	AbortedTransactions []*AbortedTransaction
+	Records             *Records // deprecated: use FetchResponseBlock.RecordsSet
+	RecordsSet          []*Records
+	Partial             bool
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.HighWaterMarkOffset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 4 {
+		b.LastStableOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		numTransact, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numTransact >= 0 {
+			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+		}
+
+		for i := 0; i < numTransact; i++ {
+			transact := new(AbortedTransaction)
+			if err = transact.decode(pd); err != nil {
+				return err
+			}
+			b.AbortedTransactions[i] = transact
+		}
+	}
+
+	recordsSize, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	recordsDecoder, err := pd.getSubset(int(recordsSize))
+	if err != nil {
+		return err
+	}
+
+	b.RecordsSet = []*Records{}
+
+	for recordsDecoder.remaining() > 0 {
+		records := &Records{}
+		if err := records.decode(recordsDecoder); err != nil {
+			// Running out of data mid-entry means the broker truncated the
+			// response; only flag the block as partial if nothing at all was
+			// decoded before the truncation
+			if err == ErrInsufficientData {
+				if len(b.RecordsSet) == 0 {
+					b.Partial = true
+				}
+				break
+			}
+			return err
+		}
+
+		partial, err := records.isPartial()
+		if err != nil {
+			return err
+		}
+
+		// Once at least one full record set has been decoded, skip any trailing incomplete one
+		if partial && len(b.RecordsSet) > 0 {
+			break
+		}
+
+		b.RecordsSet = append(b.RecordsSet, records)
+
+		if b.Records == nil {
+			b.Records = records
+		}
+	}
+
+	return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+	sum := 0
+
+	for _, records := range b.RecordsSet {
+		count, err := records.numRecords()
+		if err != nil {
+			return 0, err
+		}
+
+		sum += count
+	}
+
+	return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+	if b.Partial {
+		return true, nil
+	}
+
+	if len(b.RecordsSet) == 1 {
+		return b.RecordsSet[0].isPartial()
+	}
+
+	return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	pe.putInt64(b.HighWaterMarkOffset)
+
+	if version >= 4 {
+		pe.putInt64(b.LastStableOffset)
+
+		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+			return err
+		}
+		for _, transact := range b.AbortedTransactions {
+			if err = transact.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.push(&lengthField{})
+	for _, records := range b.RecordsSet {
+		err = records.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return pe.pop()
+}
+
+type FetchResponse struct {
+	Blocks       map[string]map[int32]*FetchResponseBlock
+	ThrottleTime time.Duration
+	Version      int16 // v1 requires 0.9+, v2 requires 0.10+
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.Version >= 1 {
+		throttle, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(FetchResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+
+		for id, block := range partitions {
+			pe.putInt32(id)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+	return nil
+}
+
+func (r *FetchResponse) key() int16 {
+	return 1
+}
+
+func (r *FetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+	frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	msg := &Message{Key: kb, Value: vb}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].msgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].recordBatch
+	batch.addRecord(rec)
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+	frb := r.getOrCreateBlock(topic, partition)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].recordBatch
+	batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	frb.LastStableOffset = offset
+}
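+
+// Editor's sketch, not part of the upstream file: seeding a canned response
+// with the test helpers above, as a mock broker handler might. StringEncoder
+// and ErrNotLeaderForPartition are defined elsewhere in this package.
+func exampleCannedFetchResponse() *FetchResponse {
+	res := new(FetchResponse)
+	res.AddMessage("my-topic", 0, nil, StringEncoder("hello"), 42)
+	res.AddError("my-topic", 1, ErrNotLeaderForPartition)
+	if block := res.GetBlock("my-topic", 0); block != nil {
+		block.HighWaterMarkOffset = 43
+	}
+	return res
+}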
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 00000000..ce49c473
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+	GroupId      string
+	GenerationId int32
+	MemberId     string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
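+
+// Editor's sketch, not part of the upstream file: round-tripping a request
+// through the package-private encode and versionedDecode helpers, the same
+// way the protocol unit tests exercise request types.
+func exampleHeartbeatRoundTrip() (*HeartbeatRequest, error) {
+	in := &HeartbeatRequest{GroupId: "my-group", GenerationId: 1, MemberId: "member-1"}
+	packet, err := encode(in, nil)
+	if err != nil {
+		return nil, err
+	}
+	out := new(HeartbeatRequest)
+	return out, versionedDecode(packet, out, in.version())
+}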
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 00000000..766f5fde
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+	Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
new file mode 100644
index 00000000..8ceb6c23
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -0,0 +1,43 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+	TransactionalID    *string
+	TransactionTimeout time.Duration
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+	if err := pe.putNullableString(i.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+	if i.TransactionalID, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
new file mode 100644
index 00000000..1b32eb08
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -0,0 +1,55 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+	ThrottleTime  time.Duration
+	Err           KError
+	ProducerID    int64
+	ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(i.Err))
+	pe.putInt64(i.ProducerID)
+	pe.putInt16(i.ProducerEpoch)
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	i.Err = KError(kerr)
+
+	if i.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 00000000..3a7ba171
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,143 @@
+package sarama
+
+type GroupProtocol struct {
+	Name     string
+	Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+	p.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	p.Metadata, err = pd.getBytes()
+	return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(p.Name); err != nil {
+		return err
+	}
+	if err := pe.putBytes(p.Metadata); err != nil {
+		return err
+	}
+	return nil
+}
+
+type JoinGroupRequest struct {
+	GroupId               string
+	SessionTimeout        int32
+	MemberId              string
+	ProtocolType          string
+	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
+	OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	pe.putInt32(r.SessionTimeout)
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.ProtocolType); err != nil {
+		return err
+	}
+
+	if len(r.GroupProtocols) > 0 {
+		if len(r.OrderedGroupProtocols) > 0 {
+			return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+		}
+
+		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+			return err
+		}
+		for name, metadata := range r.GroupProtocols {
+			if err := pe.putString(name); err != nil {
+				return err
+			}
+			if err := pe.putBytes(metadata); err != nil {
+				return err
+			}
+		}
+	} else {
+		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+			return err
+		}
+		for _, protocol := range r.OrderedGroupProtocols {
+			if err := protocol.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.SessionTimeout, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupProtocols = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		protocol := &GroupProtocol{}
+		if err := protocol.decode(pd); err != nil {
+			return err
+		}
+		r.GroupProtocols[protocol.Name] = protocol.Metadata
+		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+		Name:     name,
+		Metadata: metadata,
+	})
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+	bin, err := encode(metadata, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupProtocol(name, bin)
+	return nil
+}
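+
+// Editor's sketch, not part of the upstream file: attaching a balance
+// strategy via AddGroupProtocolMetadata, which keeps protocol order
+// deterministic, unlike the deprecated GroupProtocols map.
+func exampleJoinGroupRequest() (*JoinGroupRequest, error) {
+	req := &JoinGroupRequest{
+		GroupId:        "my-group",
+		SessionTimeout: 30000, // milliseconds
+		MemberId:       "",    // empty on first join; the broker assigns one
+		ProtocolType:   "consumer",
+	}
+	meta := &ConsumerGroupMemberMetadata{Version: 1, Topics: []string{"my-topic"}}
+	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
+		return nil, err
+	}
+	return req, nil
+}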
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 00000000..6d35fe36
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,115 @@
+package sarama
+
+type JoinGroupResponse struct {
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+	for id, bin := range r.Members {
+		meta := new(ConsumerGroupMemberMetadata)
+		if err := decode(bin, meta); err != nil {
+			return nil, err
+		}
+		members[id] = *meta
+	}
+	return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.GroupProtocol); err != nil {
+		return err
+	}
+	if err := pe.putString(r.LeaderId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Members)); err != nil {
+		return err
+	}
+
+	for memberId, memberMetadata := range r.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+
+		if err := pe.putBytes(memberMetadata); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.GroupProtocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.LeaderId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Members = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		memberMetadata, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.Members[memberId] = memberMetadata
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 00000000..e1774274
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+	GroupId  string
+	MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 00000000..d60c626d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+	Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 00000000..576b1a6f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,69 @@
+package sarama
+
+import "encoding/binary"
+
+// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+	startOffset int
+}
+
+func (l *lengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+	return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+	return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+	if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
+
+type varintLengthField struct {
+	startOffset int
+	length      int64
+}
+
+func (l *varintLengthField) decode(pd packetDecoder) error {
+	var err error
+	l.length, err = pd.getVarint()
+	return err
+}
+
+func (l *varintLengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *varintLengthField) adjustLength(currOffset int) int {
+	oldFieldSize := l.reserveLength()
+	l.length = int64(currOffset - l.startOffset - oldFieldSize)
+
+	return l.reserveLength() - oldFieldSize
+}
+
+func (l *varintLengthField) reserveLength() int {
+	var tmp [binary.MaxVarintLen64]byte
+	return binary.PutVarint(tmp[:], l.length)
+}
+
+func (l *varintLengthField) run(curOffset int, buf []byte) error {
+	binary.PutVarint(buf[l.startOffset:], l.length)
+	return nil
+}
+
+func (l *varintLengthField) check(curOffset int, buf []byte) error {
+	if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
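+
+// Editor's note, not part of the upstream file: reserveLength depends on the
+// current value of l.length, so encoders first reserve space for a length of
+// zero (one varint byte) and later call adjustLength to learn how many extra
+// bytes the final varint needs. A worked example:
+func exampleVarintLengthAdjustment() int {
+	l := &varintLengthField{}
+	l.saveOffset(0)
+	reserved := l.reserveLength() // 1: PutVarint(0) takes a single byte
+	// Pretend 200 payload bytes were written after the reserved field;
+	// PutVarint zig-zags 200 to 400, which needs two bytes, so this returns 1.
+	return l.adjustLength(200 + reserved)
+}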
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 00000000..3b16abf7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 00000000..56115d4c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+	Err    KError
+	Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Groups = make(map[string]string)
+	for i := 0; i < n; i++ {
+		groupId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 00000000..bd5650bb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,200 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits of the attributes byte encode the compression codec
+const compressionCodecMask int8 = 0x03
+
+const (
+	CompressionNone   CompressionCodec = 0
+	CompressionGZIP   CompressionCodec = 1
+	CompressionSnappy CompressionCodec = 2
+	CompressionLZ4    CompressionCodec = 3
+)
+
+type Message struct {
+	Codec     CompressionCodec // codec used to compress the message contents
+	Key       []byte           // the message key, may be nil
+	Value     []byte           // the message contents
+	Set       *MessageSet      // the message set a message might wrap
+	Version   int8             // v1 requires Kafka 0.10
+	Timestamp time.Time        // the timestamp of the message (version 1+ only)
+
+	compressedCache []byte
+	compressedSize  int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+	pe.push(newCRC32Field(crcIEEE))
+
+	pe.putInt8(m.Version)
+
+	attributes := int8(m.Codec) & compressionCodecMask
+	pe.putInt8(attributes)
+
+	if m.Version >= 1 {
+		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+			return err
+		}
+	}
+
+	err := pe.putBytes(m.Key)
+	if err != nil {
+		return err
+	}
+
+	var payload []byte
+
+	if m.compressedCache != nil {
+		payload = m.compressedCache
+		m.compressedCache = nil
+	} else if m.Value != nil {
+		switch m.Codec {
+		case CompressionNone:
+			payload = m.Value
+		case CompressionGZIP:
+			var buf bytes.Buffer
+			writer := gzip.NewWriter(&buf)
+			if _, err = writer.Write(m.Value); err != nil {
+				return err
+			}
+			if err = writer.Close(); err != nil {
+				return err
+			}
+			m.compressedCache = buf.Bytes()
+			payload = m.compressedCache
+		case CompressionSnappy:
+			tmp := snappy.Encode(m.Value)
+			m.compressedCache = tmp
+			payload = m.compressedCache
+		case CompressionLZ4:
+			var buf bytes.Buffer
+			writer := lz4.NewWriter(&buf)
+			if _, err = writer.Write(m.Value); err != nil {
+				return err
+			}
+			if err = writer.Close(); err != nil {
+				return err
+			}
+			m.compressedCache = buf.Bytes()
+			payload = m.compressedCache
+
+		default:
+			return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
+		}
+		// Keep in mind the compressed payload size for metric gathering
+		m.compressedSize = len(payload)
+	}
+
+	if err = pe.putBytes(payload); err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+	err = pd.push(newCRC32Field(crcIEEE))
+	if err != nil {
+		return err
+	}
+
+	m.Version, err = pd.getInt8()
+	if err != nil {
+		return err
+	}
+
+	if m.Version > 1 {
+		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+	}
+
+	attribute, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+	if m.Version == 1 {
+		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+			return err
+		}
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	switch m.Codec {
+	case CompressionNone:
+		// nothing to do
+	case CompressionGZIP:
+		if m.Value == nil {
+			break
+		}
+		reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+		if err != nil {
+			return err
+		}
+		if m.Value, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	case CompressionSnappy:
+		if m.Value == nil {
+			break
+		}
+		if m.Value, err = snappy.Decode(m.Value); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	case CompressionLZ4:
+		if m.Value == nil {
+			break
+		}
+		reader := lz4.NewReader(bytes.NewReader(m.Value))
+		if m.Value, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+
+	default:
+		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+	}
+
+	return pd.pop()
+}
+
+// decodeSet decodes a message set from a previously encoded bulk message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
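+
+// Editor's sketch, not part of the upstream file: a compressed round trip via
+// the package-private encode/decode helpers. In real Kafka traffic the
+// compressed Value wraps an inner message set, which decode exposes via Set.
+func exampleGzipRoundTrip() (*Message, error) {
+	in := &Message{Codec: CompressionGZIP, Value: []byte("hello")}
+	packet, err := encode(in, nil)
+	if err != nil {
+		return nil, err
+	}
+	out := new(Message)
+	// decode verifies the CRC and decompresses out.Value transparently
+	return out, decode(packet, out)
+}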
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 00000000..27db52fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,102 @@
+package sarama
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns all of the messages wrapped
+// by this block: the nested message set if present, otherwise the block itself
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&lengthField{}); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if err == ErrInsufficientData {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		switch err {
+		case nil:
+			ms.Messages = append(ms.Messages, msb)
+		case ErrInsufficientData:
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case. So we just ignore such things.
+			ms.PartialTrailingMessage = true
+			return nil
+		default:
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+	block := new(MessageBlock)
+	block.Msg = msg
+	ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 00000000..9a26b55f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type MetadataRequest struct {
+	Topics []string
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+
+	for i := range r.Topics {
+		err = pe.putString(r.Topics[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.Topics = make([]string, topicCount)
+	for i := range r.Topics {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Topics[i] = topic
+	}
+	return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+	return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+	return 0
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+	return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 00000000..f9d6a427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,239 @@
+package sarama
+
+type PartitionMetadata struct {
+	Err      KError
+	ID       int32
+	Leader   int32
+	Replicas []int32
+	Isr      []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	pm.Err = KError(tmp)
+
+	pm.ID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Leader, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Replicas, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	pm.Isr, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
+	pe.putInt16(int16(pm.Err))
+	pe.putInt32(pm.ID)
+	pe.putInt32(pm.Leader)
+
+	err = pe.putInt32Array(pm.Replicas)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putInt32Array(pm.Isr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type TopicMetadata struct {
+	Err        KError
+	Name       string
+	Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	tm.Err = KError(tmp)
+
+	tm.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	tm.Partitions = make([]*PartitionMetadata, n)
+	for i := 0; i < n; i++ {
+		tm.Partitions[i] = new(PartitionMetadata)
+		err = tm.Partitions[i].decode(pd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
+	pe.putInt16(int16(tm.Err))
+
+	err = pe.putString(tm.Name)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putArrayLength(len(tm.Partitions))
+	if err != nil {
+		return err
+	}
+
+	for _, pm := range tm.Partitions {
+		err = pm.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MetadataResponse struct {
+	Brokers []*Broker
+	Topics  []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Brokers = make([]*Broker, n)
+	for i := 0; i < n; i++ {
+		r.Brokers[i] = new(Broker)
+		err = r.Brokers[i].decode(pd)
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]*TopicMetadata, n)
+	for i := 0; i < n; i++ {
+		r.Topics[i] = new(TopicMetadata)
+		err = r.Topics[i].decode(pd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Brokers))
+	if err != nil {
+		return err
+	}
+	for _, broker := range r.Brokers {
+		err = broker.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+	for _, tm := range r.Topics {
+		err = tm.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+	return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+	return minVersion
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+	var tmatch *TopicMetadata
+
+	for _, tm := range r.Topics {
+		if tm.Name == topic {
+			tmatch = tm
+			goto foundTopic
+		}
+	}
+
+	tmatch = new(TopicMetadata)
+	tmatch.Name = topic
+	r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+	tmatch.Err = err
+	return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+	tmatch := r.AddTopic(topic, ErrNoError)
+	var pmatch *PartitionMetadata
+
+	for _, pm := range tmatch.Partitions {
+		if pm.ID == partition {
+			pmatch = pm
+			goto foundPartition
+		}
+	}
+
+	pmatch = new(PartitionMetadata)
+	pmatch.ID = partition
+	tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+	pmatch.Leader = brokerID
+	pmatch.Replicas = replicas
+	pmatch.Isr = isr
+	pmatch.Err = err
+
+}
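+
+// Editor's sketch, not part of the upstream file: seeding metadata for a mock
+// broker test with the helpers above (MockBroker is defined in mockbroker.go).
+func exampleSeedMetadata(mb *MockBroker) *MetadataResponse {
+	res := new(MetadataResponse)
+	res.AddBroker(mb.Addr(), mb.BrokerID())
+	res.AddTopicPartition("my-topic", 0, mb.BrokerID(), nil, nil, ErrNoError)
+	return res
+}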
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 00000000..4869708e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor   = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use the broker ID, like the Java client does, as it contains no '.' or ':'
+	// characters that could be interpreted as special characters by monitoring
+	// tools (e.g. Graphite)
+	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
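+
+// Editor's sketch, not part of the upstream file: recording a sample with the
+// helpers above; Histogram.Update comes from the go-metrics library.
+func exampleRecordTopicSample(r metrics.Registry, topic string, size int64) {
+	getOrRegisterTopicHistogram("record-size", topic, r).Update(size)
+}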
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 00000000..55ef1e29
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,330 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request
+// successfully and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and leaves it to
+// the test writer to program MockBroker behaviour that is correct according
+// to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockResponse builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockResponses of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID     int32
+	port         int32
+	closing      chan none
+	stopper      chan none
+	expectations chan encoder
+	listener     net.Listener
+	t            TestReporter
+	latency      time.Duration
+	handler      requestHandlerFunc
+	notifier     RequestNotifierFunc
+	history      []RequestResponse
+	lock         sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoder) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
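+
+// Editor's sketch, not part of the upstream file: a typical test setup. The
+// map keys are request type names as derived by reflection above; response
+// builders such as NewMockMetadataResponse live in mockresponses.go.
+func exampleMockBrokerSetup(t TestReporter) *MockBroker {
+	mb := NewMockBroker(t, 1)
+	mb.SetHandlerByMap(map[string]MockResponse{
+		"MetadataRequest": NewMockMetadataResponse(t).
+			SetBroker(mb.Addr(), mb.BrokerID()).
+			SetLeader("my-topic", 0, mb.BrokerID()),
+	})
+	return mb // the caller is responsible for calling mb.Close()
+}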
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully and will provide the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker, blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	resHeader := make([]byte, 8)
+	for {
+		req, bytesRead, err := decodeRequest(conn)
+		if err != nil {
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+			b.serverError(err)
+			break
+		}
+
+		if b.latency > 0 {
+			time.Sleep(b.latency)
+		}
+
+		b.lock.Lock()
+		res := b.handler(req)
+		b.history = append(b.history, RequestResponse{req.body, res})
+		b.lock.Unlock()
+
+		if res == nil {
+			Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+			continue
+		}
+		Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+		encodedRes, err := encode(res, nil)
+		if err != nil {
+			b.serverError(err)
+			break
+		}
+		if len(encodedRes) == 0 {
+			b.lock.Lock()
+			if b.notifier != nil {
+				b.notifier(bytesRead, 0)
+			}
+			b.lock.Unlock()
+			continue
+		}
+
+		binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+		binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+		if _, err = conn.Write(resHeader); err != nil {
+			b.serverError(err)
+			break
+		}
+		if _, err = conn.Write(encodedRes); err != nil {
+			b.serverError(err)
+			break
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func (b *MockBroker) serverError(err error) {
+	isConnectionClosedError := false
+	if _, ok := err.(*net.OpError); ok {
+		isConnectionClosedError = true
+	} else if err == io.EOF {
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Error(err)
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID. Responses are programmed via Returns or a handler map;
+// if an error occurs it is simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoder, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+	b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 00000000..f79a9d5e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,477 @@
+package sarama
+
+import (
+	"fmt"
+)
+
+// TestReporter has methods matching Go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface: it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method
+// the next response from the sequence is returned. When the end of the
+// sequence is reached the last element from the sequence is returned.
+type MockSequence struct {
+	responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+	ms := &MockSequence{}
+	ms.responses = make([]MockResponse, len(responses))
+	for i, res := range responses {
+		switch res := res.(type) {
+		case MockResponse:
+			ms.responses[i] = res
+		case encoder:
+			ms.responses[i] = NewMockWrapper(res)
+		default:
+			panic(fmt.Sprintf("Unexpected response type: %T", res))
+		}
+	}
+	return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+	res = mc.responses[0].For(reqBody)
+	if len(mc.responses) > 1 {
+		mc.responses = mc.responses[1:]
+	}
+	return res
+}
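+
+// Editor's sketch, not part of the upstream file: a sequence that answers the
+// first request with a bare response and every later request with a populated
+// builder; plain encoders are wrapped by NewMockWrapper automatically.
+func exampleMetadataSequence(t TestReporter) MockResponse {
+	return NewMockSequence(
+		&MetadataResponse{}, // first call: empty metadata
+		NewMockMetadataResponse(t).SetBroker("localhost:9092", 1),
+	)
+}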
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+	leaders map[string]map[int32]int32
+	brokers map[string]int32
+	t       TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+	return &MockMetadataResponse{
+		leaders: make(map[string]map[int32]int32),
+		brokers: make(map[string]int32),
+		t:       t,
+	}
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+	partitions := mmr.leaders[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int32)
+		mmr.leaders[topic] = partitions
+	}
+	partitions[partition] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+	mmr.brokers[addr] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+	metadataRequest := reqBody.(*MetadataRequest)
+	metadataResponse := &MetadataResponse{}
+	for addr, brokerID := range mmr.brokers {
+		metadataResponse.AddBroker(addr, brokerID)
+	}
+	if len(metadataRequest.Topics) == 0 {
+		for topic, partitions := range mmr.leaders {
+			for partition, brokerID := range partitions {
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+			}
+		}
+		return metadataResponse
+	}
+	for _, topic := range metadataRequest.Topics {
+		for partition, brokerID := range mmr.leaders[topic] {
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+		}
+	}
+	return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+	offsets map[string]map[int32]map[int64]int64
+	t       TestReporter
+	version int16
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+	return &MockOffsetResponse{
+		offsets: make(map[string]map[int32]map[int64]int64),
+		t:       t,
+	}
+}
+
+func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
+	mor.version = version
+	return mor
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]int64)
+		mor.offsets[topic] = partitions
+	}
+	times := partitions[partition]
+	if times == nil {
+		times = make(map[int64]int64)
+		partitions[partition] = times
+	}
+	times[time] = offset
+	return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+	offsetRequest := reqBody.(*OffsetRequest)
+	offsetResponse := &OffsetResponse{Version: mor.version}
+	for topic, partitions := range offsetRequest.blocks {
+		for partition, block := range partitions {
+			offset := mor.getOffset(topic, partition, block.time)
+			offsetResponse.AddTopicPartition(topic, partition, offset)
+		}
+	}
+	return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		mor.t.Errorf("missing topic: %s", topic)
+	}
+	times := partitions[partition]
+	if times == nil {
+		mor.t.Errorf("missing partition: %d", partition)
+	}
+	offset, ok := times[time]
+	if !ok {
+		mor.t.Errorf("missing time: %d", time)
+	}
+	return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+	messages       map[string]map[int32]map[int64]Encoder
+	highWaterMarks map[string]map[int32]int64
+	t              TestReporter
+	batchSize      int
+	version        int16
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+	return &MockFetchResponse{
+		messages:       make(map[string]map[int32]map[int64]Encoder),
+		highWaterMarks: make(map[string]map[int32]int64),
+		t:              t,
+		batchSize:      batchSize,
+	}
+}
+
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+	mfr.version = version
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]Encoder)
+		mfr.messages[topic] = partitions
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		messages = make(map[int64]Encoder)
+		partitions[partition] = messages
+	}
+	messages[offset] = msg
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int64)
+		mfr.highWaterMarks[topic] = partitions
+	}
+	partitions[partition] = offset
+	return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+	fetchRequest := reqBody.(*FetchRequest)
+	res := &FetchResponse{
+		Version: mfr.version,
+	}
+	for topic, partitions := range fetchRequest.blocks {
+		for partition, block := range partitions {
+			initialOffset := block.fetchOffset
+			offset := initialOffset
+			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+			for i := 0; i < mfr.batchSize && offset < maxOffset; {
+				msg := mfr.getMessage(topic, partition, offset)
+				if msg != nil {
+					res.AddMessage(topic, partition, nil, msg, offset)
+					i++
+				}
+				offset++
+			}
+			fb := res.GetBlock(topic, partition)
+			if fb == nil {
+				res.AddError(topic, partition, ErrNoError)
+				fb = res.GetBlock(topic, partition)
+			}
+			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+		}
+	}
+	return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return nil
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return nil
+	}
+	return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return 0
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return 0
+	}
+	return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		return 0
+	}
+	return partitions[partition]
+}
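+
+// exampleMockFetch is an illustrative sketch, not part of upstream sarama:
+// seeding a MockBroker so that a consumer test sees two messages on
+// partition 0, with the high water mark just past them. StringEncoder is
+// sarama's ready-made Encoder for strings.
+func exampleMockFetch(t TestReporter, broker *MockBroker) {
+	broker.SetHandlerByMap(map[string]MockResponse{
+		"FetchRequest": NewMockFetchResponse(t, 1).
+			SetMessage("my-topic", 0, 0, StringEncoder("hello")).
+			SetMessage("my-topic", 0, 1, StringEncoder("world")).
+			SetHighWaterMark("my-topic", 0, 2),
+	})
+}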
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+	coordinators map[string]interface{}
+	t            TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+	return &MockConsumerMetadataResponse{
+		coordinators: make(map[string]interface{}),
+		t:            t,
+	}
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = broker
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = kerror
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ConsumerMetadataRequest)
+	group := req.ConsumerGroup
+	res := &ConsumerMetadataResponse{}
+	v := mr.coordinators[group]
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+	errors map[string]map[string]map[int32]KError
+	t      TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+	return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[string]map[int32]KError)
+	}
+	topics := mr.errors[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]KError)
+		mr.errors[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		topics[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetCommitRequest)
+	group := req.ConsumerGroup
+	res := &OffsetCommitResponse{}
+	for topic, partitions := range req.blocks {
+		for partition := range partitions {
+			res.AddError(topic, partition, mr.getError(group, topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+	topics := mr.errors[group]
+	if topics == nil {
+		return ErrNoError
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+	return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[int32]KError)
+	}
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		mr.errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ProduceRequest)
+	res := &ProduceResponse{
+		Version: mr.version,
+	}
+	for topic, partitions := range req.records {
+		for partition := range partitions {
+			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	t       TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+	return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+	if mr.offsets == nil {
+		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	topics := mr.offsets[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+		mr.offsets[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		topics[topic] = partitions
+	}
+	partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetFetchRequest)
+	group := req.ConsumerGroup
+	res := &OffsetFetchResponse{}
+	for topic, partitions := range mr.offsets[group] {
+		for partition, block := range partitions {
+			res.AddBlock(topic, partition, block)
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 00000000..b21ea634
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,190 @@
+package sarama
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if request version 1 is used, which requires Kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
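+
+// exampleOffsetCommitRequest is an illustrative sketch, not part of upstream
+// sarama: a v1 commit request that lets the broker timestamp the commit at
+// receive time, for a group that does not rely on Kafka for partition
+// management. Topic, partition and offset are arbitrary.
+func exampleOffsetCommitRequest() *OffsetCommitRequest {
+	r := &OffsetCommitRequest{
+		Version:                 1,
+		ConsumerGroup:           "example-group",
+		ConsumerGroupGeneration: GroupGenerationUndefined,
+	}
+	r.AddBlock("example-topic", 0, 42, ReceiveTime, "")
+	return r
+}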
+
+type offsetCommitRequestBlock struct {
+	offset    int64
+	timestamp int64
+	metadata  string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.offset)
+	if version == 1 {
+		pe.putInt64(b.timestamp)
+	} else if b.timestamp != 0 {
+		Logger.Println("Non-zero timestamp specified for a non-v1 OffsetCommitRequest, it will be ignored")
+	}
+
+	return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 1 {
+		if b.timestamp, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	b.metadata, err = pd.getString()
+	return err
+}
+
+type OffsetCommitRequest struct {
+	ConsumerGroup           string
+	ConsumerGroupGeneration int32  // v1 or later
+	ConsumerID              string // v1 or later
+	RetentionTime           int64  // v2 or later
+
+	// Version can be:
+	// - 0 (kafka 0.8.1 and later)
+	// - 1 (kafka 0.8.2 and later)
+	// - 2 (kafka 0.9.0 and later)
+	Version int16
+	blocks  map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 2 {
+		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+	}
+
+	if err := pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ConsumerGroupGeneration)
+		if err := pe.putString(r.ConsumerID); err != nil {
+			return err
+		}
+	} else {
+		if r.ConsumerGroupGeneration != 0 {
+			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+		}
+		if r.ConsumerID != "" {
+			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+		}
+	}
+
+	if r.Version >= 2 {
+		pe.putInt64(r.RetentionTime)
+	} else if r.RetentionTime != 0 {
+		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+	}
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.ConsumerID, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		if r.RetentionTime, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetCommitRequestBlock{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_9_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+	}
+
+	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 00000000..7f277e77
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+type OffsetCommitResponse struct {
+	Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, kerror := range partitions {
+			pe.putInt32(partition)
+			pe.putInt16(int16(kerror))
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			tmp, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+			r.Errors[name][id] = KError(tmp)
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+	return 0
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+	return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 00000000..b19fe79b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type OffsetFetchRequest struct {
+	ConsumerGroup string
+	Version       int16
+	partitions    map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 1 {
+		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+	}
+
+	if err = pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+	if err = pe.putArrayLength(len(r.partitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.partitions {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+	partitionCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if partitionCount == 0 {
+		return nil
+	}
+	r.partitions = make(map[string][]int32)
+	for i := 0; i < partitionCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		r.partitions[topic] = partitions
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 00000000..323220ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,143 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+	Offset   int64
+	Metadata string
+	Err      KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	b.Metadata, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
+	pe.putInt64(b.Offset)
+
+	err = pe.putString(b.Metadata)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt16(int16(b.Err))
+
+	return nil
+}
+
+type OffsetFetchResponse struct {
+	Blocks map[string]map[int32]*OffsetFetchResponseBlock
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numBlocks == 0 {
+			r.Blocks[name] = nil
+			continue
+		}
+		r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetFetchResponseBlock)
+			err = block.decode(pd)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+	return 0
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+	return minVersion
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	partitions := r.Blocks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	partitions[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 00000000..6c01f959
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,560 @@
+package sarama
+
+import (
+	"sync"
+	"time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+	// It will return an error if this OffsetManager is already managing the given
+	// topic/partition.
+	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+	// Close stops the OffsetManager from managing offsets. It is required to call
+	// this function before an OffsetManager object passes out of scope, as it
+	// will otherwise leak memory. You must call this after all the
+	// PartitionOffsetManagers are closed.
+	Close() error
+}
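+
+// exampleOffsetManagerLifecycle is an illustrative sketch, not part of
+// upstream sarama: it shows the required shutdown order (close each
+// PartitionOffsetManager first, then the OffsetManager, then the underlying
+// Client, which is not shown here). Group, topic and partition are arbitrary.
+func exampleOffsetManagerLifecycle(client Client) error {
+	om, err := NewOffsetManagerFromClient("example-group", client)
+	if err != nil {
+		return err
+	}
+	pom, err := om.ManagePartition("example-topic", 0)
+	if err != nil {
+		return err
+	}
+
+	next, _ := pom.NextOffset() // where a consumer should resume
+	// ... consume the message at offset `next` ...
+	pom.MarkOffset(next+1, "") // mark the next offset to read
+
+	if err := pom.Close(); err != nil {
+		return err
+	}
+	return om.Close()
+}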
+
+type offsetManager struct {
+	client Client
+	conf   *Config
+	group  string
+
+	lock sync.Mutex
+	poms map[string]map[int32]*partitionOffsetManager
+	boms map[*Broker]*brokerOffsetManager
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	om := &offsetManager{
+		client: client,
+		conf:   client.Config(),
+		group:  group,
+		poms:   make(map[string]map[int32]*partitionOffsetManager),
+		boms:   make(map[*Broker]*brokerOffsetManager),
+	}
+
+	return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+	pom, err := om.newPartitionOffsetManager(topic, partition)
+	if err != nil {
+		return nil, err
+	}
+
+	om.lock.Lock()
+	defer om.lock.Unlock()
+
+	topicManagers := om.poms[topic]
+	if topicManagers == nil {
+		topicManagers = make(map[int32]*partitionOffsetManager)
+		om.poms[topic] = topicManagers
+	}
+
+	if topicManagers[partition] != nil {
+		return nil, ConfigurationError("That topic/partition is already being managed")
+	}
+
+	topicManagers[partition] = pom
+	return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+	return nil
+}
+
+func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+	om.lock.Lock()
+	defer om.lock.Unlock()
+
+	bom := om.boms[broker]
+	if bom == nil {
+		bom = om.newBrokerOffsetManager(broker)
+		om.boms[broker] = bom
+	}
+
+	bom.refs++
+
+	return bom
+}
+
+func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
+	om.lock.Lock()
+	defer om.lock.Unlock()
+
+	bom.refs--
+
+	if bom.refs == 0 {
+		close(bom.updateSubscriptions)
+		if om.boms[bom.broker] == bom {
+			delete(om.boms, bom.broker)
+		}
+	}
+}
+
+func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
+	om.lock.Lock()
+	defer om.lock.Unlock()
+
+	delete(om.boms, bom.broker)
+}
+
+func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
+	om.lock.Lock()
+	defer om.lock.Unlock()
+
+	delete(om.poms[pom.topic], pom.partition)
+	if len(om.poms[pom.topic]) == 0 {
+		delete(om.poms, pom.topic)
+	}
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+	// NextOffset returns the next offset that should be consumed for the managed
+	// partition, accompanied by metadata which can be used to reconstruct the state
+	// of the partition consumer when it resumes. NextOffset() will return
+	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+	// was committed for this partition yet.
+	NextOffset() (int64, string)
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. ResetOffset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting the offset to an earlier or smaller value, whereas MarkOffset only
+	// allows the offset to increase. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function (or Close) before
+	// a PartitionOffsetManager object passes out of scope, as it will otherwise leak
+	// memory. You must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
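+
+// exampleMarkConsumed is an illustrative sketch, not part of upstream sarama,
+// of the MarkOffset convention described above: commit the offset of the
+// *next* message to read, i.e. the consumed message's offset plus one.
+func exampleMarkConsumed(pom PartitionOffsetManager, msg *ConsumerMessage) {
+	// ... process msg ...
+	pom.MarkOffset(msg.Offset+1, "")
+}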
+
+type partitionOffsetManager struct {
+	parent    *offsetManager
+	topic     string
+	partition int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	clean    sync.Cond
+	broker   *brokerOffsetManager
+
+	errors    chan *ConsumerError
+	rebalance chan none
+	dying     chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	pom := &partitionOffsetManager{
+		parent:    om,
+		topic:     topic,
+		partition: partition,
+		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		rebalance: make(chan none, 1),
+		dying:     make(chan none),
+	}
+	pom.clean.L = &pom.lock
+
+	if err := pom.selectBroker(); err != nil {
+		return nil, err
+	}
+
+	if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+		return nil, err
+	}
+
+	pom.broker.updateSubscriptions <- pom
+
+	go withRecover(pom.mainLoop)
+
+	return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+	for {
+		select {
+		case <-pom.rebalance:
+			if err := pom.selectBroker(); err != nil {
+				pom.handleError(err)
+				pom.rebalance <- none{}
+			} else {
+				pom.broker.updateSubscriptions <- pom
+			}
+		case <-pom.dying:
+			if pom.broker != nil {
+				select {
+				case <-pom.rebalance:
+				case pom.broker.updateSubscriptions <- pom:
+				}
+				pom.parent.unrefBrokerOffsetManager(pom.broker)
+			}
+			pom.parent.abandonPartitionOffsetManager(pom)
+			close(pom.errors)
+			return
+		}
+	}
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+	if pom.broker != nil {
+		pom.parent.unrefBrokerOffsetManager(pom.broker)
+		pom.broker = nil
+	}
+
+	var broker *Broker
+	var err error
+
+	if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+		return err
+	}
+
+	if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+		return err
+	}
+
+	pom.broker = pom.parent.refBrokerOffsetManager(broker)
+	return nil
+}
+
+func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
+	request := new(OffsetFetchRequest)
+	request.Version = 1
+	request.ConsumerGroup = pom.parent.group
+	request.AddPartition(pom.topic, pom.partition)
+
+	response, err := pom.broker.broker.FetchOffset(request)
+	if err != nil {
+		return err
+	}
+
+	block := response.GetBlock(pom.topic, pom.partition)
+	if block == nil {
+		return ErrIncompleteResponse
+	}
+
+	switch block.Err {
+	case ErrNoError:
+		pom.offset = block.Offset
+		pom.metadata = block.Metadata
+		return nil
+	case ErrNotCoordinatorForConsumer:
+		if retries <= 0 {
+			return block.Err
+		}
+		if err := pom.selectBroker(); err != nil {
+			return err
+		}
+		return pom.fetchInitialOffset(retries - 1)
+	case ErrOffsetsLoadInProgress:
+		if retries <= 0 {
+			return block.Err
+		}
+		time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
+		return pom.fetchInitialOffset(retries - 1)
+	default:
+		return block.Err
+	}
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+	cErr := &ConsumerError{
+		Topic:     pom.topic,
+		Partition: pom.partition,
+		Err:       err,
+	}
+
+	if pom.parent.conf.Consumer.Return.Errors {
+		pom.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+	return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset > pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset == offset && pom.metadata == metadata {
+		pom.dirty = false
+		pom.clean.Signal()
+	}
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset >= 0 {
+		return pom.offset, pom.metadata
+	}
+
+	return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+	go func() {
+		pom.lock.Lock()
+		defer pom.lock.Unlock()
+
+		for pom.dirty {
+			pom.clean.Wait()
+		}
+
+		close(pom.dying)
+	}()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+	pom.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range pom.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+// Broker Offset Manager
+
+type brokerOffsetManager struct {
+	parent              *offsetManager
+	broker              *Broker
+	timer               *time.Ticker
+	updateSubscriptions chan *partitionOffsetManager
+	subscriptions       map[*partitionOffsetManager]none
+	refs                int
+}
+
+func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+	bom := &brokerOffsetManager{
+		parent:              om,
+		broker:              broker,
+		timer:               time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
+		updateSubscriptions: make(chan *partitionOffsetManager),
+		subscriptions:       make(map[*partitionOffsetManager]none),
+	}
+
+	go withRecover(bom.mainLoop)
+
+	return bom
+}
+
+func (bom *brokerOffsetManager) mainLoop() {
+	for {
+		select {
+		case <-bom.timer.C:
+			if len(bom.subscriptions) > 0 {
+				bom.flushToBroker()
+			}
+		case s, ok := <-bom.updateSubscriptions:
+			if !ok {
+				bom.timer.Stop()
+				return
+			}
+			if _, ok := bom.subscriptions[s]; ok {
+				delete(bom.subscriptions, s)
+			} else {
+				bom.subscriptions[s] = none{}
+			}
+		}
+	}
+}
+
+func (bom *brokerOffsetManager) flushToBroker() {
+	request := bom.constructRequest()
+	if request == nil {
+		return
+	}
+
+	response, err := bom.broker.CommitOffset(request)
+
+	if err != nil {
+		bom.abort(err)
+		return
+	}
+
+	for s := range bom.subscriptions {
+		if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
+			continue
+		}
+
+		var err KError
+		var ok bool
+
+		if response.Errors[s.topic] == nil {
+			s.handleError(ErrIncompleteResponse)
+			delete(bom.subscriptions, s)
+			s.rebalance <- none{}
+			continue
+		}
+		if err, ok = response.Errors[s.topic][s.partition]; !ok {
+			s.handleError(ErrIncompleteResponse)
+			delete(bom.subscriptions, s)
+			s.rebalance <- none{}
+			continue
+		}
+
+		switch err {
+		case ErrNoError:
+			block := request.blocks[s.topic][s.partition]
+			s.updateCommitted(block.offset, block.metadata)
+		case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+			ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+			// not a critical error, we just need to redispatch
+			delete(bom.subscriptions, s)
+			s.rebalance <- none{}
+		case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+			// nothing we can do about this, just tell the user and carry on
+			s.handleError(err)
+		case ErrOffsetsLoadInProgress:
+			// nothing wrong but we didn't commit, we'll get it next time round
+			break
+		case ErrUnknownTopicOrPartition:
+			// let the user know *and* try redispatching - if topic-auto-create is
+			// enabled, redispatching should trigger a metadata request and create the
+			// topic; if not then re-dispatching won't help, but we've let the user
+			// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+			fallthrough
+		default:
+			// unknown error: tell the user and try redispatching
+			s.handleError(err)
+			delete(bom.subscriptions, s)
+			s.rebalance <- none{}
+		}
+	}
+}
+
+func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
+	var r *OffsetCommitRequest
+	var perPartitionTimestamp int64
+	if bom.parent.conf.Consumer.Offsets.Retention == 0 {
+		perPartitionTimestamp = ReceiveTime
+		r = &OffsetCommitRequest{
+			Version:                 1,
+			ConsumerGroup:           bom.parent.group,
+			ConsumerGroupGeneration: GroupGenerationUndefined,
+		}
+	} else {
+		r = &OffsetCommitRequest{
+			Version:                 2,
+			RetentionTime:           int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
+			ConsumerGroup:           bom.parent.group,
+			ConsumerGroupGeneration: GroupGenerationUndefined,
+		}
+	}
+
+	for s := range bom.subscriptions {
+		s.lock.Lock()
+		if s.dirty {
+			r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
+		}
+		s.lock.Unlock()
+	}
+
+	if len(r.blocks) > 0 {
+		return r
+	}
+
+	return nil
+}
+
+func (bom *brokerOffsetManager) abort(err error) {
+	_ = bom.broker.Close() // we don't care about the error this might return, we already have one
+	bom.parent.abandonBroker(bom)
+
+	for pom := range bom.subscriptions {
+		pom.handleError(err)
+		pom.rebalance <- none{}
+	}
+
+	for s := range bom.updateSubscriptions {
+		if _, ok := bom.subscriptions[s]; !ok {
+			s.handleError(err)
+			s.rebalance <- none{}
+		}
+	}
+
+	bom.subscriptions = make(map[*partitionOffsetManager]none)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 00000000..6c269601
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,132 @@
+package sarama
+
+type offsetRequestBlock struct {
+	time       int64
+	maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.time)
+	if version == 0 {
+		pe.putInt32(b.maxOffsets)
+	}
+
+	return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.time, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 0 {
+		if b.maxOffsets, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type OffsetRequest struct {
+	Version int16
+	blocks  map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+	pe.putInt32(-1) // replica ID is always -1 for clients
+	err := pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	// Ignore replica ID
+	if _, err := pd.getInt32(); err != nil {
+		return err
+	}
+	blockCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if blockCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	for i := 0; i < blockCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetRequestBlock{}
+			if err := block.decode(pd, version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+	return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+	}
+
+	tmp := new(offsetRequestBlock)
+	tmp.time = time
+	if r.Version == 0 {
+		tmp.maxOffsets = maxOffsets
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
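+
+// exampleNewestOffsetRequest is an illustrative sketch, not part of upstream
+// sarama: it asks the broker for the newest available offset of a single
+// partition, using the package's OffsetNewest sentinel as the time and a
+// maxOffsets of 1 (only meaningful for version 0 requests).
+func exampleNewestOffsetRequest() *OffsetRequest {
+	r := &OffsetRequest{Version: 0}
+	r.AddBlock("example-topic", 0, OffsetNewest, 1)
+	return r
+}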
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 00000000..9a9cfe96
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+	Err       KError
+	Offsets   []int64 // Version 0
+	Offset    int64   // Version 1
+	Timestamp int64   // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	if version == 0 {
+		b.Offsets, err = pd.getInt64Array()
+
+		return err
+	}
+
+	b.Timestamp, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// For backwards compatibility put the offset in the offsets array too
+	b.Offsets = []int64{b.Offset}
+
+	return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	if version == 0 {
+		return pe.putInt64Array(b.Offsets)
+	}
+
+	pe.putInt64(b.Timestamp)
+	pe.putInt64(b.Offset)
+
+	return nil
+}
+
+type OffsetResponse struct {
+	Version int16
+	Blocks  map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.version()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+	return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return minVersion
+	}
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*OffsetResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 00000000..74805ccb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,60 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the pushDecoder interface for use cases where the length of
+// the field itself is unknown until its value has been decoded (for instance, varint-encoded
+// length fields).
+// During push, the dynamicPushDecoder.decode() method is called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 00000000..67b8daed
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,65 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putArrayLength(in int) error
+	putBool(in bool)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
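+
+// exampleLengthPrefixed is an illustrative sketch, not part of upstream
+// sarama: it shows the push/pop pattern with the package's lengthField, which
+// reserves 4 bytes for a length that is only known once the payload between
+// push and pop has been written.
+func exampleLengthPrefixed(pe packetEncoder, payload []byte) error {
+	pe.push(&lengthField{}) // reserve space for the length
+	if err := pe.putRawBytes(payload); err != nil {
+		return err
+	}
+	return pe.pop() // back-fill the reserved bytes with the payload length
+}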
+
+// dynamicPushEncoder extends the pushEncoder interface for use cases where the length of
+// the field itself is unknown until its value has been computed (for instance, varint-encoded
+// length fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 00000000..97293272
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,135 @@
+package sarama
+
+import (
+	"hash"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
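+
+// constantPartitioner is an illustrative sketch, not part of upstream sarama:
+// a minimal custom Partitioner that routes every message to partition 0. Its
+// constructor matches the PartitionerConstructor signature, so it can be
+// installed with config.Producer.Partitioner = newConstantPartitioner.
+type constantPartitioner struct{}
+
+func newConstantPartitioner(topic string) Partitioner {
+	return &constantPartitioner{}
+}
+
+func (p *constantPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return 0, nil
+}
+
+// RequiresConsistency returns true: partition 0 must be respected even if it
+// is currently unavailable.
+func (p *constantPartitioner) RequiresConsistency() bool {
+	return true
+}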
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random Partitioner
+	hasher hash.Hash32
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function returning an instance that implements the hash.Hash32 interface. This ensures
+// that each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		return p
+	}
+}
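+
+// exampleCustomHashConstructor is an illustrative sketch, not part of
+// upstream sarama: it selects plain FNV-1 (fnv.New32, already imported in
+// this file) instead of the default FNV-1a. Assign the result to
+// config.Producer.Partitioner to use it.
+func exampleCustomHashConstructor() PartitionerConstructor {
+	return NewCustomHashPartitioner(fnv.New32)
+}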
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulo the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if message.Key == nil {
+		return p.random.Partition(message, numPartitions)
+	}
+	bytes, err := message.Key.Encode()
+	if err != nil {
+		return -1, err
+	}
+	p.hasher.Reset()
+	_, err = p.hasher.Write(bytes)
+	if err != nil {
+		return -1, err
+	}
+	partition := int32(p.hasher.Sum32()) % numPartitions
+	if partition < 0 {
+		partition = -partition
+	}
+	return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+	return true
+}
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 00000000..b633cd15
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+	stack  []pushEncoder
+	length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+	pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+	if in > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+	}
+	pe.length += 4
+	return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+	pe.length++
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+	pe.length += 4
+	if in == nil {
+		return nil
+	}
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		pe.putVarint(-1)
+		return nil
+	}
+	pe.putVarint(int64(len(in)))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+	if len(in) > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.length += 2
+		return nil
+	}
+	return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+	pe.length += 2
+	if len(in) > math.MaxInt16 {
+		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 8 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) offset() int {
+	return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+	in.saveOffset(pe.length)
+	pe.length += in.reserveLength()
+	pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 00000000..0ec4d8d5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements), but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
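+
+// A minimal illustrative sketch: these constants are normally selected
+// through the producer configuration rather than by constructing a
+// ProduceRequest by hand, e.g.
+//
+//	config := NewConfig()
+//	config.Producer.RequiredAcks = WaitForAll // trade latency for durability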
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := pd.getNullableString()
+		if err != nil {
+			return err
+		}
+		r.TransactionalID = id
+	}
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.records = make(map[string]map[int32]Records)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.records[topic] = make(map[int32]Records)
+
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			size, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			recordsDecoder, err := pd.getSubset(int(size))
+			if err != nil {
+				return err
+			}
+			var records Records
+			if err := records.decode(recordsDecoder); err != nil {
+				return err
+			}
+			r.records[topic][partition] = records
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+	if r.records == nil {
+		r.records = make(map[string]map[int32]Records)
+	}
+
+	if r.records[topic] == nil {
+		r.records[topic] = make(map[int32]Records)
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	r.ensureRecords(topic, partition)
+	set := r.records[topic][partition].msgSet
+
+	if set == nil {
+		set = new(MessageSet)
+		r.records[topic][partition] = newLegacyRecords(set)
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newDefaultRecords(batch)
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 00000000..043c40f8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,183 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ProduceResponseBlock struct {
+	Err    KError
+	Offset int64
+	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+	Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if millis, err := pd.getInt64(); err != nil {
+			return err
+		} else if millis != -1 {
+			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+	}
+
+	return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+	pe.putInt64(b.Offset)
+
+	if version >= 2 {
+		timestamp := int64(-1)
+		if !b.Timestamp.Before(time.Unix(0, 0)) {
+			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+		} else if !b.Timestamp.IsZero() {
+			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+		}
+		pe.putInt64(timestamp)
+	}
+
+	return nil
+}
+
+type ProduceResponse struct {
+	Blocks       map[string]map[int32]*ProduceResponseBlock
+	Version      int16
+	ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(ProduceResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	if r.Version >= 1 {
+		millis, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+
+		r.ThrottleTime = time.Duration(millis) * time.Millisecond
+	}
+
+	return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for id, prb := range partitions {
+			pe.putInt32(id)
+			err = prb.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+	return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+	return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*ProduceResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &ProduceResponseBlock{Err: err}
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 00000000..3cbaeb5f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,243 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+type partitionSet struct {
+	msgs          []*ProducerMessage
+	recordsToSend Records
+	bufferBytes   int
+}
+
+type produceSet struct {
+	parent *asyncProducer
+	msgs   map[string]map[int32]*partitionSet
+
+	bufferBytes int
+	bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+	return &produceSet{
+		msgs:   make(map[string]map[int32]*partitionSet),
+		parent: parent,
+	}
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+	var err error
+	var key, val []byte
+
+	if msg.Key != nil {
+		if key, err = msg.Key.Encode(); err != nil {
+			return err
+		}
+	}
+
+	if msg.Value != nil {
+		if val, err = msg.Value.Encode(); err != nil {
+			return err
+		}
+	}
+
+	timestamp := msg.Timestamp
+	if msg.Timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+
+	partitions := ps.msgs[msg.Topic]
+	if partitions == nil {
+		partitions = make(map[int32]*partitionSet)
+		ps.msgs[msg.Topic] = partitions
+	}
+
+	var size int
+
+	set := partitions[msg.Partition]
+	if set == nil {
+		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+			batch := &RecordBatch{
+				FirstTimestamp: timestamp,
+				Version:        2,
+				ProducerID:     -1, /* No producer id */
+				Codec:          ps.parent.conf.Producer.Compression,
+			}
+			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+			size = recordBatchOverhead
+		} else {
+			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+		}
+		partitions[msg.Partition] = set
+	}
+
+	set.msgs = append(set.msgs, msg)
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		// We are being conservative here to avoid having to prep encode the record
+		size += maximumRecordOverhead
+		rec := &Record{
+			Key:            key,
+			Value:          val,
+			TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp),
+		}
+		size += len(key) + len(val)
+		if len(msg.Headers) > 0 {
+			rec.Headers = make([]*RecordHeader, len(msg.Headers))
+			for i := range msg.Headers {
+				rec.Headers[i] = &msg.Headers[i]
+				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+			}
+		}
+		set.recordsToSend.recordBatch.addRecord(rec)
+	} else {
+		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+			msgToSend.Timestamp = timestamp
+			msgToSend.Version = 1
+		}
+		set.recordsToSend.msgSet.addMessage(msgToSend)
+		size = producerMessageOverhead + len(key) + len(val)
+	}
+
+	set.bufferBytes += size
+	ps.bufferBytes += size
+	ps.bufferCount++
+
+	return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+	req := &ProduceRequest{
+		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+		req.Version = 2
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 3
+	}
+
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			if req.Version >= 3 {
+				rb := set.recordsToSend.recordBatch
+				if len(rb.Records) > 0 {
+					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+					for i, record := range rb.Records {
+						record.OffsetDelta = int64(i)
+					}
+				}
+
+				req.AddBatch(topic, partition, rb)
+				continue
+			}
+			if ps.parent.conf.Producer.Compression == CompressionNone {
+				req.AddSet(topic, partition, set.recordsToSend.msgSet)
+			} else {
+				// When compression is enabled, the entire set for each partition is compressed
+				// and sent as the payload of a single fake "message" with the appropriate codec
+				// set and no key. When the server sees a message with a compression codec, it
+				// decompresses the payload and treats the result as its message set.
+
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					// If our version is 0.10 or later, assign relative offsets
+					// to the inner messages. This lets the broker avoid
+					// recompressing the message set.
+					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+					// for details on relative offsets.)
+					for i, msg := range set.recordsToSend.msgSet.Messages {
+						msg.Offset = int64(i)
+					}
+				}
+				payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
+				if err != nil {
+					Logger.Println(err) // if this happens, it's basically our fault.
+					panic(err)
+				}
+				compMsg := &Message{
+					Codec: ps.parent.conf.Producer.Compression,
+					Key:   nil,
+					Value: payload,
+					Set:   set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics
+				}
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					compMsg.Version = 1
+					compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp
+				}
+				req.AddMessage(topic, partition, compMsg)
+			}
+		}
+	}
+
+	return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			cb(topic, partition, set.msgs)
+		}
+	}
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+	if ps.msgs[topic] == nil {
+		return nil
+	}
+	set := ps.msgs[topic][partition]
+	if set == nil {
+		return nil
+	}
+	ps.bufferBytes -= set.bufferBytes
+	ps.bufferCount -= len(set.msgs)
+	delete(ps.msgs[topic], partition)
+	return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+	version := 1
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	switch {
+	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
+		return true
+	// Would we overflow the size-limit of a compressed message-batch for this partition?
+	case ps.parent.conf.Producer.Compression != CompressionNone &&
+		ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+		return true
+	// Would we overflow simply in number of messages?
+	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) readyToFlush() bool {
+	switch {
+	// If we don't have any messages, nothing else matters
+	case ps.empty():
+		return false
+	// If all three config values are 0, we always flush as-fast-as-possible
+	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+		return true
+	// If we've passed the message trigger-point
+	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+		return true
+	// If we've passed the byte trigger-point
+	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) empty() bool {
+	return ps.bufferCount == 0
+}
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 00000000..23045e7d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+var errVarintOverflow = PacketDecodingError{"varint overflow"}
+var errInvalidBool = PacketDecodingError{"invalid bool"}
+
+type realDecoder struct {
+	raw   []byte
+	off   int
+	stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+	if rd.remaining() < 1 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int8(rd.raw[rd.off])
+	rd.off++
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+	if rd.remaining() < 2 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+	rd.off += 2
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+	tmp, n := binary.Varint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	if n < 0 {
+		rd.off -= n
+		return -1, errVarintOverflow
+	}
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+	rd.off += 4
+	if tmp > rd.remaining() {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	} else if tmp > 2*math.MaxUint16 {
+		return -1, errInvalidArrayLength
+	}
+	return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+	b, err := rd.getInt8()
+	if err != nil || b == 0 {
+		return false, err
+	}
+	if b != 1 {
+		return false, errInvalidBool
+	}
+	return true, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+	tmp, err := rd.getInt32()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+	tmp, err := rd.getVarint()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+	length, err := rd.getInt16()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return "", err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 4*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 8*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += 8
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+	return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+	buf, err := rd.getRawBytes(length)
+	if err != nil {
+		return nil, err
+	}
+	return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, errInvalidByteSliceLength
+	} else if length > rd.remaining() {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	start := rd.off
+	rd.off += length
+	return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+	if rd.remaining() < offset+length {
+		return nil, ErrInsufficientData
+	}
+	off := rd.off + offset
+	return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+	in.saveOffset(rd.off)
+
+	var reserve int
+	if dpd, ok := in.(dynamicPushDecoder); ok {
+		if err := dpd.decode(rd); err != nil {
+			return err
+		}
+	} else {
+		reserve = in.reserveLength()
+		if rd.remaining() < reserve {
+			rd.off = len(rd.raw)
+			return ErrInsufficientData
+		}
+	}
+
+	rd.stack = append(rd.stack, in)
+
+	rd.off += reserve
+
+	return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+	in := rd.stack[len(rd.stack)-1]
+	rd.stack = rd.stack[:len(rd.stack)-1]
+
+	return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 00000000..3c75387f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,158 @@
+package sarama
+
+import (
+	"encoding/binary"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+	raw      []byte
+	off      int
+	stack    []pushEncoder
+	registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+	re.raw[re.off] = byte(in)
+	re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+	re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+	re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+	re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+	re.putInt32(int32(in))
+	return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+	if in {
+		re.putInt8(1)
+		return
+	}
+	re.putInt8(0)
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	re.putInt32(int32(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		re.putVarint(-1)
+		return nil
+	}
+	re.putVarint(int64(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+	re.putInt16(int16(len(in)))
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt16(-1)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt64(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) offset() int {
+	return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+	in.saveOffset(re.off)
+	re.off += in.reserveLength()
+	re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+	in := re.stack[len(re.stack)-1]
+	re.stack = re.stack[:len(re.stack)-1]
+
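+	// run() backfills the bytes reserved by push (e.g. a message length or
+	// CRC) now that the enclosed region has been fully written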
+	return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
new file mode 100644
index 00000000..cded308c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -0,0 +1,118 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+const (
+	controlMask           = 0x20
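+	// maximumRecordOverhead is a conservative upper bound on the non-key,
+	// non-value bytes of a single record: the length, offset delta, key
+	// length, value length and header count each encode as at most
+	// MaxVarintLen32 bytes, the timestamp delta as at most MaxVarintLen64,
+	// and the attributes field takes one byte.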
+	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
+)
+
+type RecordHeader struct {
+	Key   []byte
+	Value []byte
+}
+
+func (h *RecordHeader) encode(pe packetEncoder) error {
+	if err := pe.putVarintBytes(h.Key); err != nil {
+		return err
+	}
+	return pe.putVarintBytes(h.Value)
+}
+
+func (h *RecordHeader) decode(pd packetDecoder) (err error) {
+	if h.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if h.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type Record struct {
+	Attributes     int8
+	TimestampDelta time.Duration
+	OffsetDelta    int64
+	Key            []byte
+	Value          []byte
+	Headers        []*RecordHeader
+
+	length varintLengthField
+}
+
+func (r *Record) encode(pe packetEncoder) error {
+	pe.push(&r.length)
+	pe.putInt8(r.Attributes)
+	pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
+	pe.putVarint(r.OffsetDelta)
+	if err := pe.putVarintBytes(r.Key); err != nil {
+		return err
+	}
+	if err := pe.putVarintBytes(r.Value); err != nil {
+		return err
+	}
+	pe.putVarint(int64(len(r.Headers)))
+
+	for _, h := range r.Headers {
+		if err := h.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return pe.pop()
+}
+
+func (r *Record) decode(pd packetDecoder) (err error) {
+	if err = pd.push(&r.length); err != nil {
+		return err
+	}
+
+	if r.Attributes, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	timestamp, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+	r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
+
+	if r.OffsetDelta, err = pd.getVarint(); err != nil {
+		return err
+	}
+
+	if r.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if r.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	numHeaders, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+
+	if numHeaders >= 0 {
+		r.Headers = make([]*RecordHeader, numHeaders)
+	}
+	for i := int64(0); i < numHeaders; i++ {
+		hdr := new(RecordHeader)
+		if err := hdr.decode(pd); err != nil {
+			return err
+		}
+		r.Headers[i] = hdr
+	}
+
+	return pd.pop()
+}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 00000000..321de485
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,259 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	for i := range e {
+		rec := &Record{}
+		if err := rec.decode(pd); err != nil {
+			return err
+		}
+		e[i] = rec
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	Control               bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if err == ErrInsufficientData {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	switch b.Codec {
+	case CompressionNone:
+	case CompressionGZIP:
+		reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
+		if err != nil {
+			return err
+		}
+		if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+	case CompressionSnappy:
+		if recBuffer, err = snappy.Decode(recBuffer); err != nil {
+			return err
+		}
+	case CompressionLZ4:
+		reader := lz4.NewReader(bytes.NewReader(recBuffer))
+		if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+	default:
+		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)}
+	}
+
+	b.recordsLen = len(recBuffer)
+	err = decode(recBuffer, recordsArray(b.Records))
+	if err == ErrInsufficientData {
+		b.PartialTrailingRecord = true
+		b.Records = nil
+		return nil
+	}
+	return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+	var raw []byte
+	var err error
+	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+		return err
+	}
+	b.recordsLen = len(raw)
+
+	switch b.Codec {
+	case CompressionNone:
+		b.compressedRecords = raw
+	case CompressionGZIP:
+		var buf bytes.Buffer
+		writer := gzip.NewWriter(&buf)
+		if _, err := writer.Write(raw); err != nil {
+			return err
+		}
+		if err := writer.Close(); err != nil {
+			return err
+		}
+		b.compressedRecords = buf.Bytes()
+	case CompressionSnappy:
+		b.compressedRecords = snappy.Encode(raw)
+	case CompressionLZ4:
+		var buf bytes.Buffer
+		writer := lz4.NewWriter(&buf)
+		if _, err := writer.Write(raw); err != nil {
+			return err
+		}
+		if err := writer.Close(); err != nil {
+			return err
+		}
+		b.compressedRecords = buf.Bytes()
+	default:
+		return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
+	}
+
+	return nil
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+	attr := int16(b.Codec) & int16(compressionCodecMask)
+	if b.Control {
+		attr |= controlMask
+	}
+	return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+	b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
new file mode 100644
index 00000000..258dcbac
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -0,0 +1,177 @@
+package sarama
+
+import "fmt"
+
+const (
+	unknownRecords = iota
+	legacyRecords
+	defaultRecords
+
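+	// The magic byte sits at absolute offset 16 in both wire formats:
+	// offset(8)+size(4)+crc(4) precede it in a legacy message set, and
+	// baseOffset(8)+batchLength(4)+partitionLeaderEpoch(4) precede it in a
+	// record batch, which is what lets magicValue disambiguate the two.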
+	magicOffset = 16
+	magicLength = 1
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+	recordsType int
+	msgSet      *MessageSet
+	recordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+	return Records{recordsType: legacyRecords, msgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+	return Records{recordsType: defaultRecords, recordBatch: batch}
+}
+
+// setTypeFromFields sets the type of the Records union depending on which of msgSet or recordBatch is non-nil.
+// The first return value indicates whether both fields are nil (in which case the type remains unset).
+// If both fields are set, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+	if r.msgSet == nil && r.recordBatch == nil {
+		return true, nil
+	}
+	if r.msgSet != nil && r.recordBatch != nil {
+		return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
+	}
+	r.recordsType = defaultRecords
+	if r.msgSet != nil {
+		r.recordsType = legacyRecords
+	}
+	return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.msgSet == nil {
+			return nil
+		}
+		return r.msgSet.encode(pe)
+	case defaultRecords:
+		if r.recordBatch == nil {
+			return nil
+		}
+		return r.recordBatch.encode(pe)
+	}
+
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+	magic, err := magicValue(pd)
+	if err != nil {
+		return err
+	}
+
+	r.recordsType = defaultRecords
+	if magic < 2 {
+		r.recordsType = legacyRecords
+	}
+
+	return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+	if r.recordsType == unknownRecords {
+		if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		r.msgSet = &MessageSet{}
+		return r.msgSet.decode(pd)
+	case defaultRecords:
+		r.recordBatch = &RecordBatch{}
+		return r.recordBatch.decode(pd)
+	}
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return 0, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.msgSet == nil {
+			return 0, nil
+		}
+		return len(r.msgSet.Messages), nil
+	case defaultRecords:
+		if r.recordBatch == nil {
+			return 0, nil
+		}
+		return len(r.recordBatch.Records), nil
+	}
+	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.msgSet == nil {
+			return false, nil
+		}
+		return r.msgSet.PartialTrailingMessage, nil
+	case defaultRecords:
+		if r.recordBatch == nil {
+			return false, nil
+		}
+		return r.recordBatch.PartialTrailingRecord, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		return false, nil
+	case defaultRecords:
+		if r.recordBatch == nil {
+			return false, nil
+		}
+		return r.recordBatch.Control, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+	dec, err := pd.peek(magicOffset, magicLength)
+	if err != nil {
+		return 0, err
+	}
+
+	return dec.getInt8()
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 00000000..5f7cb76e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,150 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+	err = pe.putString(r.clientID)
+	if err != nil {
+		return err
+	}
+	err = r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	var key int16
+	if key, err = pd.getInt16(); err != nil {
+		return err
+	}
+	var version int16
+	if version, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if r.correlationID, err = pd.getInt32(); err != nil {
+		return err
+	}
+	r.clientID, err = pd.getString()
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
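+	// a Kafka request frame is a 4-byte big-endian length prefix followed by
+	// that many bytes of payload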
+	lengthBytes := make([]byte, 4)
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(lengthBytes)
+
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(encodedReq)
+
+	req = &request{}
+	if err := decode(encodedReq, req); err != nil {
+		return nil, bytesRead, err
+	}
+	return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
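+	// these are the Kafka protocol API keys; gaps in the numbering (4-7, 21,
+	// 23, 27, 34-36) correspond to requests this version of sarama does not
+	// implement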
+	switch key {
+	case 0:
+		return &ProduceRequest{}
+	case 1:
+		return &FetchRequest{}
+	case 2:
+		return &OffsetRequest{Version: version}
+	case 3:
+		return &MetadataRequest{}
+	case 8:
+		return &OffsetCommitRequest{Version: version}
+	case 9:
+		return &OffsetFetchRequest{}
+	case 10:
+		return &ConsumerMetadataRequest{}
+	case 11:
+		return &JoinGroupRequest{}
+	case 12:
+		return &HeartbeatRequest{}
+	case 13:
+		return &LeaveGroupRequest{}
+	case 14:
+		return &SyncGroupRequest{}
+	case 15:
+		return &DescribeGroupsRequest{}
+	case 16:
+		return &ListGroupsRequest{}
+	case 17:
+		return &SaslHandshakeRequest{}
+	case 18:
+		return &ApiVersionsRequest{}
+	case 19:
+		return &CreateTopicsRequest{}
+	case 20:
+		return &DeleteTopicsRequest{}
+	case 22:
+		return &InitProducerIDRequest{}
+	case 24:
+		return &AddPartitionsToTxnRequest{}
+	case 25:
+		return &AddOffsetsToTxnRequest{}
+	case 26:
+		return &EndTxnRequest{}
+	case 28:
+		return &TxnOffsetCommitRequest{}
+	case 29:
+		return &DescribeAclsRequest{}
+	case 30:
+		return &CreateAclsRequest{}
+	case 31:
+		return &DeleteAclsRequest{}
+	case 32:
+		return &DescribeConfigsRequest{}
+	case 33:
+		return &AlterConfigsRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 00000000..f3f4d27d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 00000000..7d5dc60d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,116 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                         | Type       | Description                                                   |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
+	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
+	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are included in the "all brokers" metrics.
+
+Producer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
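+A minimal produce example (a sketch only: error handling is elided, and
+"localhost:9092" stands in for a real broker address):
+
+	producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
+	if err != nil {
+		panic(err)
+	}
+	defer producer.Close()
+	partition, offset, err := producer.SendMessage(&ProducerMessage{
+		Topic: "example_topic",
+		Value: StringEncoder("hello"),
+	})
+	if err != nil {
+		panic(err)
+	}
+	Logger.Printf("stored at partition=%d, offset=%d", partition, offset)
+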
+*/
+package sarama
+
+import (
+	"io/ioutil"
+	"log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 00000000..fbbc8947
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 00000000..ef290d4b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 00000000..fe207080
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+	GroupId          string
+	GenerationId     int32
+	MemberId         string
+	GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+		return err
+	}
+	for memberId, memberAssignment := range r.GroupAssignments {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := pe.putBytes(memberAssignment); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupAssignments = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		memberAssignment, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.GroupAssignments[memberId] = memberAssignment
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+	if r.GroupAssignments == nil {
+		r.GroupAssignments = make(map[string][]byte)
+	}
+
+	r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+	bin, err := encode(memberAssignment, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupAssignment(memberId, bin)
+	return nil
+}
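As a usage sketch of the request-building helpers (group, member, and topic names are hypothetical), the leader of a consumer group would assemble its assignments like this:

```go
req := &sarama.SyncGroupRequest{
	GroupId:      "my-group",
	GenerationId: 1,
	MemberId:     "member-1",
}
// Hypothetical assignment: member-1 consumes partitions 0 and 1 of "my-topic".
err := req.AddGroupAssignmentMember("member-1", &sarama.ConsumerGroupMemberAssignment{
	Version: 1,
	Topics:  map[string][]int32{"my-topic": {0, 1}},
})
if err != nil {
	// encoding the assignment failed
}
```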
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 00000000..194b382b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+	Err              KError
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	r.MemberAssignment, err = pd.getBytes()
+	return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 00000000..dd096b6d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,164 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It returns the partition and offset of
+	// the produced message, or an error if production failed.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
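A usage sketch under the stated config requirements (broker address and topic are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Both must be true for a SyncProducer, as verifyProducerConfig enforces.
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```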
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	oldMetadata := msg.Metadata
+	defer func() {
+		msg.Metadata = oldMetadata
+	}()
+
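+	// The message's Metadata field is temporarily repurposed as a reply
+	// channel; handleSuccesses and handleErrors send the outcome back on it.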
+	expectation := make(chan *ProducerError, 1)
+	msg.Metadata = expectation
+	sp.producer.Input() <- msg
+
+	if err := <-expectation; err != nil {
+		return -1, -1, err.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	savedMetadata := make([]interface{}, len(msgs))
+	for i := range msgs {
+		savedMetadata[i] = msgs[i].Metadata
+	}
+	defer func() {
+		for i := range msgs {
+			msgs[i].Metadata = savedMetadata[i]
+		}
+	}()
+
+	expectations := make(chan chan *ProducerError, len(msgs))
+	go func() {
+		for _, msg := range msgs {
+			expectation := make(chan *ProducerError, 1)
+			msg.Metadata = expectation
+			sp.producer.Input() <- msg
+			expectations <- expectation
+		}
+		close(expectations)
+	}()
+
+	var errors ProducerErrors
+	for expectation := range expectations {
+		if err := <-expectation; err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.Metadata.(chan *ProducerError)
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.Metadata.(chan *ProducerError)
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 00000000..372278d0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type Timestamp struct {
+	*time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+	timestamp := int64(-1)
+
+	if !t.Before(time.Unix(0, 0)) {
+		timestamp = t.UnixNano() / int64(time.Millisecond)
+	} else if !t.IsZero() {
+		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+	}
+
+	pe.putInt64(timestamp)
+	return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+	millis, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// negative timestamps are invalid; in these cases we return
+	// the zero time
+	timestamp := time.Time{}
+	if millis >= 0 {
+		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+	}
+
+	*t.Time = timestamp
+	return nil
+}
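The wire format is simply milliseconds since the Unix epoch, with -1 meaning "no timestamp". A standalone sketch of the same conversion, outside the unexported encoder/decoder interfaces:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2018, 4, 16, 9, 40, 8, 0, time.UTC)

	// Encode: truncate nanoseconds to milliseconds since the epoch.
	millis := t.UnixNano() / int64(time.Millisecond)

	// Decode: split back into whole seconds plus the millisecond remainder.
	decoded := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))

	fmt.Println(millis, decoded.UTC()) // 1523871608000 2018-04-16 09:40:08 +0000 UTC
}
```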
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
new file mode 100644
index 00000000..71e95b81
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+	TransactionalID string
+	GroupID         string
+	ProducerID      int64
+	ProducerEpoch   int16
+	Topics          map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(t.TransactionalID); err != nil {
+		return err
+	}
+	if err := pe.putString(t.GroupID); err != nil {
+		return err
+	}
+	pe.putInt64(t.ProducerID)
+	pe.putInt16(t.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+	for topic, partitions := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for _, partition := range partitions {
+			if err := partition.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	if t.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionOffsetMetadata)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+		for j := 0; j < m; j++ {
+			partitionOffsetMetadata := new(PartitionOffsetMetadata)
+			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+				return err
+			}
+			t.Topics[topic][j] = partitionOffsetMetadata
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionOffsetMetadata struct {
+	Partition int32
+	Offset    int64
+	Metadata  *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt64(p.Offset)
+	if err := pe.putNullableString(p.Metadata); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if p.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if p.Metadata, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
new file mode 100644
index 00000000..6c980f40
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -0,0 +1,83 @@
+package sarama
+
+import (
+	"time"
+)
+
+type TxnOffsetCommitResponse struct {
+	ThrottleTime time.Duration
+	Topics       map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+
+	for topic, e := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			t.Topics[topic][j] = new(PartitionError)
+			if err := t.Topics[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 00000000..9d7b60f1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,184 @@
+package sarama
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"regexp"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+	return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+	return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+	ret := make([]int32, 0, len(input))
+	for _, val := range input {
+		ret = append(ret, val)
+	}
+	return ret
+}
+
+func withRecover(fn func()) {
+	defer func() {
+		handler := PanicHandler
+		if handler != nil {
+			if err := recover(); err != nil {
+				handler(err)
+			}
+		}
+	}()
+
+	fn()
+}
+
+func safeAsyncClose(b *Broker) {
+	tmp := b // local var prevents clobbering in goroutine
+	go withRecover(func() {
+		if connected, _ := tmp.Connected(); connected {
+			if err := tmp.Close(); err != nil {
+				Logger.Println("Error closing broker", tmp.ID(), ":", err)
+			}
+		}
+	})
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
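Any type can act as a message Key or Value by satisfying Encoder; the only contract is that Length() agrees with len() of the Encode() result. A hypothetical fixed-width key encoder as a sketch:

```go
import "encoding/binary"

// int32Encoder is a hypothetical Encoder for 4-byte big-endian keys.
type int32Encoder int32

func (i int32Encoder) Encode() ([]byte, error) {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(i))
	return buf, nil
}

// Length must agree with len() of the Encode() result: always 4 here.
func (i int32Encoder) Length() int { return 4 }
```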
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//    V1.IsAtLeast(V2) // false
+//    V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Constants defining the supported Kafka versions.
+var (
+	V0_8_2_0   = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1   = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2   = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0   = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1   = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0  = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1  = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0  = newKafkaVersion(0, 10, 1, 0)
+	V0_10_2_0  = newKafkaVersion(0, 10, 2, 0)
+	V0_11_0_0  = newKafkaVersion(0, 11, 0, 0)
+	V1_0_0_0   = newKafkaVersion(1, 0, 0, 0)
+	minVersion = V0_8_2_0
+)
+
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	var major, minor, veryMinor, patch uint
+	var err error
+	if len(s) > 0 && s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return minVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+	if !regexp.MustCompile(pattern).MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	} else {
+		return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+	}
+}
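ParseKafkaVersion accepts both the four-part 0.x.y.z scheme and the three-part scheme used from 1.0.0 onwards, falling back to the minimum supported version (plus an error) on malformed input. A hypothetical helper showing how a parsed version typically feeds the client config:

```go
import (
	"log"

	"github.com/Shopify/sarama"
)

func configureVersion(cfg *sarama.Config, s string) {
	v, err := sarama.ParseKafkaVersion(s) // e.g. "0.10.2.0" or "1.0.0"
	if err != nil {
		log.Fatalln(err)
	}
	cfg.Version = v // gates which request versions Sarama will use
}
```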
diff --git a/vendor/github.com/apache/thrift/LICENSE b/vendor/github.com/apache/thrift/LICENSE
new file mode 100644
index 00000000..3b6d7d74
--- /dev/null
+++ b/vendor/github.com/apache/thrift/LICENSE
@@ -0,0 +1,239 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+--------------------------------------------------
+SOFTWARE DISTRIBUTED WITH THRIFT:
+
+The Apache Thrift software includes a number of subcomponents with
+separate copyright notices and license terms. Your use of the source
+code for the these subcomponents is subject to the terms and
+conditions of the following licenses.
+
+--------------------------------------------------
+Portions of the following files are licensed under the MIT License:
+
+  lib/erl/src/Makefile.am
+
+Please see doc/otp-base-license.txt for the full terms of this license.
+
+--------------------------------------------------
+For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
+
+#   Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
+#
+#   Copying and distribution of this file, with or without
+#   modification, are permitted in any medium without royalty provided
+#   the copyright notice and this notice are preserved.
+
+--------------------------------------------------
+For the lib/nodejs/lib/thrift/json_parse.js:
+
+/*
+    json_parse.js
+    2015-05-02
+    Public Domain.
+    NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+*/
+(By Douglas Crockford <douglas@crockford.com>)
+--------------------------------------------------
diff --git a/vendor/github.com/apache/thrift/NOTICE b/vendor/github.com/apache/thrift/NOTICE
new file mode 100644
index 00000000..902dc8d3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/NOTICE
@@ -0,0 +1,5 @@
+Apache Thrift
+Copyright 2006-2017 The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/vendor/github.com/apache/thrift/contrib/fb303/LICENSE b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE
new file mode 100644
index 00000000..4eacb643
--- /dev/null
+++ b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE
@@ -0,0 +1,16 @@
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
diff --git a/vendor/github.com/apache/thrift/debian/copyright b/vendor/github.com/apache/thrift/debian/copyright
new file mode 100644
index 00000000..850643c9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/debian/copyright
@@ -0,0 +1,129 @@
+This package was debianized by Thrift Developer's <dev@thrift.apache.org>.
+
+
+This package and the Debian packaging is licensed under the Apache License,
+see `/usr/share/common-licenses/Apache-2.0'.
+
+The following information was copied from Apache Thrift LICENSE file.
+
+--------------------------------------------------
+SOFTWARE DISTRIBUTED WITH THRIFT:
+
+The Apache Thrift software includes a number of subcomponents with
+separate copyright notices and license terms. Your use of the source
+code for the these subcomponents is subject to the terms and
+conditions of the following licenses.
+
+--------------------------------------------------
+Portions of the following files are licensed under the MIT License:
+
+  lib/erl/src/Makefile.am
+
+Please see doc/otp-base-license.txt for the full terms of this license.
+
+
+--------------------------------------------------
+The following files contain some portions of code contributed under
+the Thrift Software License (see doc/old-thrift-license.txt), and relicensed
+under the Apache 2.0 License:
+
+  compiler/cpp/Makefile.am
+  compiler/cpp/src/generate/t_cocoa_generator.cc
+  compiler/cpp/src/generate/t_cpp_generator.cc
+  compiler/cpp/src/generate/t_csharp_generator.cc
+  compiler/cpp/src/generate/t_erl_generator.cc
+  compiler/cpp/src/generate/t_hs_generator.cc
+  compiler/cpp/src/generate/t_java_generator.cc
+  compiler/cpp/src/generate/t_ocaml_generator.cc
+  compiler/cpp/src/generate/t_perl_generator.cc
+  compiler/cpp/src/generate/t_php_generator.cc
+  compiler/cpp/src/generate/t_py_generator.cc
+  compiler/cpp/src/generate/t_rb_generator.cc
+  compiler/cpp/src/generate/t_st_generator.cc
+  compiler/cpp/src/generate/t_xsd_generator.cc
+  compiler/cpp/src/main.cc
+  compiler/cpp/src/parse/t_field.h
+  compiler/cpp/src/parse/t_program.h
+  compiler/cpp/src/platform.h
+  compiler/cpp/src/thriftl.ll
+  compiler/cpp/src/thrifty.yy
+  lib/csharp/src/Protocol/TBinaryProtocol.cs
+  lib/csharp/src/Protocol/TField.cs
+  lib/csharp/src/Protocol/TList.cs
+  lib/csharp/src/Protocol/TMap.cs
+  lib/csharp/src/Protocol/TMessage.cs
+  lib/csharp/src/Protocol/TMessageType.cs
+  lib/csharp/src/Protocol/TProtocol.cs
+  lib/csharp/src/Protocol/TProtocolException.cs
+  lib/csharp/src/Protocol/TProtocolFactory.cs
+  lib/csharp/src/Protocol/TProtocolUtil.cs
+  lib/csharp/src/Protocol/TSet.cs
+  lib/csharp/src/Protocol/TStruct.cs
+  lib/csharp/src/Protocol/TType.cs
+  lib/csharp/src/Server/TServer.cs
+  lib/csharp/src/Server/TSimpleServer.cs
+  lib/csharp/src/Server/TThreadPoolServer.cs
+  lib/csharp/src/TApplicationException.cs
+  lib/csharp/src/Thrift.csproj
+  lib/csharp/src/Thrift.sln
+  lib/csharp/src/TProcessor.cs
+  lib/csharp/src/Transport/TServerSocket.cs
+  lib/csharp/src/Transport/TServerTransport.cs
+  lib/csharp/src/Transport/TSocket.cs
+  lib/csharp/src/Transport/TStreamTransport.cs
+  lib/csharp/src/Transport/TTransport.cs
+  lib/csharp/src/Transport/TTransportException.cs
+  lib/csharp/src/Transport/TTransportFactory.cs
+  lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs
+  lib/csharp/ThriftMSBuildTask/ThriftBuild.cs
+  lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj
+  lib/rb/lib/thrift.rb
+  lib/st/README
+  lib/st/thrift.st
+  test/OptionalRequiredTest.cpp
+  test/OptionalRequiredTest.thrift
+  test/ThriftTest.thrift
+
+--------------------------------------------------
+For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
+
+#   Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
+#
+#   Copying and distribution of this file, with or without
+#   modification, are permitted in any medium without royalty provided
+#   the copyright notice and this notice are preserved.
+
+--------------------------------------------------
+For the compiler/cpp/src/md5.[ch] components:
+
+/*
+  Copyright (C) 1999, 2000, 2002 Aladdin Enterprises.  All rights reserved.
+
+  This software is provided 'as-is', without any express or implied
+  warranty.  In no event will the authors be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  L. Peter Deutsch
+  ghost@aladdin.com
+
+ */
+
+---------------------------------------------------
+For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki,
+lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components:
+     Copyright (C) 1999 - 2007  Markus Mottl
+
+Licensed under the terms of the GNU Lesser General Public License 2.1
+(see doc/lgpl-2.1.txt for the full terms of this license)
diff --git a/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER b/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER
new file mode 100644
index 00000000..4eacb643
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER
@@ -0,0 +1,16 @@
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go
new file mode 100644
index 00000000..b9d7eedc
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+const (
+	UNKNOWN_APPLICATION_EXCEPTION  = 0
+	UNKNOWN_METHOD                 = 1
+	INVALID_MESSAGE_TYPE_EXCEPTION = 2
+	WRONG_METHOD_NAME              = 3
+	BAD_SEQUENCE_ID                = 4
+	MISSING_RESULT                 = 5
+	INTERNAL_ERROR                 = 6
+	PROTOCOL_ERROR                 = 7
+)
+
+var defaultApplicationExceptionMessage = map[int32]string{
+	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
+	UNKNOWN_METHOD:                 "unknown method",
+	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+	WRONG_METHOD_NAME:              "wrong method name",
+	BAD_SEQUENCE_ID:                "bad sequence ID",
+	MISSING_RESULT:                 "missing result",
+	INTERNAL_ERROR:                 "unknown internal error",
+	PROTOCOL_ERROR:                 "unknown protocol error",
+}
+
+// Application level Thrift exception
+type TApplicationException interface {
+	TException
+	TypeId() int32
+	Read(iprot TProtocol) error
+	Write(oprot TProtocol) error
+}
+
+type tApplicationException struct {
+	message string
+	type_   int32
+}
+
+func (e tApplicationException) Error() string {
+	if e.message != "" {
+		return e.message
+	}
+	return defaultApplicationExceptionMessage[e.type_]
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+	return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+	return p.type_
+}
+
+func (p *tApplicationException) Read(iprot TProtocol) error {
+	// TODO: this should really be generated by the compiler
+	_, err := iprot.ReadStructBegin()
+	if err != nil {
+		return err
+	}
+
+	message := ""
+	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+	for {
+		_, ttype, id, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return err
+		}
+		if ttype == STOP {
+			break
+		}
+		switch id {
+		case 1:
+			if ttype == STRING {
+				if message, err = iprot.ReadString(); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(iprot, ttype); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if ttype == I32 {
+				if type_, err = iprot.ReadI32(); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(iprot, ttype); err != nil {
+					return err
+				}
+			}
+		default:
+			if err = SkipDefaultDepth(iprot, ttype); err != nil {
+				return err
+			}
+		}
+		if err = iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return err
+	}
+
+	p.message = message
+	p.type_ = type_
+
+	return nil
+}
+
+func (p *tApplicationException) Write(oprot TProtocol) (err error) {
+	err = oprot.WriteStructBegin("TApplicationException")
+	if len(p.Error()) > 0 {
+		err = oprot.WriteFieldBegin("message", STRING, 1)
+		if err != nil {
+			return
+		}
+		err = oprot.WriteString(p.Error())
+		if err != nil {
+			return
+		}
+		err = oprot.WriteFieldEnd()
+		if err != nil {
+			return
+		}
+	}
+	err = oprot.WriteFieldBegin("type", I32, 2)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteI32(p.type_)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldEnd()
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldStop()
+	if err != nil {
+		return
+	}
+	err = oprot.WriteStructEnd()
+	return
+}
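A short sketch of constructing and inspecting one of these exceptions (the message text is illustrative):

```go
import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func example() {
	err := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "no such method: ping")
	fmt.Println(err.Error())  // "no such method: ping"
	fmt.Println(err.TypeId()) // 1 (UNKNOWN_METHOD)
}
```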
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go
new file mode 100644
index 00000000..690d3411
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go
@@ -0,0 +1,514 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+type TBinaryProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+	reader        io.Reader
+	writer        io.Writer
+	strictRead    bool
+	strictWrite   bool
+	buffer        [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+	strictRead  bool
+	strictWrite bool
+}
+
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+	return NewTBinaryProtocol(t, false, true)
+}
+
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+	p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+	if et, ok := t.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(t)
+	}
+	p.reader = p.trans
+	p.writer = p.trans
+	return p
+}
+
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactory(false, true)
+}
+
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+	return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+	return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+}
+
+/**
+ * Writing Methods
+ */
+
+func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+	if p.strictWrite {
+		version := uint32(VERSION_1) | uint32(typeId)
+		e := p.WriteI32(int32(version))
+		if e != nil {
+			return e
+		}
+		e = p.WriteString(name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(seqId)
+		return e
+	} else {
+		e := p.WriteString(name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteByte(int8(typeId))
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(seqId)
+		return e
+	}
+}
+
+func (p *TBinaryProtocol) WriteMessageEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	e := p.WriteByte(int8(typeId))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI16(id)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop() error {
+	e := p.WriteByte(STOP)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	e := p.WriteByte(int8(keyType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteByte(int8(valueType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
+	e := p.WriteByte(int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
+	e := p.WriteByte(int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(value bool) error {
+	if value {
+		return p.WriteByte(1)
+	}
+	return p.WriteByte(0)
+}
+
+func (p *TBinaryProtocol) WriteByte(value int8) error {
+	e := p.trans.WriteByte(byte(value))
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(value int16) error {
+	v := p.buffer[0:2]
+	binary.BigEndian.PutUint16(v, uint16(value))
+	_, e := p.writer.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(value int32) error {
+	v := p.buffer[0:4]
+	binary.BigEndian.PutUint32(v, uint32(value))
+	_, e := p.writer.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(value int64) error {
+	v := p.buffer[0:8]
+	binary.BigEndian.PutUint64(v, uint64(value))
+	_, err := p.writer.Write(v)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(value float64) error {
+	return p.WriteI64(int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(value string) error {
+	e := p.WriteI32(int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.WriteString(value)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(value []byte) error {
+	e := p.WriteI32(int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.writer.Write(value)
+	return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
+func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return "", typeId, 0, NewTProtocolException(e)
+	}
+	if size < 0 {
+		typeId = TMessageType(size & 0x0ff)
+		version := int64(int64(size) & VERSION_MASK)
+		if version != VERSION_1 {
+			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+		}
+		name, e = p.ReadString()
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		seqId, e = p.ReadI32()
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		return name, typeId, seqId, nil
+	}
+	if p.strictRead {
+		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+	}
+	name, e2 := p.readStringBody(size)
+	if e2 != nil {
+		return name, typeId, seqId, e2
+	}
+	b, e3 := p.ReadByte()
+	if e3 != nil {
+		return name, typeId, seqId, e3
+	}
+	typeId = TMessageType(b)
+	seqId, e4 := p.ReadI32()
+	if e4 != nil {
+		return name, typeId, seqId, e4
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+	return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
+	t, err := p.ReadByte()
+	typeId = TType(t)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if t != STOP {
+		seqId, err = p.ReadI16()
+	}
+	return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd() error {
+	return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
+	k, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	kType = TType(k)
+	v, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	vType = TType(v)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
+	b, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	return
+}
+
+func (p *TBinaryProtocol) ReadListEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+	b, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadBool() (bool, error) {
+	b, e := p.ReadByte()
+	v := true
+	if b != 1 {
+		v = false
+	}
+	return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte() (int8, error) {
+	v, err := p.trans.ReadByte()
+	return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+	buf := p.buffer[0:2]
+	err = p.readAll(buf)
+	value = int16(binary.BigEndian.Uint16(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+	buf := p.buffer[0:4]
+	err = p.readAll(buf)
+	value = int32(binary.BigEndian.Uint32(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(buf)
+	value = int64(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(buf)
+	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadString() (value string, err error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return "", e
+	}
+	if size < 0 {
+		err = invalidDataLength
+		return
+	}
+
+	return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return nil, e
+	}
+	if size < 0 {
+		return nil, invalidDataLength
+	}
+	if uint64(size) > p.trans.RemainingBytes() {
+		return nil, invalidDataLength
+	}
+
+	isize := int(size)
+	buf := make([]byte, isize)
+	_, err := io.ReadFull(p.trans, buf)
+	return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush() (err error) {
+	return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(buf []byte) error {
+	_, err := io.ReadFull(p.reader, buf)
+	return NewTProtocolException(err)
+}
+
+const readLimit = 32768
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+	if size < 0 {
+		return "", nil
+	}
+	if uint64(size) > p.trans.RemainingBytes() {
+		return "", invalidDataLength
+	}
+
+	var (
+		buf bytes.Buffer
+		e   error
+		b   []byte
+	)
+
+	switch {
+	case int(size) <= len(p.buffer):
+		b = p.buffer[:size] // avoids allocation for small reads
+	case int(size) < readLimit:
+		b = make([]byte, size)
+	default:
+		b = make([]byte, readLimit)
+	}
+
+	for size > 0 {
+		_, e = io.ReadFull(p.trans, b)
+		buf.Write(b)
+		if e != nil {
+			break
+		}
+		size -= readLimit
+		if size < readLimit && size > 0 {
+			b = b[:size]
+		}
+	}
+	return buf.String(), NewTProtocolException(e)
+}
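Because TBinaryProtocol works against any TTransport, it can be exercised entirely in memory. A minimal round-trip sketch, assuming this library's TMemoryBuffer transport:

```go
import (
	"fmt"
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

func roundTrip() {
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(buf)

	if err := proto.WriteString("hello"); err != nil { // 4-byte length prefix + bytes
		log.Fatalln(err)
	}
	s, err := proto.ReadString()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(s) // hello
}
```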
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
new file mode 100644
index 00000000..b754f925
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+)
+
+type TBufferedTransportFactory struct {
+	size int
+}
+
+type TBufferedTransport struct {
+	bufio.ReadWriter
+	tp TTransport
+}
+
+func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return NewTBufferedTransport(trans, p.size), nil
+}
+
+func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
+	return &TBufferedTransportFactory{size: bufferSize}
+}
+
+func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
+	return &TBufferedTransport{
+		ReadWriter: bufio.ReadWriter{
+			Reader: bufio.NewReaderSize(trans, bufferSize),
+			Writer: bufio.NewWriterSize(trans, bufferSize),
+		},
+		tp: trans,
+	}
+}
+
+func (p *TBufferedTransport) IsOpen() bool {
+	return p.tp.IsOpen()
+}
+
+func (p *TBufferedTransport) Open() (err error) {
+	return p.tp.Open()
+}
+
+func (p *TBufferedTransport) Close() (err error) {
+	return p.tp.Close()
+}
+
+func (p *TBufferedTransport) Read(b []byte) (int, error) {
+	n, err := p.ReadWriter.Read(b)
+	if err != nil {
+		p.ReadWriter.Reader.Reset(p.tp)
+	}
+	return n, err
+}
+
+func (p *TBufferedTransport) Write(b []byte) (int, error) {
+	n, err := p.ReadWriter.Write(b)
+	if err != nil {
+		p.ReadWriter.Writer.Reset(p.tp)
+	}
+	return n, err
+}
+
+func (p *TBufferedTransport) Flush() error {
+	if err := p.ReadWriter.Flush(); err != nil {
+		p.ReadWriter.Writer.Reset(p.tp)
+		return err
+	}
+	return p.tp.Flush()
+}
+
+func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
+	return p.tp.RemainingBytes()
+}
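
TBufferedTransport simply layers bufio on top of another transport: writes stay in the bufio.Writer until Flush, and a failed read or write resets the corresponding side against the underlying transport. A short usage sketch, assuming the TMemoryBuffer transport that this patch vendors alongside:

```go
package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBuffer()
	trans := thrift.NewTBufferedTransport(mem, 4096)

	if _, err := trans.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Nothing reaches the underlying transport until Flush.
	fmt.Println(mem.Len()) // 0
	if err := trans.Flush(); err != nil {
		panic(err)
	}
	fmt.Println(mem.Len()) // 5
}
```
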
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client.go b/vendor/github.com/apache/thrift/lib/go/thrift/client.go
new file mode 100644
index 00000000..8bdb53d8
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/client.go
@@ -0,0 +1,78 @@
+package thrift
+
+import "fmt"
+
+// TStandardClient implements TClient, and uses the standard message format for Thrift.
+// It is not safe for concurrent use.
+type TStandardClient struct {
+	seqId        int32
+	iprot, oprot TProtocol
+}
+
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+	return &TStandardClient{
+		iprot: inputProtocol,
+		oprot: outputProtocol,
+	}
+}
+
+func (p *TStandardClient) Send(oprot TProtocol, seqId int32, method string, args TStruct) error {
+	if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil {
+		return err
+	}
+	if err := args.Write(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteMessageEnd(); err != nil {
+		return err
+	}
+	return oprot.Flush()
+}
+
+func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error {
+	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return err
+	}
+
+	if method != rMethod {
+		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+	} else if seqId != rSeqId {
+		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+	} else if rTypeId == EXCEPTION {
+		var exception tApplicationException
+		if err := exception.Read(iprot); err != nil {
+			return err
+		}
+
+		if err := iprot.ReadMessageEnd(); err != nil {
+			return err
+		}
+
+		return &exception
+	} else if rTypeId != REPLY {
+		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+	}
+
+	if err := result.Read(iprot); err != nil {
+		return err
+	}
+
+	return iprot.ReadMessageEnd()
+}
+
+func (p *TStandardClient) call(method string, args, result TStruct) error {
+	p.seqId++
+	seqId := p.seqId
+
+	if err := p.Send(p.oprot, seqId, method, args); err != nil {
+		return err
+	}
+
+	// method is oneway
+	if result == nil {
+		return nil
+	}
+
+	return p.Recv(p.iprot, seqId, method, result)
+}
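
TStandardClient splits a call into Send (write the CALL envelope, the args struct, and flush) and Recv (check the reply envelope's method name, sequence id, and type before decoding the result). A hypothetical sketch of driving it by hand over an in-memory transport; emptyStruct stands in for a thrift-compiler-generated TStruct, and passing a nil result makes the call oneway so nothing blocks waiting for a reply:

```go
package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// emptyStruct is a hypothetical stand-in for a generated args/result struct.
type emptyStruct struct{}

func (emptyStruct) Write(p thrift.TProtocol) error {
	if err := p.WriteStructBegin("empty"); err != nil {
		return err
	}
	if err := p.WriteFieldStop(); err != nil {
		return err
	}
	return p.WriteStructEnd()
}

func (emptyStruct) Read(p thrift.TProtocol) error {
	if _, err := p.ReadStructBegin(); err != nil {
		return err
	}
	for {
		_, ftype, _, err := p.ReadFieldBegin()
		if err != nil {
			return err
		}
		if ftype == thrift.STOP {
			break
		}
		if err := p.Skip(ftype); err != nil {
			return err
		}
	}
	return p.ReadStructEnd()
}

func main() {
	trans := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(trans)
	client := thrift.NewTStandardClient(proto, proto)

	// A oneway call (nil result) runs Send only and never waits on Recv.
	err := client.Call(context.Background(), "ping", emptyStruct{}, nil)
	fmt.Println(err) // <nil>
}
```
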
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go
new file mode 100644
index 00000000..15c1c52c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go
@@ -0,0 +1,13 @@
+// +build go1.7
+
+package thrift
+
+import "context"
+
+type TClient interface {
+	Call(ctx context.Context, method string, args, result TStruct) error
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error {
+	return p.call(method, args, result)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go
new file mode 100644
index 00000000..d2e99ef2
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go
@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package thrift
+
+import "golang.org/x/net/context"
+
+type TClient interface {
+	Call(ctx context.Context, method string, args, result TStruct) error
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error {
+	return p.call(method, args, result)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go
new file mode 100644
index 00000000..2c729a22
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go
@@ -0,0 +1,32 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+type mockProcessor struct {
+	ProcessFunc func(in, out TProtocol) (bool, TException)
+}
+
+func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+	return m.ProcessFunc(in, out)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go
new file mode 100644
index 00000000..e6d0c4d9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go
@@ -0,0 +1,32 @@
+// +build !go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "golang.org/x/net/context"
+
+type mockProcessor struct {
+	ProcessFunc func(in, out TProtocol) (bool, TException)
+}
+
+func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+	return m.ProcessFunc(in, out)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
new file mode 100644
index 00000000..0bc5fdde
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
@@ -0,0 +1,815 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	COMPACT_PROTOCOL_ID       = 0x082
+	COMPACT_VERSION           = 1
+	COMPACT_VERSION_MASK      = 0x1f
+	COMPACT_TYPE_MASK         = 0x0E0
+	COMPACT_TYPE_BITS         = 0x07
+	COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+	COMPACT_BOOLEAN_TRUE  = 0x01
+	COMPACT_BOOLEAN_FALSE = 0x02
+	COMPACT_BYTE          = 0x03
+	COMPACT_I16           = 0x04
+	COMPACT_I32           = 0x05
+	COMPACT_I64           = 0x06
+	COMPACT_DOUBLE        = 0x07
+	COMPACT_BINARY        = 0x08
+	COMPACT_LIST          = 0x09
+	COMPACT_SET           = 0x0A
+	COMPACT_MAP           = 0x0B
+	COMPACT_STRUCT        = 0x0C
+)
+
+var (
+	ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+	ttypeToCompactType = map[TType]tCompactType{
+		STOP:   STOP,
+		BOOL:   COMPACT_BOOLEAN_TRUE,
+		BYTE:   COMPACT_BYTE,
+		I16:    COMPACT_I16,
+		I32:    COMPACT_I32,
+		I64:    COMPACT_I64,
+		DOUBLE: COMPACT_DOUBLE,
+		STRING: COMPACT_BINARY,
+		LIST:   COMPACT_LIST,
+		SET:    COMPACT_SET,
+		MAP:    COMPACT_MAP,
+		STRUCT: COMPACT_STRUCT,
+	}
+}
+
+type TCompactProtocolFactory struct{}
+
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+	return &TCompactProtocolFactory{}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTCompactProtocol(trans)
+}
+
+type TCompactProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+
+	// Used to keep track of the last field for the current and previous structs,
+	// so we can do the delta stuff.
+	lastField   []int
+	lastFieldId int
+
+	// If we encounter a boolean field begin, save the TField here so it can
+	// have the value incorporated.
+	booleanFieldName    string
+	booleanFieldId      int16
+	booleanFieldPending bool
+
+	// If we read a field header, and it's a boolean field, save the boolean
+	// value here so that readBool can use it.
+	boolValue          bool
+	boolValueIsNotNull bool
+	buffer             [64]byte
+}
+
+// Create a TCompactProtocol given a TTransport
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+	p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+	if et, ok := trans.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(trans)
+	}
+
+	return p
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	_, err = p.writeVarint32(seqid)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	return p.WriteString(name)
+}
+
+func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(name string) error {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd() error {
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	if typeId == BOOL {
+		// we want to possibly include the value, so we'll wait.
+		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+		return nil
+	}
+	_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+	return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+	// if there's a type override, use that.
+	var typeToWrite byte
+	if typeOverride == 0xFF {
+		typeToWrite = byte(p.getCompactType(typeId))
+	} else {
+		typeToWrite = typeOverride
+	}
+	// check if we can use delta encoding for the field id
+	fieldId := int(id)
+	written := 0
+	if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+		// write them together
+		err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+	} else {
+		// write them separate
+		err := p.writeByteDirect(typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+		err = p.WriteI16(id)
+		written = 1 + 2
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	p.lastFieldId = fieldId
+	return written, nil
+}
+
+func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop() error {
+	err := p.writeByteDirect(STOP)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	if size == 0 {
+		err := p.writeByteDirect(0)
+		return NewTProtocolException(err)
+	}
+	_, err := p.writeVarint32(int32(size))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd() error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteBool(value bool) error {
+	v := byte(COMPACT_BOOLEAN_FALSE)
+	if value {
+		v = byte(COMPACT_BOOLEAN_TRUE)
+	}
+	if p.booleanFieldPending {
+		// we haven't written the field header yet
+		_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+		p.booleanFieldPending = false
+		return NewTProtocolException(err)
+	}
+	// we're not part of a field, so just write the value.
+	err := p.writeByteDirect(v)
+	return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(value int8) error {
+	err := p.writeByteDirect(byte(value))
+	return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(value int16) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+	return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(value int32) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(value int64) error {
+	_, err := p.writeVarint64(p.int64ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(value float64) error {
+	buf := p.buffer[0:8]
+	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+	_, err := p.trans.Write(buf)
+	return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(value string) error {
+	_, e := p.writeVarint32(int32(len(value)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	_, e = p.trans.WriteString(value)
+	return NewTProtocolException(e)
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+	_, e := p.writeVarint32(int32(len(bin)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	if len(bin) > 0 {
+		_, e = p.trans.Write(bin)
+		return NewTProtocolException(e)
+	}
+	return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	protocolId, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	if protocolId != COMPACT_PROTOCOL_ID {
+		e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+		return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+	}
+
+	versionAndType, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	version := versionAndType & COMPACT_VERSION_MASK
+	typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+	if version != COMPACT_VERSION {
+		e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+		err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+		return
+	}
+	seqId, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	name, err = p.ReadString()
+	return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd() error {
+	// consume the last field we read off the wire.
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+	t, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	// if it's a stop, then we can return immediately, as the struct is over.
+	if (t & 0x0f) == STOP {
+		return "", STOP, 0, nil
+	}
+
+	// mask off the 4 MSB of the type header. it could contain a field id delta.
+	modifier := int16((t & 0xf0) >> 4)
+	if modifier == 0 {
+		// not a delta. look ahead for the zigzag varint field id.
+		id, err = p.ReadI16()
+		if err != nil {
+			return
+		}
+	} else {
+		// has a delta. add the delta to the last read field id.
+		id = int16(p.lastFieldId) + modifier
+	}
+	typeId, e := p.getTType(tCompactType(t & 0x0f))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+
+	// if this happens to be a boolean field, the value is encoded in the type
+	if p.isBoolType(t) {
+		// save the boolean value in a special instance variable.
+		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+		p.boolValueIsNotNull = true
+	}
+
+	// push the new field onto the field stack so we can keep the deltas going.
+	p.lastFieldId = int(id)
+	return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+	size32, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	keyAndValueType := byte(STOP)
+	if size != 0 {
+		keyAndValueType, err = p.readByteDirect()
+		if err != nil {
+			return
+		}
+	}
+	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+	return
+}
+
+func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+	size_and_type, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+	size = int((size_and_type >> 4) & 0x0f)
+	if size == 15 {
+		size2, e := p.readVarint32()
+		if e != nil {
+			err = NewTProtocolException(e)
+			return
+		}
+		if size2 < 0 {
+			err = invalidDataLength
+			return
+		}
+		size = int(size2)
+	}
+	elemType, e := p.getTType(tCompactType(size_and_type))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	return
+}
+
+func (p *TCompactProtocol) ReadListEnd() error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+	return p.ReadListBegin()
+}
+
+func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+	if p.boolValueIsNotNull {
+		p.boolValueIsNotNull = false
+		return p.boolValue, nil
+	}
+	v, err := p.readByteDirect()
+	return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte() (int8, error) {
+	v, err := p.readByteDirect()
+	if err != nil {
+		return 0, NewTProtocolException(err)
+	}
+	return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16() (value int16, err error) {
+	v, err := p.ReadI32()
+	return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+	v, e := p.readVarint32()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt32(v)
+	return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+	v, e := p.readVarint64()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt64(v)
+	return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+	longBits := p.buffer[0:8]
+	_, e := io.ReadFull(p.trans, longBits)
+	if e != nil {
+		return 0.0, NewTProtocolException(e)
+	}
+	return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Reads a []byte (via readBinary), and then UTF-8 decodes it.
+func (p *TCompactProtocol) ReadString() (value string, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return "", NewTProtocolException(e)
+	}
+	if length < 0 {
+		return "", invalidDataLength
+	}
+	if uint64(length) > p.trans.RemainingBytes() {
+		return "", invalidDataLength
+	}
+
+	if length == 0 {
+		return "", nil
+	}
+	var buf []byte
+	if length <= int32(len(p.buffer)) {
+		buf = p.buffer[0:length]
+	} else {
+		buf = make([]byte, length)
+	}
+	_, e = io.ReadFull(p.trans, buf)
+	return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return nil, NewTProtocolException(e)
+	}
+	if length == 0 {
+		return []byte{}, nil
+	}
+	if length < 0 {
+		return nil, invalidDataLength
+	}
+	if uint64(length) > p.trans.RemainingBytes() {
+		return nil, invalidDataLength
+	}
+
+	buf := make([]byte, length)
+	_, e = io.ReadFull(p.trans, buf)
+	return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush() (err error) {
+	return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+	if size <= 14 {
+		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+	}
+	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+	if err != nil {
+		return 0, err
+	}
+	m, err := p.writeVarint32(int32(size))
+	return 1 + m, err
+}
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+	i32buf := p.buffer[0:5]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			i32buf[idx] = byte(n)
+			idx++
+			break
+		} else {
+			i32buf[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint32(n)
+			n = int32(u >> 7)
+		}
+	}
+	return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+	varint64out := p.buffer[0:10]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			varint64out[idx] = byte(n)
+			idx++
+			break
+		} else {
+			varint64out[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint64(n)
+			n = int64(u >> 7)
+		}
+	}
+	return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+	return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+	return (n << 1) ^ (n >> 31)
+}
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+	return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+	return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+	// if the wire contains the right stuff, this will just truncate the i64 we
+	// read and get us the right sign.
+	v, err := p.readVarint64()
+	return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+	shift := uint(0)
+	result := int64(0)
+	for {
+		b, err := p.readByteDirect()
+		if err != nil {
+			return 0, err
+		}
+		result |= int64(b&0x7f) << shift
+		if (b & 0x80) != 0x80 {
+			break
+		}
+		shift += 7
+	}
+	return result, nil
+}
+
+// Read a raw byte off the wire, unlike ReadByte, which reads a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+	return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+	u := uint32(n)
+	return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+	u := uint64(n)
+	return int64(u>>1) ^ -(n & 1)
+}
+
+// Note that it's important that the mask bytes are long literals,
+// otherwise they'll default to ints, and when you shift an int left 56 bits,
+// you just get a messed up int.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+	return int64(binary.LittleEndian.Uint64(b))
+}
+
+// Note that it's important that the mask bytes are long literals,
+// otherwise they'll default to ints, and when you shift an int left 56 bits,
+// you just get a messed up int.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+	return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+	return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+	switch byte(t) & 0x0f {
+	case STOP:
+		return STOP, nil
+	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+		return BOOL, nil
+	case COMPACT_BYTE:
+		return BYTE, nil
+	case COMPACT_I16:
+		return I16, nil
+	case COMPACT_I32:
+		return I32, nil
+	case COMPACT_I64:
+		return I64, nil
+	case COMPACT_DOUBLE:
+		return DOUBLE, nil
+	case COMPACT_BINARY:
+		return STRING, nil
+	case COMPACT_LIST:
+		return LIST, nil
+	case COMPACT_SET:
+		return SET, nil
+	case COMPACT_MAP:
+		return MAP, nil
+	case COMPACT_STRUCT:
+		return STRUCT, nil
+	}
+	return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+	return ttypeToCompactType[t]
+}
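
The compact protocol's integer encoding above is two composable steps: zigzag-fold signed values so small magnitudes of either sign become small unsigned values, then emit 7 bits per byte with the high bit as a continuation flag. A self-contained sketch of the round trip (illustrative names, standard library only):

```go
package main

import "fmt"

// zigzag folds signed integers so 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4,
// mirroring int64ToZigzag / zigzagToInt64 above.
func zigzag(n int64) uint64   { return uint64((n << 1) ^ (n >> 63)) }
func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

// varintEncode emits 7 bits per byte, least-significant group first,
// with the MSB set on every byte except the last, as writeVarint64 does.
func varintEncode(u uint64) []byte {
	var out []byte
	for u >= 0x80 {
		out = append(out, byte(u)|0x80)
		u >>= 7
	}
	return append(out, byte(u))
}

func varintDecode(b []byte) (u uint64) {
	for i, c := range b {
		u |= uint64(c&0x7f) << (7 * uint(i))
		if c&0x80 == 0 {
			break
		}
	}
	return
}

func main() {
	for _, n := range []int64{0, -1, 1, -2, 63, -64, 1 << 40} {
		wire := varintEncode(zigzag(n))
		fmt.Printf("%14d -> % x -> %d\n", n, wire, unzigzag(varintDecode(wire)))
	}
}
```
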
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
new file mode 100644
index 00000000..d37252cc
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"log"
+)
+
+type TDebugProtocol struct {
+	Delegate  TProtocol
+	LogPrefix string
+}
+
+type TDebugProtocolFactory struct {
+	Underlying TProtocolFactory
+	LogPrefix  string
+}
+
+func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
+	return &TDebugProtocolFactory{
+		Underlying: underlying,
+		LogPrefix:  logPrefix,
+	}
+}
+
+func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return &TDebugProtocol{
+		Delegate:  t.Underlying.GetProtocol(trans),
+		LogPrefix: t.LogPrefix,
+	}
+}
+
+func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+	err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
+	log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteMessageEnd() error {
+	err := tdp.Delegate.WriteMessageEnd()
+	log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
+	err := tdp.Delegate.WriteStructBegin(name)
+	log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteStructEnd() error {
+	err := tdp.Delegate.WriteStructEnd()
+	log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
+	log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldEnd() error {
+	err := tdp.Delegate.WriteFieldEnd()
+	log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldStop() error {
+	err := tdp.Delegate.WriteFieldStop()
+	log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
+	log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteMapEnd() error {
+	err := tdp.Delegate.WriteMapEnd()
+	log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
+	err := tdp.Delegate.WriteListBegin(elemType, size)
+	log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteListEnd() error {
+	err := tdp.Delegate.WriteListEnd()
+	log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
+	err := tdp.Delegate.WriteSetBegin(elemType, size)
+	log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteSetEnd() error {
+	err := tdp.Delegate.WriteSetEnd()
+	log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteBool(value bool) error {
+	err := tdp.Delegate.WriteBool(value)
+	log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteByte(value int8) error {
+	err := tdp.Delegate.WriteByte(value)
+	log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteI16(value int16) error {
+	err := tdp.Delegate.WriteI16(value)
+	log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteI32(value int32) error {
+	err := tdp.Delegate.WriteI32(value)
+	log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteI64(value int64) error {
+	err := tdp.Delegate.WriteI64(value)
+	log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteDouble(value float64) error {
+	err := tdp.Delegate.WriteDouble(value)
+	log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteString(value string) error {
+	err := tdp.Delegate.WriteString(value)
+	log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
+	err := tdp.Delegate.WriteBinary(value)
+	log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	return err
+}
+
+func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
+	name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
+	log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
+	err = tdp.Delegate.ReadMessageEnd()
+	log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
+	name, err = tdp.Delegate.ReadStructBegin()
+	log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
+	err = tdp.Delegate.ReadStructEnd()
+	log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+	name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
+	log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
+	err = tdp.Delegate.ReadFieldEnd()
+	log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+	keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
+	log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
+	err = tdp.Delegate.ReadMapEnd()
+	log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
+	elemType, size, err = tdp.Delegate.ReadListBegin()
+	log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadListEnd() (err error) {
+	err = tdp.Delegate.ReadListEnd()
+	log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+	elemType, size, err = tdp.Delegate.ReadSetBegin()
+	log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
+	err = tdp.Delegate.ReadSetEnd()
+	log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
+	value, err = tdp.Delegate.ReadBool()
+	log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
+	value, err = tdp.Delegate.ReadByte()
+	log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
+	value, err = tdp.Delegate.ReadI16()
+	log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
+	value, err = tdp.Delegate.ReadI32()
+	log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
+	value, err = tdp.Delegate.ReadI64()
+	log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
+	value, err = tdp.Delegate.ReadDouble()
+	log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadString() (value string, err error) {
+	value, err = tdp.Delegate.ReadString()
+	log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
+	value, err = tdp.Delegate.ReadBinary()
+	log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	return
+}
+func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
+	err = tdp.Delegate.Skip(fieldType)
+	log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
+	return
+}
+func (tdp *TDebugProtocol) Flush() (err error) {
+	err = tdp.Delegate.Flush()
+	log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
+	return
+}
+
+func (tdp *TDebugProtocol) Transport() TTransport {
+	return tdp.Delegate.Transport()
+}
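
TDebugProtocol is a pure decorator: each method delegates and then logs the arguments and result via the standard log package. A sketch of wiring it around the binary protocol from this patch:

```go
package main

import "github.com/apache/thrift/lib/go/thrift"

func main() {
	// Wrap a binary protocol factory so every Write*/Read* call is
	// logged with a recognizable prefix for grepping.
	factory := thrift.NewTDebugProtocolFactory(
		thrift.NewTBinaryProtocolFactoryDefault(), "thrift-client: ")
	proto := factory.GetProtocol(thrift.NewTMemoryBuffer())
	_ = proto.WriteI32(7) // logs: thrift-client: WriteI32(value=7) => <nil>
}
```
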
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
new file mode 100644
index 00000000..91a0983a
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TDeserializer struct {
+	Transport TTransport
+	Protocol  TProtocol
+}
+
+func NewTDeserializer() *TDeserializer {
+	transport := NewTMemoryBufferLen(1024)
+
+	protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+
+	return &TDeserializer{
+		Transport: transport,
+		Protocol:  protocol,
+	}
+}
+
+func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
+	if _, err = t.Transport.Write([]byte(s)); err != nil {
+		return
+	}
+	if err = msg.Read(t.Protocol); err != nil {
+		return
+	}
+	return
+}
+
+func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
+	if _, err = t.Transport.Write(b); err != nil {
+		return
+	}
+	if err = msg.Read(t.Protocol); err != nil {
+		return
+	}
+	return
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go
new file mode 100644
index 00000000..ea8d6f66
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+	error
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
+func PrependError(prepend string, err error) error {
+	if t, ok := err.(TTransportException); ok {
+		return NewTTransportException(t.TypeId(), prepend+t.Error())
+	}
+	if t, ok := err.(TProtocolException); ok {
+		return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
+	}
+	if t, ok := err.(TApplicationException); ok {
+		return NewTApplicationException(t.TypeId(), prepend+t.Error())
+	}
+
+	return errors.New(prepend + err.Error())
+}
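
The point of PrependError is that wrapping preserves the concrete exception flavor, so callers that type-assert on TTransportException, TProtocolException, or TApplicationException still match after context is added. A small sketch (TIMED_OUT is one of the transport exception type ids in this package):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// Wrapping a transport exception keeps it a TTransportException.
	base := thrift.NewTTransportException(thrift.TIMED_OUT, "read timeout")
	wrapped := thrift.PrependError("fetching user: ", base)
	_, stillTransport := wrapped.(thrift.TTransportException)
	fmt.Println(wrapped, stillTransport) // fetching user: read timeout true

	// A plain error stays a plain error, just with the prefix.
	fmt.Println(thrift.PrependError("ctx: ", errors.New("boom"))) // ctx: boom
}
```
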
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/field.go b/vendor/github.com/apache/thrift/lib/go/thrift/field.go
new file mode 100644
index 00000000..9d665255
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/field.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "strconv"
+
+// Helper class that encapsulates field metadata.
+type field struct {
+	name   string
+	typeId TType
+	id     int
+}
+
+func newField(n string, t TType, i int) *field {
+	return &field{name: n, typeId: t, id: i}
+}
+
+func (p *field) Name() string {
+	if p == nil {
+		return ""
+	}
+	return p.name
+}
+
+func (p *field) TypeId() TType {
+	if p == nil {
+		return TType(VOID)
+	}
+	return p.typeId
+}
+
+func (p *field) Id() int {
+	if p == nil {
+		return -1
+	}
+	return p.id
+}
+
+func (p *field) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
+}
+
+var ANONYMOUS_FIELD *field
+
+type fieldSlice []field
+
+func (p fieldSlice) Len() int {
+	return len(p)
+}
+
+func (p fieldSlice) Less(i, j int) bool {
+	return p[i].Id() < p[j].Id()
+}
+
+func (p fieldSlice) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+func init() {
+	ANONYMOUS_FIELD = newField("", STOP, 0)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
new file mode 100644
index 00000000..60b12499
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+const DEFAULT_MAX_LENGTH = 16384000
+
+type TFramedTransport struct {
+	transport TTransport
+	buf       bytes.Buffer
+	reader    *bufio.Reader
+	frameSize uint32 // Current remaining size of the frame; if 0, read the next frame header
+	buffer    [4]byte
+	maxLength uint32
+}
+
+type tFramedTransportFactory struct {
+	factory   TTransportFactory
+	maxLength uint32
+}
+
+func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
+	return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
+}
+
+func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
+	return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
+}
+
+func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
+	tt, err := p.factory.GetTransport(base)
+	if err != nil {
+		return nil, err
+	}
+	return NewTFramedTransportMaxLength(tt, p.maxLength), nil
+}
+
+func NewTFramedTransport(transport TTransport) *TFramedTransport {
+	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
+}
+
+func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
+	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
+}
+
+func (p *TFramedTransport) Open() error {
+	return p.transport.Open()
+}
+
+func (p *TFramedTransport) IsOpen() bool {
+	return p.transport.IsOpen()
+}
+
+func (p *TFramedTransport) Close() error {
+	return p.transport.Close()
+}
+
+func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
+	if p.frameSize == 0 {
+		p.frameSize, err = p.readFrameHeader()
+		if err != nil {
+			return
+		}
+	}
+	if p.frameSize < uint32(len(buf)) {
+		frameSize := p.frameSize
+		tmp := make([]byte, p.frameSize)
+		l, err = p.Read(tmp)
+		copy(buf, tmp)
+		if err == nil {
+			err = NewTTransportExceptionFromError(fmt.Errorf("frame size %d too small to read %d bytes", frameSize, len(buf)))
+			return
+		}
+	}
+	got, err := p.reader.Read(buf)
+	// sanity check: frameSize is unsigned, so guard against underflow before subtracting
+	if uint32(got) > p.frameSize {
+		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Read overran the remaining frame size")
+	}
+	p.frameSize = p.frameSize - uint32(got)
+	return got, NewTTransportExceptionFromError(err)
+}
+
+func (p *TFramedTransport) ReadByte() (c byte, err error) {
+	if p.frameSize == 0 {
+		p.frameSize, err = p.readFrameHeader()
+		if err != nil {
+			return
+		}
+	}
+	if p.frameSize < 1 {
+		return 0, NewTTransportExceptionFromError(fmt.Errorf("frame size %d too small to read %d bytes", p.frameSize, 1))
+	}
+	c, err = p.reader.ReadByte()
+	if err == nil {
+		p.frameSize--
+	}
+	return
+}
+
+func (p *TFramedTransport) Write(buf []byte) (int, error) {
+	n, err := p.buf.Write(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TFramedTransport) WriteByte(c byte) error {
+	return p.buf.WriteByte(c)
+}
+
+func (p *TFramedTransport) WriteString(s string) (n int, err error) {
+	return p.buf.WriteString(s)
+}
+
+func (p *TFramedTransport) Flush() error {
+	size := p.buf.Len()
+	buf := p.buffer[:4]
+	binary.BigEndian.PutUint32(buf, uint32(size))
+	_, err := p.transport.Write(buf)
+	if err != nil {
+		p.buf.Truncate(0)
+		return NewTTransportExceptionFromError(err)
+	}
+	if size > 0 {
+		if n, err := p.buf.WriteTo(p.transport); err != nil {
+			print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
+			p.buf.Truncate(0)
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+	err = p.transport.Flush()
+	return NewTTransportExceptionFromError(err)
+}
+
+func (p *TFramedTransport) readFrameHeader() (uint32, error) {
+	buf := p.buffer[:4]
+	if _, err := io.ReadFull(p.reader, buf); err != nil {
+		return 0, err
+	}
+	size := binary.BigEndian.Uint32(buf)
+	if size > p.maxLength { // size is unsigned, so only the upper bound can be violated
+		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
+	}
+	return size, nil
+}
+
+func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.frameSize)
+}
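
TFramedTransport buffers writes and, on Flush, emits one frame: a 4-byte big-endian length header followed by the payload; reads consume a header first and are then bounded by the remaining frame size. A round-trip sketch over the in-memory transport:

```go
package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBuffer()
	framed := thrift.NewTFramedTransport(mem)

	// Writes accumulate in the frame buffer; Flush prepends the
	// 4-byte big-endian length and pushes the frame downstream.
	if _, err := framed.Write([]byte("ping")); err != nil {
		panic(err)
	}
	if err := framed.Flush(); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", mem.Bytes()) // 00 00 00 04 70 69 6e 67

	// Reading strips the header, then returns the payload bytes.
	out := make([]byte, 4)
	if _, err := framed.Read(out); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // ping
}
```
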
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/go17.go
new file mode 100644
index 00000000..e3b21c4b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/go17.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go
new file mode 100644
index 00000000..33f2aa4b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Default to using the shared http client. Library users are
+// free to change this global client or specify one through
+// THttpClientOptions.
+var DefaultHttpClient *http.Client = http.DefaultClient
+
+type THttpClient struct {
+	client             *http.Client
+	response           *http.Response
+	url                *url.URL
+	requestBuffer      *bytes.Buffer
+	header             http.Header
+	nsecConnectTimeout int64
+	nsecReadTimeout    int64
+}
+
+type THttpClientTransportFactory struct {
+	options THttpClientOptions
+	url     string
+}
+
+func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*THttpClient)
+		if ok && t.url != nil {
+			return NewTHttpClientWithOptions(t.url.String(), p.options)
+		}
+	}
+	return NewTHttpClientWithOptions(p.url, p.options)
+}
+
+type THttpClientOptions struct {
+	// If nil, DefaultHttpClient is used
+	Client *http.Client
+}
+
+func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
+}
+
+func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
+	return &THttpClientTransportFactory{url: url, options: options}
+}
+
+func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
+	parsedURL, err := url.Parse(urlstr)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]byte, 0, 1024)
+	client := options.Client
+	if client == nil {
+		client = DefaultHttpClient
+	}
+	httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
+	return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
+}
+
+func NewTHttpClient(urlstr string) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
+}
+
+// SetHeader sets the HTTP header for this specific Thrift transport.
+// It is important that you first assert the TTransport as a *THttpClient
+// like so:
+//
+// httpTrans := trans.(*THttpClient)
+// httpTrans.SetHeader("User-Agent", "Thrift Client 1.0")
+func (p *THttpClient) SetHeader(key string, value string) {
+	p.header.Add(key, value)
+}
+
+// GetHeader gets the HTTP header value for the supplied key on this specific
+// Thrift transport. It is important that you first assert the TTransport as
+// a *THttpClient like so:
+//
+// httpTrans := trans.(*THttpClient)
+// hdrValue := httpTrans.GetHeader("User-Agent")
+func (p *THttpClient) GetHeader(key string) string {
+	return p.header.Get(key)
+}
+
+// DelHeader deletes the HTTP header for the given key on this specific
+// Thrift transport. It is important that you first assert the TTransport as
+// a *THttpClient like so:
+//
+// httpTrans := trans.(*THttpClient)
+// httpTrans.DelHeader("User-Agent")
+func (p *THttpClient) DelHeader(key string) {
+	p.header.Del(key)
+}
+
+func (p *THttpClient) Open() error {
+	// do nothing
+	return nil
+}
+
+func (p *THttpClient) IsOpen() bool {
+	return p.response != nil || p.requestBuffer != nil
+}
+
+func (p *THttpClient) closeResponse() error {
+	var err error
+	if p.response != nil && p.response.Body != nil {
+		// The docs specify that if keepalive is enabled and the response body is not
+		// read to completion the connection will never be returned to the pool and
+		// reused. Errors are being ignored here because if the connection is invalid
+		// and this fails for some reason, the Close() method will do any remaining
+		// cleanup.
+		io.Copy(ioutil.Discard, p.response.Body)
+
+		err = p.response.Body.Close()
+	}
+
+	p.response = nil
+	return err
+}
+
+func (p *THttpClient) Close() error {
+	if p.requestBuffer != nil {
+		p.requestBuffer.Reset()
+		p.requestBuffer = nil
+	}
+	return p.closeResponse()
+}
+
+func (p *THttpClient) Read(buf []byte) (int, error) {
+	if p.response == nil {
+		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
+	}
+	n, err := p.response.Body.Read(buf)
+	if n > 0 && (err == nil || err == io.EOF) {
+		return n, nil
+	}
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *THttpClient) ReadByte() (c byte, err error) {
+	return readByte(p.response.Body)
+}
+
+func (p *THttpClient) Write(buf []byte) (int, error) {
+	n, err := p.requestBuffer.Write(buf)
+	return n, err
+}
+
+func (p *THttpClient) WriteByte(c byte) error {
+	return p.requestBuffer.WriteByte(c)
+}
+
+func (p *THttpClient) WriteString(s string) (n int, err error) {
+	return p.requestBuffer.WriteString(s)
+}
+
+func (p *THttpClient) Flush() error {
+	// Close any previous response body to avoid leaking connections.
+	p.closeResponse()
+
+	req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
+	if err != nil {
+		return NewTTransportExceptionFromError(err)
+	}
+	req.Header = p.header
+	response, err := p.client.Do(req)
+	if err != nil {
+		return NewTTransportExceptionFromError(err)
+	}
+	if response.StatusCode != http.StatusOK {
+		// Close the response to avoid leaking file descriptors. closeResponse does
+		// more than just call Close(), so temporarily assign it and reuse the logic.
+		p.response = response
+		p.closeResponse()
+
+		// TODO(pomack) log bad response
+		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
+	}
+	p.response = response
+	return nil
+}
+
+func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
+	contentLength := p.response.ContentLength
+	if contentLength >= 0 {
+		return uint64(contentLength)
+	}
+
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+// Deprecated: Use NewTHttpClientTransportFactory instead.
+func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
+}
+
+// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
+func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, options)
+}
+
+// Deprecated: Use NewTHttpClientWithOptions instead.
+func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, options)
+}
+
+// Deprecated: Use NewTHttpClient instead.
+func NewTHttpPostClient(urlstr string) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
+}
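+
+// Illustrative sketch (not part of the upstream library): a typical client
+// round trip. Write only buffers the request body; Flush performs the POST
+// and makes the response available for Read.
+//
+//	trans, _ := NewTHttpClient("http://localhost:9090/thrift")
+//	trans.Write([]byte("payload"))
+//	trans.Flush() // POSTs the buffered bytes
+//	buf := make([]byte, 64)
+//	n, _ := trans.Read(buf) // reads from the HTTP response body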
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
new file mode 100644
index 00000000..601855b9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"compress/gzip"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// gz transparently compresses the HTTP response if the client supports it.
+func gz(handler http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+			handler(w, r)
+			return
+		}
+		w.Header().Set("Content-Encoding", "gzip")
+		gz := gzip.NewWriter(w)
+		defer gz.Close()
+		gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
+		handler(gzw, r)
+	}
+}
+
+type gzipResponseWriter struct {
+	io.Writer
+	http.ResponseWriter
+}
+
+func (w gzipResponseWriter) Write(b []byte) (int, error) {
+	return w.Writer.Write(b)
+}
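+
+// Illustrative sketch (not part of the upstream library): gz can wrap any
+// http.HandlerFunc; clients that do not send Accept-Encoding: gzip receive
+// the response uncompressed.
+//
+//	http.HandleFunc("/echo", gz(func(w http.ResponseWriter, r *http.Request) {
+//		w.Write([]byte("hello")) // gzipped only when the client supports it
+//	}))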
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go
new file mode 100644
index 00000000..1313ac22
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go
@@ -0,0 +1,38 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net/http"
+)
+
+// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function
+func NewThriftHandlerFunc(processor TProcessor,
+	inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
+
+	return gz(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Add("Content-Type", "application/x-thrift")
+
+		transport := NewStreamTransport(r.Body, w)
+		processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
+	})
+}
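+
+// Illustrative sketch (not part of the upstream library): mounting the
+// handler on the default mux; "processor" stands for a generated service
+// processor and is assumed to exist.
+//
+//	factory := NewTJSONProtocolFactory()
+//	http.HandleFunc("/thrift", NewThriftHandlerFunc(processor, factory, factory))
+//	http.ListenAndServe(":9090", nil)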
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go
new file mode 100644
index 00000000..13aa1c11
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go
@@ -0,0 +1,40 @@
+// +build !go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function
+func NewThriftHandlerFunc(processor TProcessor,
+	inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
+
+	return gz(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Add("Content-Type", "application/x-thrift")
+
+		transport := NewStreamTransport(r.Body, w)
+		processor.Process(context.Background(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
+	})
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go
new file mode 100644
index 00000000..b18be81c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"io"
+)
+
+// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
+type StreamTransport struct {
+	io.Reader
+	io.Writer
+	isReadWriter bool
+	closed       bool
+}
+
+type StreamTransportFactory struct {
+	Reader       io.Reader
+	Writer       io.Writer
+	isReadWriter bool
+}
+
+func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*StreamTransport)
+		if ok {
+			if t.isReadWriter {
+				return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
+			}
+			if t.Reader != nil && t.Writer != nil {
+				return NewStreamTransport(t.Reader, t.Writer), nil
+			}
+			if t.Reader != nil && t.Writer == nil {
+				return NewStreamTransportR(t.Reader), nil
+			}
+			if t.Reader == nil && t.Writer != nil {
+				return NewStreamTransportW(t.Writer), nil
+			}
+			return &StreamTransport{}, nil
+		}
+	}
+	if p.isReadWriter {
+		return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
+	}
+	if p.Reader != nil && p.Writer != nil {
+		return NewStreamTransport(p.Reader, p.Writer), nil
+	}
+	if p.Reader != nil && p.Writer == nil {
+		return NewStreamTransportR(p.Reader), nil
+	}
+	if p.Reader == nil && p.Writer != nil {
+		return NewStreamTransportW(p.Writer), nil
+	}
+	return &StreamTransport{}, nil
+}
+
+func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
+	return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
+}
+
+func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
+	return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
+}
+
+func NewStreamTransportR(r io.Reader) *StreamTransport {
+	return &StreamTransport{Reader: bufio.NewReader(r)}
+}
+
+func NewStreamTransportW(w io.Writer) *StreamTransport {
+	return &StreamTransport{Writer: bufio.NewWriter(w)}
+}
+
+func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
+	bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
+	return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
+}
+
+func (p *StreamTransport) IsOpen() bool {
+	return !p.closed
+}
+
+// implicitly opened on creation, can't be reopened once closed
+func (p *StreamTransport) Open() error {
+	if !p.closed {
+		return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
+	} else {
+		return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
+	}
+}
+
+// Closes both the input and output streams.
+func (p *StreamTransport) Close() error {
+	if p.closed {
+		return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
+	}
+	p.closed = true
+	closedReader := false
+	if p.Reader != nil {
+		c, ok := p.Reader.(io.Closer)
+		if ok {
+			e := c.Close()
+			closedReader = true
+			if e != nil {
+				return e
+			}
+		}
+		p.Reader = nil
+	}
+	if p.Writer != nil && (!closedReader || !p.isReadWriter) {
+		c, ok := p.Writer.(io.Closer)
+		if ok {
+			e := c.Close()
+			if e != nil {
+				return e
+			}
+		}
+		p.Writer = nil
+	}
+	return nil
+}
+
+// Flushes the underlying output stream if not nil.
+func (p *StreamTransport) Flush() error {
+	if p.Writer == nil {
+		return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
+	}
+	f, ok := p.Writer.(Flusher)
+	if ok {
+		err := f.Flush()
+		if err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+	return nil
+}
+
+func (p *StreamTransport) Read(c []byte) (n int, err error) {
+	n, err = p.Reader.Read(c)
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) ReadByte() (c byte, err error) {
+	f, ok := p.Reader.(io.ByteReader)
+	if ok {
+		c, err = f.ReadByte()
+	} else {
+		c, err = readByte(p.Reader)
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) Write(c []byte) (n int, err error) {
+	n, err = p.Writer.Write(c)
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) WriteByte(c byte) (err error) {
+	f, ok := p.Writer.(io.ByteWriter)
+	if ok {
+		err = f.WriteByte(c)
+	} else {
+		err = writeByte(p.Writer, c)
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) WriteString(s string) (n int, err error) {
+	f, ok := p.Writer.(stringWriter)
+	if ok {
+		n, err = f.WriteString(s)
+	} else {
+		n, err = p.Writer.Write([]byte(s))
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
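+
+// Illustrative sketch (not part of the upstream library): a StreamTransport
+// can wrap any io.ReadWriter, e.g. an in-memory buffer in tests.
+//
+//	var buf bytes.Buffer
+//	trans := NewStreamTransportRW(&buf)
+//	trans.WriteString("ping")
+//	trans.Flush() // drains the bufio writer into buf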
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go
new file mode 100644
index 00000000..442fa914
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go
@@ -0,0 +1,583 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/base64"
+	"fmt"
+)
+
+const (
+	THRIFT_JSON_PROTOCOL_VERSION = 1
+)
+
+// for references to _ParseContext see tsimplejson_protocol.go
+
+// TJSONProtocol is the full-featured JSON protocol implementation for Thrift.
+//
+// It produces/consumes JSON that carries explicit type information and
+// should not be confused with TSimpleJSONProtocol, which emits a simpler
+// output format suitable for parsing by scripting languages.
+//
+type TJSONProtocol struct {
+	*TSimpleJSONProtocol
+}
+
+// Constructor
+func NewTJSONProtocol(t TTransport) *TJSONProtocol {
+	v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
+	v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
+	v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+	return v
+}
+
+// Factory
+type TJSONProtocolFactory struct{}
+
+func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTJSONProtocol(trans)
+}
+
+func NewTJSONProtocolFactory() *TJSONProtocolFactory {
+	return &TJSONProtocolFactory{}
+}
+
+func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
+		return e
+	}
+	if e := p.WriteString(name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteMessageEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteStructBegin(name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteStructEnd() error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	if e := p.WriteI16(id); e != nil {
+		return e
+	}
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(typeId)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(s); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteFieldEnd() error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TJSONProtocol) WriteFieldStop() error { return nil }
+
+func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(keyType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(s); e != nil {
+		return e
+	}
+	s, e1 = p.TypeIdToString(valueType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(s); e != nil {
+		return e
+	}
+	if e := p.WriteI64(int64(size)); e != nil {
+		return e
+	}
+	return p.OutputObjectBegin()
+}
+
+func (p *TJSONProtocol) WriteMapEnd() error {
+	if e := p.OutputObjectEnd(); e != nil {
+		return e
+	}
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TJSONProtocol) WriteListEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TJSONProtocol) WriteSetEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteBool(b bool) error {
+	if b {
+		return p.WriteI32(1)
+	}
+	return p.WriteI32(0)
+}
+
+func (p *TJSONProtocol) WriteByte(b int8) error {
+	return p.WriteI32(int32(b))
+}
+
+func (p *TJSONProtocol) WriteI16(v int16) error {
+	return p.WriteI32(int32(v))
+}
+
+func (p *TJSONProtocol) WriteI32(v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TJSONProtocol) WriteI64(v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TJSONProtocol) WriteDouble(v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TJSONProtocol) WriteString(v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TJSONProtocol) WriteBinary(v []byte) error {
+	// JSON only supports strings, not arbitrary byte arrays. To transmit
+	// bytes efficiently we must convert them into a valid JSON string, so
+	// we use base64 encoding to avoid excessive escaping/quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
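+
+// Illustrative sketch (not part of the upstream library): WriteBinary emits
+// a quoted base64 string, so []byte{0xde, 0xad} travels on the wire as
+// "3q0=" rather than as escaped raw bytes:
+//
+//	enc := base64.StdEncoding.EncodeToString([]byte{0xde, 0xad}) // "3q0="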
+
+// Reading methods.
+func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	version, err := p.ReadI32()
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if version != THRIFT_JSON_PROTOCOL_VERSION {
+		e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
+		return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+	}
+	if name, err = p.ReadString(); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte()
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TJSONProtocol) ReadMessageEnd() error {
+	err := p.ParseListEnd()
+	return err
+}
+
+func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TJSONProtocol) ReadStructEnd() error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+	b, _ := p.reader.Peek(1)
+	if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
+		return "", STOP, -1, nil
+	}
+	fieldId, err := p.ReadI16()
+	if err != nil {
+		return "", STOP, fieldId, err
+	}
+	if _, err = p.ParseObjectStart(); err != nil {
+		return "", STOP, fieldId, err
+	}
+	sType, err := p.ReadString()
+	if err != nil {
+		return "", STOP, fieldId, err
+	}
+	fType, err := p.StringToTypeId(sType)
+	return "", fType, fieldId, err
+}
+
+func (p *TJSONProtocol) ReadFieldEnd() error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	sKeyType, e := p.ReadString()
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	keyType, e = p.StringToTypeId(sKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	sValueType, e := p.ReadString()
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	valueType, e = p.StringToTypeId(sValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, e := p.ReadI64()
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	size = int(iSize)
+
+	_, e = p.ParseObjectStart()
+	return keyType, valueType, size, e
+}
+
+func (p *TJSONProtocol) ReadMapEnd() error {
+	e := p.ParseObjectEnd()
+	if e != nil {
+		return e
+	}
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TJSONProtocol) ReadListEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TJSONProtocol) ReadSetEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadBool() (bool, error) {
+	value, err := p.ReadI32()
+	return (value != 0), err
+}
+
+func (p *TJSONProtocol) ReadByte() (int8, error) {
+	v, err := p.ReadI64()
+	return int8(v), err
+}
+
+func (p *TJSONProtocol) ReadI16() (int16, error) {
+	v, err := p.ReadI64()
+	return int16(v), err
+}
+
+func (p *TJSONProtocol) ReadI32() (int32, error) {
+	v, err := p.ReadI64()
+	return int32(v), err
+}
+
+func (p *TJSONProtocol) ReadI64() (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TJSONProtocol) ReadDouble() (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TJSONProtocol) ReadString() (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TJSONProtocol) Flush() (err error) {
+	err = p.writer.Flush()
+	if err == nil {
+		err = p.trans.Flush()
+	}
+	return NewTProtocolException(err)
+}
+
+func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(elemType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(s); e != nil {
+		return e
+	}
+	if e := p.WriteI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	sElemType, err := p.ReadString()
+	if err != nil {
+		return VOID, size, err
+	}
+	elemType, err = p.StringToTypeId(sElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, err2 := p.ReadI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	sElemType, err := p.ReadString()
+	if err != nil {
+		return VOID, size, err
+	}
+	elemType, err = p.StringToTypeId(sElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, err2 := p.ReadI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(elemType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.OutputString(s); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
+	switch byte(fieldType) {
+	case BOOL:
+		return "tf", nil
+	case BYTE:
+		return "i8", nil
+	case I16:
+		return "i16", nil
+	case I32:
+		return "i32", nil
+	case I64:
+		return "i64", nil
+	case DOUBLE:
+		return "dbl", nil
+	case STRING:
+		return "str", nil
+	case STRUCT:
+		return "rec", nil
+	case MAP:
+		return "map", nil
+	case SET:
+		return "set", nil
+	case LIST:
+		return "lst", nil
+	}
+
+	e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
+	return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
+	switch fieldType {
+	case "tf":
+		return TType(BOOL), nil
+	case "i8":
+		return TType(BYTE), nil
+	case "i16":
+		return TType(I16), nil
+	case "i32":
+		return TType(I32), nil
+	case "i64":
+		return TType(I64), nil
+	case "dbl":
+		return TType(DOUBLE), nil
+	case "str":
+		return TType(STRING), nil
+	case "rec":
+		return TType(STRUCT), nil
+	case "map":
+		return TType(MAP), nil
+	case "set":
+		return TType(SET), nil
+	case "lst":
+		return TType(LIST), nil
+	}
+
+	e := fmt.Errorf("Unknown type identifier: %s", fieldType)
+	return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
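+
+// Illustrative sketch (not part of the upstream library): a round trip
+// through the JSON protocol over an in-memory transport.
+//
+//	trans := NewTMemoryBuffer()
+//	proto := NewTJSONProtocol(trans)
+//	proto.WriteI32(42)
+//	proto.Flush()
+//	v, _ := proto.ReadI32() // v == 42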
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
new file mode 100644
index 00000000..97a4edfa
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+	*bytes.Buffer
+	size int
+}
+
+type TMemoryBufferTransportFactory struct {
+	size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*TMemoryBuffer)
+		if ok && t.size > 0 {
+			return NewTMemoryBufferLen(t.size), nil
+		}
+	}
+	return NewTMemoryBufferLen(p.size), nil
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+	return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+	buf := make([]byte, 0, size)
+	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
+
+func (p *TMemoryBuffer) IsOpen() bool {
+	return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+	p.Buffer.Reset()
+	return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.Buffer.Len())
+}
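+
+// Illustrative sketch (not part of the upstream library): TMemoryBuffer
+// embeds bytes.Buffer, so it doubles as an io.ReadWriter in tests.
+//
+//	buf := NewTMemoryBuffer()
+//	buf.WriteString(`{"ok":true}`)
+//	remaining := buf.RemainingBytes() // 11, the unread byte count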
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go
new file mode 100644
index 00000000..25ab2e98
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+	INVALID_TMESSAGE_TYPE TMessageType = 0
+	CALL                  TMessageType = 1
+	REPLY                 TMessageType = 2
+	EXCEPTION             TMessageType = 3
+	ONEWAY                TMessageType = 4
+)
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go
new file mode 100644
index 00000000..b7f4f8a1
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+/*
+TMultiplexedProtocol is a protocol-independent concrete decorator
+that allows a Thrift client to communicate with a multiplexing Thrift server,
+by prepending the service name to the function name during function calls.
+
+NOTE: THIS IS NOT USED BY SERVERS.  On the server, use TMultiplexedProcessor to handle requests
+from a multiplexing client.
+
+This example uses a single socket transport to invoke two services:
+
+socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
+transport := thrift.NewTFramedTransport(socket)
+protocol := thrift.NewTBinaryProtocolTransport(transport)
+
+mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
+service := Calculator.NewCalculatorClient(mp)
+
+mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
+service2 := WeatherReport.NewWeatherReportClient(mp2)
+
+err := transport.Open()
+if err != nil {
+	t.Fatal("Unable to open client socket", err)
+}
+
+fmt.Println(service.Add(2,2))
+fmt.Println(service2.GetTemperature())
+*/
+
+type TMultiplexedProtocol struct {
+	TProtocol
+	serviceName string
+}
+
+const MULTIPLEXED_SEPARATOR = ":"
+
+func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
+	return &TMultiplexedProtocol{
+		TProtocol:   protocol,
+		serviceName: serviceName,
+	}
+}
+
+func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+	if typeId == CALL || typeId == ONEWAY {
+		return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
+	} else {
+		return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
+	}
+}
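+
+// Illustrative sketch (not part of the upstream library): only CALL and
+// ONEWAY names are prefixed, so replies flow back unmodified.
+//
+//	mp := NewTMultiplexedProtocol(proto, "Calculator")
+//	mp.WriteMessageBegin("add", CALL, 1) // wire name: "Calculator:add"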
+
+/*
+TMultiplexedProcessor is a TProcessor allowing
+a single TServer to provide multiple services.
+
+To do so, you instantiate the processor and then register additional
+processors with it, as shown in the following example:
+
+var processor = thrift.NewTMultiplexedProcessor()
+
+processor.RegisterProcessor(
+  "Calculator",
+  Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
+)
+
+processor.RegisterProcessor(
+  "WeatherReport",
+  WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
+)
+
+serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
+if err != nil {
+  t.Fatal("Unable to create server socket", err)
+}
+server := thrift.NewTSimpleServer2(processor, serverTransport)
+server.Serve();
+*/
+
+type TMultiplexedProcessor struct {
+	serviceProcessorMap map[string]TProcessor
+	DefaultProcessor    TProcessor
+}
+
+func NewTMultiplexedProcessor() *TMultiplexedProcessor {
+	return &TMultiplexedProcessor{
+		serviceProcessorMap: make(map[string]TProcessor),
+	}
+}
+
+func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
+	t.DefaultProcessor = processor
+}
+
+func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
+	if t.serviceProcessorMap == nil {
+		t.serviceProcessorMap = make(map[string]TProcessor)
+	}
+	t.serviceProcessorMap[name] = processor
+}
+
+// storedMessageProtocol is a protocol decorator that replays a stored
+// message for ReadMessageBegin.
+type storedMessageProtocol struct {
+	TProtocol
+	name   string
+	typeId TMessageType
+	seqid  int32
+}
+
+func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
+	return &storedMessageProtocol{protocol, name, typeId, seqid}
+}
+
+func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
+	return s.name, s.typeId, s.seqid, nil
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go
new file mode 100644
index 00000000..c71035e6
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go
@@ -0,0 +1,53 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+	name, typeId, seqid, err := in.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if typeId != CALL && typeId != ONEWAY {
+		return false, fmt.Errorf("Unexpected message type %v", typeId)
+	}
+	//extract the service name
+	v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
+	if len(v) != 2 {
+		if t.DefaultProcessor != nil {
+			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
+			return t.DefaultProcessor.Process(ctx, smb, out)
+		}
+		return false, fmt.Errorf("Service name not found in message name: %s.  Did you forget to use a TMultiplexProtocol in your client?", name)
+	}
+	actualProcessor, ok := t.serviceProcessorMap[v[0]]
+	if !ok {
+		return false, fmt.Errorf("Service name not found: %s.  Did you forget to call registerProcessor()?", v[0])
+	}
+	smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
+	return actualProcessor.Process(ctx, smb, out)
+}
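+
+// Illustrative sketch (not part of the upstream library): the incoming name
+// is split on the first separator only, so method names may themselves
+// contain ":".
+//
+//	v := strings.SplitN("Calculator:ns:add", ":", 2)
+//	// v[0] == "Calculator", v[1] == "ns:add"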
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go
new file mode 100644
index 00000000..5c27b387
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go
@@ -0,0 +1,54 @@
+// +build !go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+	name, typeId, seqid, err := in.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if typeId != CALL && typeId != ONEWAY {
+		return false, fmt.Errorf("Unexpected message type %v", typeId)
+	}
+	//extract the service name
+	v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
+	if len(v) != 2 {
+		if t.DefaultProcessor != nil {
+			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
+			return t.DefaultProcessor.Process(ctx, smb, out)
+		}
+		return false, fmt.Errorf("Service name not found in message name: %s.  Did you forget to use a TMultiplexProtocol in your client?", name)
+	}
+	actualProcessor, ok := t.serviceProcessorMap[v[0]]
+	if !ok {
+		return false, fmt.Errorf("Service name not found: %s.  Did you forget to call registerProcessor()?", v[0])
+	}
+	smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
+	return actualProcessor.Process(ctx, smb, out)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
new file mode 100644
index 00000000..aa8daa9b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"math"
+	"strconv"
+)
+
+type Numeric interface {
+	Int64() int64
+	Int32() int32
+	Int16() int16
+	Byte() byte
+	Int() int
+	Float64() float64
+	Float32() float32
+	String() string
+	isNull() bool
+}
+
+type numeric struct {
+	iValue int64
+	dValue float64
+	sValue string
+	isNil  bool
+}
+
+var (
+	INFINITY          Numeric
+	NEGATIVE_INFINITY Numeric
+	NAN               Numeric
+	ZERO              Numeric
+	NUMERIC_NULL      Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+	if math.IsInf(dValue, 1) {
+		return INFINITY
+	}
+	if math.IsInf(dValue, -1) {
+		return NEGATIVE_INFINITY
+	}
+	if math.IsNaN(dValue) {
+		return NAN
+	}
+	iValue := int64(dValue)
+	sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+	dValue := float64(iValue)
+	// string(iValue) would yield the rune with that code point, not digits.
+	sValue := strconv.FormatInt(iValue, 10)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10)
+	isNil := false
+	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+	if sValue == INFINITY.String() {
+		return INFINITY
+	}
+	if sValue == NEGATIVE_INFINITY.String() {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == NAN.String() {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	isNil := len(sValue) == 0
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+	if isNull {
+		return NewNullNumeric()
+	}
+	if sValue == JSON_INFINITY {
+		return INFINITY
+	}
+	if sValue == JSON_NEGATIVE_INFINITY {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == JSON_NAN {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+	return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+	return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+	return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+	return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+	return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+	return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+	return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+	return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+	return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+	return p.isNil
+}
+
+func init() {
+	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
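+
+// Illustrative sketch (not part of the upstream library): the sentinels
+// initialized above let JSON special numbers round-trip through Numeric
+// (example assumes the math package).
+//
+//	n := NewNumericFromString("Infinity")
+//	isInf := math.IsInf(n.Float64(), 1) // true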
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go
new file mode 100644
index 00000000..8d6b2c21
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is home to helpers that convert from various base types to
+// respective pointer types. This is necessary because Go does not permit
+// references to constants, nor can a pointer type to base type be allocated
+// and initialized in a single expression.
+//
+// E.g., this is not allowed:
+//
+//    var ip *int = &5
+//
+// But this *is* allowed:
+//
+//    func IntPtr(i int) *int { return &i }
+//    var ip *int = IntPtr(5)
+//
+// Since pointers to base types are commonplace as [optional] fields in
+// exported thrift structs, we factor such helpers here.
+///////////////////////////////////////////////////////////////////////////////
+
+func Float32Ptr(v float32) *float32 { return &v }
+func Float64Ptr(v float64) *float64 { return &v }
+func IntPtr(v int) *int             { return &v }
+func Int32Ptr(v int32) *int32       { return &v }
+func Int64Ptr(v int64) *int64       { return &v }
+func StringPtr(v string) *string    { return &v }
+func Uint32Ptr(v uint32) *uint32    { return &v }
+func Uint64Ptr(v uint64) *uint64    { return &v }
+func BoolPtr(v bool) *bool          { return &v }
+func ByteSlicePtr(v []byte) *[]byte { return &v }
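+
+// Illustrative sketch (not part of the upstream library): the helpers above
+// make it easy to populate optional (pointer-typed) fields on generated
+// structs; "Request" is a hypothetical generated type.
+//
+//	req := &Request{Timeout: Int32Ptr(30), Name: StringPtr("query")}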
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go
new file mode 100644
index 00000000..cb564b8d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "golang.org/x/net/context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor.go
new file mode 100644
index 00000000..566aaaf7
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor.go
@@ -0,0 +1,34 @@
+// +build !go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "golang.org/x/net/context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
new file mode 100644
index 00000000..9d645df2
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+	GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+	processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+	return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+	return p.processor
+}
+
+// The default processor function factory just returns a singleton
+// instance.
+type TProcessorFunctionFactory interface {
+	GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+	processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+	return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+	return p.processor
+}
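+
+// Illustrative sketch (not part of the upstream library): wrapping an
+// existing processor so every connection reuses the same instance;
+// "processor" and "trans" are assumed to exist.
+//
+//	factory := NewTProcessorFactory(processor)
+//	p := factory.GetProcessor(trans) // always the same singleton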
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go
new file mode 100644
index 00000000..fb0b165d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go
@@ -0,0 +1,34 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// TProcessor is a generic object that operates on an input protocol
+// stream and writes to an output protocol stream.
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go
new file mode 100644
index 00000000..25e6d24b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	VERSION_MASK = 0xffff0000
+	VERSION_1    = 0x80010000
+)
+
+type TProtocol interface {
+	WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
+	WriteMessageEnd() error
+	WriteStructBegin(name string) error
+	WriteStructEnd() error
+	WriteFieldBegin(name string, typeId TType, id int16) error
+	WriteFieldEnd() error
+	WriteFieldStop() error
+	WriteMapBegin(keyType TType, valueType TType, size int) error
+	WriteMapEnd() error
+	WriteListBegin(elemType TType, size int) error
+	WriteListEnd() error
+	WriteSetBegin(elemType TType, size int) error
+	WriteSetEnd() error
+	WriteBool(value bool) error
+	WriteByte(value int8) error
+	WriteI16(value int16) error
+	WriteI32(value int32) error
+	WriteI64(value int64) error
+	WriteDouble(value float64) error
+	WriteString(value string) error
+	WriteBinary(value []byte) error
+
+	ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
+	ReadMessageEnd() error
+	ReadStructBegin() (name string, err error)
+	ReadStructEnd() error
+	ReadFieldBegin() (name string, typeId TType, id int16, err error)
+	ReadFieldEnd() error
+	ReadMapBegin() (keyType TType, valueType TType, size int, err error)
+	ReadMapEnd() error
+	ReadListBegin() (elemType TType, size int, err error)
+	ReadListEnd() error
+	ReadSetBegin() (elemType TType, size int, err error)
+	ReadSetEnd() error
+	ReadBool() (value bool, err error)
+	ReadByte() (value int8, err error)
+	ReadI16() (value int16, err error)
+	ReadI32() (value int32, err error)
+	ReadI64() (value int64, err error)
+	ReadDouble() (value float64, err error)
+	ReadString() (value string, err error)
+	ReadBinary() (value []byte, err error)
+
+	Skip(fieldType TType) (err error)
+	Flush() (err error)
+
+	Transport() TTransport
+}
+
+// DEFAULT_RECURSION_DEPTH is the maximum recursion depth the Skip function will traverse.
+const DEFAULT_RECURSION_DEPTH = 64
+
+// SkipDefaultDepth skips over the next data element from the provided input TProtocol object, using the default recursion depth.
+func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
+	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
+
+// Skip skips over the next data element from the provided input TProtocol object, descending at most maxDepth nested levels.
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+	}
+
+	switch fieldType {
+	case STOP:
+		return
+	case BOOL:
+		_, err = self.ReadBool()
+		return
+	case BYTE:
+		_, err = self.ReadByte()
+		return
+	case I16:
+		_, err = self.ReadI16()
+		return
+	case I32:
+		_, err = self.ReadI32()
+		return
+	case I64:
+		_, err = self.ReadI64()
+		return
+	case DOUBLE:
+		_, err = self.ReadDouble()
+		return
+	case STRING:
+		_, err = self.ReadString()
+		return
+	case STRUCT:
+		if _, err = self.ReadStructBegin(); err != nil {
+			return err
+		}
+		for {
+			// Surface errors from ReadFieldBegin and ReadFieldEnd
+			// rather than discarding them.
+			_, typeId, _, err := self.ReadFieldBegin()
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			if err := self.ReadFieldEnd(); err != nil {
+				return err
+			}
+		}
+		return self.ReadStructEnd()
+	case MAP:
+		keyType, valueType, size, err := self.ReadMapBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			if err := Skip(self, keyType, maxDepth-1); err != nil {
+				return err
+			}
+			// Skip the value as well, honoring the same recursion budget.
+			if err := Skip(self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+		}
+		return self.ReadMapEnd()
+	case SET:
+		elemType, size, err := self.ReadSetBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadSetEnd()
+	case LIST:
+		elemType, size, err := self.ReadListBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadListEnd()
+	default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
+	}
+	return nil
+}
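+
+// Editor's illustrative sketch (not upstream code): hand-writing a struct
+// with a single string field uses the same begin/end call sequence that
+// generated Thrift code emits. prot may be any TProtocol implementation;
+// writeExample is a hypothetical helper.
+//
+//	func writeExample(prot TProtocol, value string) error {
+//		if err := prot.WriteStructBegin("Example"); err != nil {
+//			return err
+//		}
+//		if err := prot.WriteFieldBegin("value", STRING, 1); err != nil {
+//			return err
+//		}
+//		if err := prot.WriteString(value); err != nil {
+//			return err
+//		}
+//		if err := prot.WriteFieldEnd(); err != nil {
+//			return err
+//		}
+//		if err := prot.WriteFieldStop(); err != nil {
+//			return err
+//		}
+//		if err := prot.WriteStructEnd(); err != nil {
+//			return err
+//		}
+//		return prot.Flush()
+//	}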
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
new file mode 100644
index 00000000..29ab75d9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/base64"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+	TException
+	TypeId() int
+}
+
+const (
+	UNKNOWN_PROTOCOL_EXCEPTION = 0
+	INVALID_DATA               = 1
+	NEGATIVE_SIZE              = 2
+	SIZE_LIMIT                 = 3
+	BAD_VERSION                = 4
+	NOT_IMPLEMENTED            = 5
+	DEPTH_LIMIT                = 6
+)
+
+type tProtocolException struct {
+	typeId  int
+	message string
+}
+
+func (p *tProtocolException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+	return p.message
+}
+
+func (p *tProtocolException) Error() string {
+	return p.message
+}
+
+func NewTProtocolException(err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	if e, ok := err.(TProtocolException); ok {
+		return e
+	}
+	if _, ok := err.(base64.CorruptInputError); ok {
+		return &tProtocolException{INVALID_DATA, err.Error()}
+	}
+	return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	return &tProtocolException{errType, err.Error()}
+}
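+
+// Editor's illustrative sketch (not upstream code): NewTProtocolException
+// is the standard way to surface I/O errors from protocol code. It keeps
+// an existing TProtocolException as-is, maps base64 corruption to
+// INVALID_DATA, and passes nil through unchanged. readPayload is a
+// hypothetical helper.
+//
+//	if err := readPayload(); err != nil {
+//		return NewTProtocolException(err)
+//	}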
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go
new file mode 100644
index 00000000..c40f796d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+	GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go
new file mode 100644
index 00000000..4025bebe
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "io"
+
+type RichTransport struct {
+	TTransport
+}
+
+// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
+func NewTRichTransport(trans TTransport) *RichTransport {
+	return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+	return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+	return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+	return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (numBytes uint64) {
+	return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+	v := [1]byte{0}
+	n, err := r.Read(v[0:1])
+	if n > 0 && (err == nil || err == io.EOF) {
+		return v[0], nil
+	}
+	if n > 0 && err != nil {
+		return v[0], err
+	}
+	if err != nil {
+		return 0, err
+	}
+	return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+	v := [1]byte{c}
+	_, err := w.Write(v[0:1])
+	return err
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go
new file mode 100644
index 00000000..77122299
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TSerializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+type TStruct interface {
+	Write(p TProtocol) error
+	Read(p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+
+	return &TSerializer{
+		transport,
+		protocol}
+}
+
+func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(); err != nil {
+		return
+	}
+	if err = t.Transport.Flush(); err != nil {
+		return
+	}
+
+	return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(); err != nil {
+		return
+	}
+
+	if err = t.Transport.Flush(); err != nil {
+		return
+	}
+
+	b = append(b, t.Transport.Bytes()...)
+	return
+}
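+
+// Editor's illustrative usage (not upstream code): serializing a generated
+// Thrift struct to bytes with the default binary protocol. msg stands in
+// for any type implementing TStruct.
+//
+//	s := NewTSerializer()
+//	data, err := s.Write(msg)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = data // binary-protocol encoding of msg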
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/vendor/github.com/apache/thrift/lib/go/thrift/server.go
new file mode 100644
index 00000000..f813fa35
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/server.go
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TServer interface {
+	ProcessorFactory() TProcessorFactory
+	ServerTransport() TServerTransport
+	InputTransportFactory() TTransportFactory
+	OutputTransportFactory() TTransportFactory
+	InputProtocolFactory() TProtocolFactory
+	OutputProtocolFactory() TProtocolFactory
+
+	// Starts the server
+	Serve() error
+	// Stops the server. This is optional on a per-implementation basis. Not
+	// all servers are required to be cleanly stoppable.
+	Stop() error
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go
new file mode 100644
index 00000000..80313c4b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+type TServerSocket struct {
+	listener      net.Listener
+	addr          net.Addr
+	clientTimeout time.Duration
+
+	// Protects the interrupted value to make it thread safe.
+	mu          sync.RWMutex
+	interrupted bool
+}
+
+func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
+	return NewTServerSocketTimeout(listenAddr, 0)
+}
+
+func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
+	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
+}
+
+// NewTServerSocketFromAddrTimeout creates a TServerSocket from a net.Addr.
+func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
+	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
+}
+
+func (p *TServerSocket) Listen() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.IsListening() {
+		return nil
+	}
+	l, err := net.Listen(p.addr.Network(), p.addr.String())
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TServerSocket) Accept() (TTransport, error) {
+	p.mu.RLock()
+	interrupted := p.interrupted
+	p.mu.RUnlock()
+
+	if interrupted {
+		return nil, errTransportInterrupted
+	}
+
+	listener := p.listener
+	if listener == nil {
+		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
+	}
+
+	conn, err := listener.Accept()
+	if err != nil {
+		return nil, NewTTransportExceptionFromError(err)
+	}
+	return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
+}
+
+// IsListening reports whether the server socket is currently listening.
+func (p *TServerSocket) IsListening() bool {
+	return p.listener != nil
+}
+
+// Open starts listening on the configured address, creating the underlying listener if necessary.
+func (p *TServerSocket) Open() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.IsListening() {
+		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
+	}
+	if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
+		return err
+	} else {
+		p.listener = l
+	}
+	return nil
+}
+
+func (p *TServerSocket) Addr() net.Addr {
+	if p.listener != nil {
+		return p.listener.Addr()
+	}
+	return p.addr
+}
+
+func (p *TServerSocket) Close() error {
+	defer func() {
+		p.listener = nil
+	}()
+	if p.IsListening() {
+		return p.listener.Close()
+	}
+	return nil
+}
+
+func (p *TServerSocket) Interrupt() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.interrupted = true
+	p.Close()
+
+	return nil
+}
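+
+// Editor's illustrative accept loop (not upstream code): listen on a local
+// port and hand each client transport to its own goroutine until the
+// socket is interrupted or closed. handleClient is a hypothetical handler.
+//
+//	socket, err := NewTServerSocket("127.0.0.1:9090")
+//	if err != nil {
+//		return err
+//	}
+//	if err := socket.Listen(); err != nil {
+//		return err
+//	}
+//	for {
+//		client, err := socket.Accept()
+//		if err != nil {
+//			return err
+//		}
+//		go handleClient(client)
+//	}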
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go
new file mode 100644
index 00000000..51c40b64
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TServerTransport is the server-side transport: it listens for and yields client transports.
+type TServerTransport interface {
+	Listen() error
+	Accept() (TTransport, error)
+	Close() error
+
+	// Optional method implementation. This signals to the server transport
+	// that it should break out of any accept() or listen() that it is currently
+	// blocked on. This method, if implemented, MUST be thread safe, as it may
+	// be called from a different thread context than the other TServerTransport
+	// methods.
+	Interrupt() error
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go
new file mode 100644
index 00000000..73533223
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go
@@ -0,0 +1,1337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+)
+
+type _ParseContext int
+
+const (
+	_CONTEXT_IN_TOPLEVEL          _ParseContext = 1
+	_CONTEXT_IN_LIST_FIRST        _ParseContext = 2
+	_CONTEXT_IN_LIST              _ParseContext = 3
+	_CONTEXT_IN_OBJECT_FIRST      _ParseContext = 4
+	_CONTEXT_IN_OBJECT_NEXT_KEY   _ParseContext = 5
+	_CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+)
+
+func (p _ParseContext) String() string {
+	switch p {
+	case _CONTEXT_IN_TOPLEVEL:
+		return "TOPLEVEL"
+	case _CONTEXT_IN_LIST_FIRST:
+		return "LIST-FIRST"
+	case _CONTEXT_IN_LIST:
+		return "LIST"
+	case _CONTEXT_IN_OBJECT_FIRST:
+		return "OBJECT-FIRST"
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		return "OBJECT-NEXT-KEY"
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		return "OBJECT-NEXT-VALUE"
+	}
+	return "UNKNOWN-PARSE-CONTEXT"
+}
+
+// JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages.  It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+	trans TTransport
+
+	parseContextStack []int
+	dumpContext       []int
+
+	writer *bufio.Writer
+	reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+	v := &TSimpleJSONProtocol{trans: t,
+		writer: bufio.NewWriter(t),
+		reader: bufio.NewReader(t),
+	}
+	v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
+	v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+	return v
+}
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+	return &TSimpleJSONProtocolFactory{}
+}
+
+var (
+	JSON_COMMA                   []byte
+	JSON_COLON                   []byte
+	JSON_LBRACE                  []byte
+	JSON_RBRACE                  []byte
+	JSON_LBRACKET                []byte
+	JSON_RBRACKET                []byte
+	JSON_QUOTE                   byte
+	JSON_QUOTE_BYTES             []byte
+	JSON_NULL                    []byte
+	JSON_TRUE                    []byte
+	JSON_FALSE                   []byte
+	JSON_INFINITY                string
+	JSON_NEGATIVE_INFINITY       string
+	JSON_NAN                     string
+	JSON_INFINITY_BYTES          []byte
+	JSON_NEGATIVE_INFINITY_BYTES []byte
+	JSON_NAN_BYTES               []byte
+	json_nonbase_map_elem_bytes  []byte
+)
+
+func init() {
+	JSON_COMMA = []byte{','}
+	JSON_COLON = []byte{':'}
+	JSON_LBRACE = []byte{'{'}
+	JSON_RBRACE = []byte{'}'}
+	JSON_LBRACKET = []byte{'['}
+	JSON_RBRACKET = []byte{']'}
+	JSON_QUOTE = '"'
+	JSON_QUOTE_BYTES = []byte{'"'}
+	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+	JSON_INFINITY = "Infinity"
+	JSON_NEGATIVE_INFINITY = "-Infinity"
+	JSON_NAN = "NaN"
+	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+	b, _ := json.Marshal(s)
+	s1 := string(b)
+	return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+	s1 := new(string)
+	err := json.Unmarshal([]byte(s), s1)
+	return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteString(name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	if e := p.WriteString(name); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
+	//return p.OutputListEnd()
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(keyType)); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(valueType)); e != nil {
+		return e
+	}
+	return p.WriteI32(int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+	return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
+	return p.WriteI32(int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
+	return p.WriteI32(int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+	// The JSON library accepts only strings, not arbitrary byte arrays.
+	// To transmit bytes efficiently as a valid JSON string, we base64-
+	// encode them, which avoids excessive escaping and quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	if name, err = p.ReadString(); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte()
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return "", STOP, 0, err
+	}
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 {
+		switch b[0] {
+		case JSON_RBRACE[0]:
+			return "", STOP, 0, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			name, err := p.ParseStringBody()
+			// SimpleJSON is not meant to be read back into Thrift
+			// (see http://wiki.apache.org/thrift/ThriftUsageJava);
+			// use the full JSON protocol instead.
+			if err != nil {
+				return name, STOP, 0, err
+			}
+			return name, STOP, -1, p.ParsePostValue()
+			/*
+			   if err = p.ParsePostValue(); err != nil {
+			     return name, STOP, 0, err
+			   }
+			   if isNull, err := p.ParseListBegin(); isNull || err != nil {
+			     return name, STOP, 0, err
+			   }
+			   bType, err := p.ReadByte()
+			   thetype := TType(bType)
+			   if err != nil {
+			     return name, thetype, 0, err
+			   }
+			   id, err := p.ReadI16()
+			   return name, thetype, id, err
+			*/
+		}
+		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+	return nil
+	//return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	bKeyType, e := p.ReadByte()
+	keyType = TType(bKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	bValueType, e := p.ReadByte()
+	valueType = TType(bValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, err := p.ReadI64()
+	size = int(iSize)
+	return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+	var value bool
+
+	if err := p.ParsePreValue(); err != nil {
+		return value, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 {
+		switch f[0] {
+		case JSON_TRUE[0]:
+			b := make([]byte, len(JSON_TRUE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_TRUE) {
+				value = true
+			} else {
+				e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			break
+		case JSON_FALSE[0]:
+			b := make([]byte, len(JSON_FALSE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_FALSE) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			break
+		case JSON_NULL[0]:
+			b := make([]byte, len(JSON_NULL))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_NULL) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		default:
+			e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+			return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
+	v, err := p.ReadI64()
+	return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
+	v, err := p.ReadI64()
+	return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
+	v, err := p.ReadI64()
+	return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush() (err error) {
+	return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if _, e := p.write(JSON_COMMA); e != nil {
+			return NewTProtocolException(e)
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if _, e := p.write(JSON_COLON); e != nil {
+			return NewTProtocolException(e)
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
+		break
+	case _CONTEXT_IN_OBJECT_FIRST:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if value {
+		v = string(JSON_TRUE)
+	} else {
+		v = string(JSON_FALSE)
+	}
+	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	default:
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_NULL); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if math.IsNaN(value) {
+		v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+	} else if math.IsInf(value, 1) {
+		v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+	} else if math.IsInf(value, -1) {
+		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+	} else {
+		v = strconv.FormatFloat(value, 'g', -1, 64)
+		switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+		default:
+		}
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	v := strconv.FormatInt(value, 10)
+	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	default:
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if e := p.OutputStringData(jsonQuote(s)); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+	_, e := p.write([]byte(s))
+	return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+	if _, e := p.write(JSON_RBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+	if _, e := p.write(JSON_RBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(elemType)); e != nil {
+		return e
+	}
+	if e := p.WriteI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	b, _ := p.reader.Peek(1)
+	switch cxt {
+	case _CONTEXT_IN_LIST:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACKET[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACE[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_COLON[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
+		break
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+	for {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return nil
+		}
+		switch b[0] {
+		case ' ', '\r', '\n', '\t':
+			p.reader.ReadByte()
+			continue
+		default:
+			break
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+		if !ok {
+			return "", NewTProtocolException(err)
+		}
+		return v, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	str := string(JSON_QUOTE) + line + s
+	v, ok := jsonUnquote(str)
+	if !ok {
+		e := fmt.Errorf("Unable to parse as JSON string %s", str)
+		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		return line, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	v := line + s
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+	line, err := p.reader.ReadBytes(JSON_QUOTE)
+	if err != nil {
+		return line, NewTProtocolException(err)
+	}
+	line2 := line[0 : len(line)-1]
+	l := len(line2)
+	if (l % 4) != 0 {
+		pad := 4 - (l % 4)
+		fill := [...]byte{'=', '=', '='}
+		line2 = append(line2, fill[:pad]...)
+		l = len(line2)
+	}
+	output := make([]byte, base64.StdEncoding.DecodedLen(l))
+	n, err := base64.StdEncoding.Decode(output, line2)
+	return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value int64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Int64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value float64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Float64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return false, err
+	}
+	var b []byte
+	b, err := p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+		p.reader.ReadByte()
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+		return false, nil
+	} else if p.safePeekContains(JSON_NULL) {
+		return true, nil
+	}
+	e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+	return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+		e := fmt.Errorf("Expected to be in Object Context, but was in context %d", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACE[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', '}':
+			break
+		}
+	}
+	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+	if e := p.ParsePreValue(); e != nil {
+		return false, e
+	}
+	var b []byte
+	b, err = p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+		p.reader.ReadByte()
+		isNull = false
+	} else if p.safePeekContains(JSON_NULL) {
+		isNull = true
+	} else {
+		err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+	}
+	return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	bElemType, err := p.ReadByte()
+	elemType = TType(bElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, err2 := p.ReadI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	if cxt != _CONTEXT_IN_LIST {
+		e := fmt.Errorf("Expected to be in List Context, but was in context %d", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACKET[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+			break
+		}
+	}
+	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+		return nil
+	}
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+	e := p.readNonSignificantWhitespace()
+	if e != nil {
+		return nil, VOID, NewTProtocolException(e)
+	}
+	b, e := p.reader.Peek(1)
+	if len(b) > 0 {
+		c := b[0]
+		switch c {
+		case JSON_NULL[0]:
+			buf := make([]byte, len(JSON_NULL))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return nil, VOID, NewTProtocolException(e)
+			}
+			if string(JSON_NULL) != string(buf) {
+				e = mismatch(string(JSON_NULL), string(buf))
+				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return nil, VOID, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			v, e := p.ParseStringBody()
+			if e != nil {
+				return v, UTF8, NewTProtocolException(e)
+			}
+			if v == JSON_INFINITY {
+				return INFINITY, DOUBLE, nil
+			} else if v == JSON_NEGATIVE_INFINITY {
+				return NEGATIVE_INFINITY, DOUBLE, nil
+			} else if v == JSON_NAN {
+				return NAN, DOUBLE, nil
+			}
+			return v, UTF8, nil
+		case JSON_TRUE[0]:
+			buf := make([]byte, len(JSON_TRUE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return true, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_TRUE) != string(buf) {
+				e := mismatch(string(JSON_TRUE), string(buf))
+				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return true, BOOL, nil
+		case JSON_FALSE[0]:
+			buf := make([]byte, len(JSON_FALSE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return false, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_FALSE) != string(buf) {
+				e := mismatch(string(JSON_FALSE), string(buf))
+				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return false, BOOL, nil
+		case JSON_LBRACKET[0]:
+			_, e := p.reader.ReadByte()
+			return make([]interface{}, 0), LIST, NewTProtocolException(e)
+		case JSON_LBRACE[0]:
+			_, e := p.reader.ReadByte()
+			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+			// assume numeric
+			v, e := p.readNumeric()
+			return v, DOUBLE, e
+		default:
+			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+	cont := true
+	for cont {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return false, nil
+		}
+		switch b[0] {
+		default:
+			return false, nil
+		case JSON_NULL[0]:
+			// Stop scanning whitespace; the "null" token starts here.
+			cont = false
+		case ' ', '\n', '\r', '\t':
+			p.reader.ReadByte()
+		}
+	}
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		return true, nil
+	}
+	return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 && b[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+	}
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+	isNull, err := p.readIfNull()
+	if isNull || err != nil {
+		return NUMERIC_NULL, err
+	}
+	hasDecimalPoint := false
+	nextCanBeSign := true
+	hasE := false
+	MAX_LEN := 40
+	buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+	continueFor := true
+	inQuotes := false
+	for continueFor {
+		c, err := p.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return NUMERIC_NULL, NewTProtocolException(err)
+		}
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case '.':
+			if hasDecimalPoint {
+				e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasDecimalPoint, nextCanBeSign = true, false
+		case 'e', 'E':
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasE, nextCanBeSign = true, true
+		case '-', '+':
+			if !nextCanBeSign {
+				e := fmt.Errorf("Unexpected sign character within number")
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+			p.reader.UnreadByte()
+			continueFor = false
+		case JSON_NAN[0]:
+			if buf.Len() == 0 {
+				buffer := make([]byte, len(JSON_NAN))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NAN != string(buffer) {
+					e := mismatch(JSON_NAN, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NAN, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_INFINITY[0]:
+			if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+				buffer := make([]byte, len(JSON_INFINITY))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_INFINITY != string(buffer) {
+					e := mismatch(JSON_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return INFINITY, nil
+			} else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+				buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+				buffer[0] = JSON_NEGATIVE_INFINITY[0]
+				buffer[1] = c
+				_, e := p.reader.Read(buffer[2:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NEGATIVE_INFINITY != string(buffer) {
+					e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NEGATIVE_INFINITY, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_QUOTE:
+			if !inQuotes {
+				inQuotes = true
+			}
+			// a closing quote is consumed here; the next byte (a delimiter)
+			// terminates the number via the cases above
+		default:
+			e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+			return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	if buf.Len() == 0 {
+		e := fmt.Errorf("Unable to parse number from empty string ''")
+		return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return NewNumericFromJSONString(buf.String(), false), nil
+}
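+
+// Illustrative note (not part of the upstream file): readNumeric accepts
+// plain and signed integers, decimals, exponents, and the quoted special
+// values, e.g.
+//
+// 	123  -42  3.14  2.5e-3  "NaN"  "Infinity"  "-Infinity"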
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+	for i := 0; i < len(b); i++ {
+		a, _ := p.reader.Peek(i + 1)
+		if len(a) == 0 || a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+	p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
+	p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+	n, err := p.writer.Write(b)
+	if err != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+	}
+	return n, err
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go
new file mode 100644
index 00000000..37081bd8
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"log"
+	"runtime/debug"
+	"sync"
+	"sync/atomic"
+)
+
+/*
+ * This is not a typical TSimpleServer as it does not block after accepting a socket.
+ * It is more like a TThreadedServer that handles each connection in its own goroutine.
+ * This works well when the Go client implements some kind of connection pool.
+ */
+type TSimpleServer struct {
+	closed int32
+	wg     sync.WaitGroup
+	mu     sync.Mutex
+
+	processorFactory       TProcessorFactory
+	serverTransport        TServerTransport
+	inputTransportFactory  TTransportFactory
+	outputTransportFactory TTransportFactory
+	inputProtocolFactory   TProtocolFactory
+	outputProtocolFactory  TProtocolFactory
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+		serverTransport,
+		transportFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+		serverTransport,
+		inputTransportFactory,
+		outputTransportFactory,
+		inputProtocolFactory,
+		outputProtocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		NewTTransportFactory(),
+		NewTTransportFactory(),
+		NewTBinaryProtocolFactoryDefault(),
+		NewTBinaryProtocolFactoryDefault(),
+	)
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		transportFactory,
+		transportFactory,
+		protocolFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return &TSimpleServer{
+		processorFactory:       processorFactory,
+		serverTransport:        serverTransport,
+		inputTransportFactory:  inputTransportFactory,
+		outputTransportFactory: outputTransportFactory,
+		inputProtocolFactory:   inputProtocolFactory,
+		outputProtocolFactory:  outputProtocolFactory,
+	}
+}
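+
+// Example (an illustrative sketch, not part of the upstream file; it assumes
+// a processor value produced by Thrift-generated code):
+//
+// 	transport, _ := NewTServerSocket("localhost:9090")
+// 	server := NewTSimpleServer4(processor, transport,
+// 		NewTTransportFactory(),
+// 		NewTBinaryProtocolFactoryDefault(),
+// 	)
+// 	_ = server.Serve() // blocks; call server.Stop() elsewhere to shut down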
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+	return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+	return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+	return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+	return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+	return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+	return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+	return p.serverTransport.Listen()
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+	for {
+		client, err := p.serverTransport.Accept()
+		p.mu.Lock()
+		if atomic.LoadInt32(&p.closed) != 0 {
+			p.mu.Unlock()
+			return nil
+		}
+		if err != nil {
+			p.mu.Unlock()
+			return err
+		}
+		if client != nil {
+			p.wg.Add(1)
+			go func() {
+				defer p.wg.Done()
+				if err := p.processRequests(client); err != nil {
+					log.Println("error processing request:", err)
+				}
+			}()
+		}
+		p.mu.Unlock()
+	}
+}
+
+func (p *TSimpleServer) Serve() error {
+	err := p.Listen()
+	if err != nil {
+		return err
+	}
+	return p.AcceptLoop()
+}
+
+func (p *TSimpleServer) Stop() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if atomic.LoadInt32(&p.closed) != 0 {
+		return nil
+	}
+	atomic.StoreInt32(&p.closed, 1)
+	p.serverTransport.Interrupt()
+	p.wg.Wait()
+	return nil
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) error {
+	processor := p.processorFactory.GetProcessor(client)
+	inputTransport, err := p.inputTransportFactory.GetTransport(client)
+	if err != nil {
+		return err
+	}
+	outputTransport, err := p.outputTransportFactory.GetTransport(client)
+	if err != nil {
+		return err
+	}
+	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+	outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
+	defer func() {
+		if e := recover(); e != nil {
+			log.Printf("panic in processor: %s: %s", e, debug.Stack())
+		}
+	}()
+
+	if inputTransport != nil {
+		defer inputTransport.Close()
+	}
+	if outputTransport != nil {
+		defer outputTransport.Close()
+	}
+	for {
+		if atomic.LoadInt32(&p.closed) != 0 {
+			return nil
+		}
+
+		ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol)
+		if e, ok := err.(TTransportException); ok && e.TypeId() == END_OF_FILE {
+			return nil
+		}
+		// Check UNKNOWN_METHOD before the generic error return; after a generic
+		// return it would be unreachable and such errors would end the connection.
+		if e, ok := err.(TApplicationException); ok && e.TypeId() == UNKNOWN_METHOD {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		if !ok {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go
new file mode 100644
index 00000000..383b1fe3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net"
+	"time"
+)
+
+type TSocket struct {
+	conn    net.Conn
+	addr    net.Addr
+	timeout time.Duration
+}
+
+// NewTSocket creates a net.Conn-backed TTransport, given a host and port
+//
+// Example:
+// 	trans, err := thrift.NewTSocket("localhost:9090")
+func NewTSocket(hostPort string) (*TSocket, error) {
+	return NewTSocketTimeout(hostPort, 0)
+}
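+
+// A typical round trip (an illustrative sketch, not part of the upstream file):
+//
+// 	trans, _ := NewTSocket("localhost:9090")
+// 	if err := trans.Open(); err != nil {
+// 		// handle the dial failure
+// 	}
+// 	defer trans.Close()
+// 	n, err := trans.Write(payload) // payload is an assumed []byte; a deadline is pushed per call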
+
+// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port.
+// It also accepts a timeout as a time.Duration.
+func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
+	addr, err := net.ResolveTCPAddr("tcp", hostPort)
+	if err != nil {
+		return nil, err
+	}
+	return NewTSocketFromAddrTimeout(addr, timeout), nil
+}
+
+// Creates a TSocket from a net.Addr
+func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
+	return &TSocket{addr: addr, timeout: timeout}
+}
+
+// Creates a TSocket from an existing net.Conn
+func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
+	return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
+}
+
+// Sets the socket timeout
+func (p *TSocket) SetTimeout(timeout time.Duration) error {
+	p.timeout = timeout
+	return nil
+}
+
+func (p *TSocket) pushDeadline(read, write bool) {
+	var t time.Time
+	if p.timeout > 0 {
+		t = time.Now().Add(p.timeout)
+	}
+	if read && write {
+		p.conn.SetDeadline(t)
+	} else if read {
+		p.conn.SetReadDeadline(t)
+	} else if write {
+		p.conn.SetWriteDeadline(t)
+	}
+}
+
+// Connects the socket, creating a new socket object if necessary.
+func (p *TSocket) Open() error {
+	if p.IsOpen() {
+		return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
+	}
+	if p.addr == nil {
+		return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
+	}
+	if len(p.addr.Network()) == 0 {
+		return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
+	}
+	if len(p.addr.String()) == 0 {
+		return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
+	}
+	var err error
+	if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
+		return NewTTransportException(NOT_OPEN, err.Error())
+	}
+	return nil
+}
+
+// Retrieve the underlying net.Conn
+func (p *TSocket) Conn() net.Conn {
+	return p.conn
+}
+
+// Returns true if the connection is open
+func (p *TSocket) IsOpen() bool {
+	return p.conn != nil
+}
+
+// Closes the socket.
+func (p *TSocket) Close() error {
+	// Close the socket
+	if p.conn != nil {
+		err := p.conn.Close()
+		if err != nil {
+			return err
+		}
+		p.conn = nil
+	}
+	return nil
+}
+
+// Returns the remote address of the socket.
+func (p *TSocket) Addr() net.Addr {
+	return p.addr
+}
+
+func (p *TSocket) Read(buf []byte) (int, error) {
+	if !p.IsOpen() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(true, false)
+	n, err := p.conn.Read(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TSocket) Write(buf []byte) (int, error) {
+	if !p.IsOpen() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(false, true)
+	return p.conn.Write(buf)
+}
+
+func (p *TSocket) Flush() error {
+	return nil
+}
+
+func (p *TSocket) Interrupt() error {
+	if !p.IsOpen() {
+		return nil
+	}
+	return p.conn.Close()
+}
+
+func (p *TSocket) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless a framed transport is used
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go
new file mode 100644
index 00000000..907afca3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+type TSSLServerSocket struct {
+	listener      net.Listener
+	addr          net.Addr
+	clientTimeout time.Duration
+	interrupted   bool
+	cfg           *tls.Config
+}
+
+func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
+	return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
+}
+
+func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
+	if cfg.MinVersion == 0 {
+		cfg.MinVersion = tls.VersionTLS10
+	}
+	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
+}
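+
+// Example (an illustrative sketch, not part of the upstream file; the
+// certificate file names are assumptions):
+//
+// 	cert, _ := tls.LoadX509KeyPair("server.crt", "server.key")
+// 	cfg := &tls.Config{Certificates: []tls.Certificate{cert}}
+// 	sock, _ := NewTSSLServerSocket("localhost:9090", cfg)
+// 	_ = sock.Listen()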
+
+func (p *TSSLServerSocket) Listen() error {
+	if p.IsListening() {
+		return nil
+	}
+	l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TSSLServerSocket) Accept() (TTransport, error) {
+	if p.interrupted {
+		return nil, errTransportInterrupted
+	}
+	if p.listener == nil {
+		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
+	}
+	conn, err := p.listener.Accept()
+	if err != nil {
+		return nil, NewTTransportExceptionFromError(err)
+	}
+	return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
+}
+
+// Checks whether the socket is listening.
+func (p *TSSLServerSocket) IsListening() bool {
+	return p.listener != nil
+}
+
+// Connects the socket, creating a new socket object if necessary.
+func (p *TSSLServerSocket) Open() error {
+	if p.IsListening() {
+		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
+	}
+	l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TSSLServerSocket) Addr() net.Addr {
+	return p.addr
+}
+
+func (p *TSSLServerSocket) Close() error {
+	defer func() {
+		p.listener = nil
+	}()
+	if p.IsListening() {
+		return p.listener.Close()
+	}
+	return nil
+}
+
+func (p *TSSLServerSocket) Interrupt() error {
+	p.interrupted = true
+	return nil
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go
new file mode 100644
index 00000000..c3bd72cc
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+type TSSLSocket struct {
+	conn net.Conn
+	// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
+	// only valid if addr is nil.
+	hostPort string
+	// addr is nil when hostPort is not "", and is only used when the
+	// TSSLSocket is constructed from a net.Addr.
+	addr    net.Addr
+	timeout time.Duration
+	cfg     *tls.Config
+}
+
+// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and a TLS configuration
+//
+// Example:
+// 	trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
+func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
+	return NewTSSLSocketTimeout(hostPort, cfg, 0)
+}
+
+// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port.
+// It also accepts a TLS configuration and a timeout as a time.Duration.
+func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
+	if cfg == nil {
+		// guard against the nil config that the NewTSSLSocket example documents
+		cfg = &tls.Config{}
+	}
+	if cfg.MinVersion == 0 {
+		cfg.MinVersion = tls.VersionTLS10
+	}
+	return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
+}
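+
+// Example (an illustrative sketch, not part of the upstream file; the CA
+// bundle path is an assumption):
+//
+// 	pem, _ := ioutil.ReadFile("ca.crt")
+// 	pool := x509.NewCertPool()
+// 	pool.AppendCertsFromPEM(pem)
+// 	sock, _ := NewTSSLSocket("localhost:9090", &tls.Config{RootCAs: pool})
+// 	_ = sock.Open()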
+
+// Creates a TSSLSocket from a net.Addr
+func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
+	return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
+}
+
+// Creates a TSSLSocket from an existing net.Conn
+func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
+	return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
+}
+
+// Sets the socket timeout
+func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
+	p.timeout = timeout
+	return nil
+}
+
+func (p *TSSLSocket) pushDeadline(read, write bool) {
+	var t time.Time
+	if p.timeout > 0 {
+		t = time.Now().Add(p.timeout)
+	}
+	if read && write {
+		p.conn.SetDeadline(t)
+	} else if read {
+		p.conn.SetReadDeadline(t)
+	} else if write {
+		p.conn.SetWriteDeadline(t)
+	}
+}
+
+// Connects the socket, creating a new socket object if necessary.
+func (p *TSSLSocket) Open() error {
+	var err error
+	// If we have a hostname, we need to pass the hostname to tls.Dial for
+	// certificate hostname checks.
+	if p.hostPort != "" {
+		if p.conn, err = tls.DialWithDialer(&net.Dialer{
+			Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil {
+			return NewTTransportException(NOT_OPEN, err.Error())
+		}
+	} else {
+		if p.IsOpen() {
+			return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
+		}
+		if p.addr == nil {
+			return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
+		}
+		if len(p.addr.Network()) == 0 {
+			return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
+		}
+		if len(p.addr.String()) == 0 {
+			return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
+		}
+		if p.conn, err = tls.DialWithDialer(&net.Dialer{
+			Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil {
+			return NewTTransportException(NOT_OPEN, err.Error())
+		}
+	}
+	return nil
+}
+
+// Retrieve the underlying net.Conn
+func (p *TSSLSocket) Conn() net.Conn {
+	return p.conn
+}
+
+// Returns true if the connection is open
+func (p *TSSLSocket) IsOpen() bool {
+	return p.conn != nil
+}
+
+// Closes the socket.
+func (p *TSSLSocket) Close() error {
+	// Close the socket
+	if p.conn != nil {
+		err := p.conn.Close()
+		if err != nil {
+			return err
+		}
+		p.conn = nil
+	}
+	return nil
+}
+
+func (p *TSSLSocket) Read(buf []byte) (int, error) {
+	if !p.IsOpen() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(true, false)
+	n, err := p.conn.Read(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TSSLSocket) Write(buf []byte) (int, error) {
+	if !p.IsOpen() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(false, true)
+	return p.conn.Write(buf)
+}
+
+func (p *TSSLSocket) Flush() error {
+	return nil
+}
+
+func (p *TSSLSocket) Interrupt() error {
+	if !p.IsOpen() {
+		return nil
+	}
+	return p.conn.Close()
+}
+
+func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the thruth is, we just don't know unless framed is used
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go
new file mode 100644
index 00000000..70a85a84
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+	Flush() (err error)
+}
+
+type ReadSizeProvider interface {
+	RemainingBytes() (num_bytes uint64)
+}
+
+// Encapsulates the I/O layer
+type TTransport interface {
+	io.ReadWriteCloser
+	Flusher
+	ReadSizeProvider
+
+	// Opens the transport for communication
+	Open() error
+
+	// Returns true if the transport is open
+	IsOpen() bool
+}
+
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// This is an "enhanced" transport with extra capabilities. You need to use one of these
+// to construct a protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use
+// TSocket directly with a protocol.
+type TRichTransport interface {
+	io.ReadWriter
+	io.ByteReader
+	io.ByteWriter
+	stringWriter
+	Flusher
+	ReadSizeProvider
+}
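+
+// Illustrative note (not part of the upstream file): a plain TSocket can be
+// promoted to a TRichTransport by wrapping it, e.g. in a buffered transport:
+//
+// 	sock, _ := NewTSocket("localhost:9090")
+// 	var rich TRichTransport = NewTBufferedTransport(sock, 8192)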
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go
new file mode 100644
index 00000000..9505b446
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type timeoutable interface {
+	Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+	TException
+	TypeId() int
+	Err() error
+}
+
+const (
+	UNKNOWN_TRANSPORT_EXCEPTION = 0
+	NOT_OPEN                    = 1
+	ALREADY_OPEN                = 2
+	TIMED_OUT                   = 3
+	END_OF_FILE                 = 4
+)
+
+type tTransportException struct {
+	typeId int
+	err    error
+}
+
+func (p *tTransportException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+	return p.err.Error()
+}
+
+func (p *tTransportException) Err() error {
+	return p.err
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+	return &tTransportException{typeId: t, err: errors.New(e)}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+	if e == nil {
+		return nil
+	}
+
+	if t, ok := e.(TTransportException); ok {
+		return t
+	}
+
+	if t, ok := e.(timeoutable); ok && t.Timeout() {
+		return &tTransportException{typeId: TIMED_OUT, err: e}
+	}
+
+	if e == io.EOF {
+		return &tTransportException{typeId: END_OF_FILE, err: e}
+	}
+
+	return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+}
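+
+// Illustrative note (not part of the upstream file): io.EOF maps to
+// END_OF_FILE, which servers use to detect a cleanly closed connection.
+//
+// 	ex := NewTTransportExceptionFromError(io.EOF)
+// 	_ = ex.TypeId() == END_OF_FILE // true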
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go
new file mode 100644
index 00000000..c8058079
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory class used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to wrap them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+	GetTransport(trans TTransport) (TTransport, error)
+}
+
+type tTransportFactory struct{}
+
+// GetTransport returns the base transport unchanged (identity factory).
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return trans, nil
+}
+
+func NewTTransportFactory() TTransportFactory {
+	return &tTransportFactory{}
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/vendor/github.com/apache/thrift/lib/go/thrift/type.go
new file mode 100644
index 00000000..4292ffca
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+	STOP   = 0
+	VOID   = 1
+	BOOL   = 2
+	BYTE   = 3
+	I08    = 3
+	DOUBLE = 4
+	I16    = 6
+	I32    = 8
+	I64    = 10
+	STRING = 11
+	UTF7   = 11
+	STRUCT = 12
+	MAP    = 13
+	SET    = 14
+	LIST   = 15
+	UTF8   = 16
+	UTF16  = 17
+	// BINARY = 18 is wrong and unused
+)
+
+var typeNames = map[int]string{
+	STOP:   "STOP",
+	VOID:   "VOID",
+	BOOL:   "BOOL",
+	BYTE:   "BYTE",
+	DOUBLE: "DOUBLE",
+	I16:    "I16",
+	I32:    "I32",
+	I64:    "I64",
+	STRING: "STRING",
+	STRUCT: "STRUCT",
+	MAP:    "MAP",
+	SET:    "SET",
+	LIST:   "LIST",
+	UTF8:   "UTF8",
+	UTF16:  "UTF16",
+}
+
+func (p TType) String() string {
+	if s, ok := typeNames[int(p)]; ok {
+		return s
+	}
+	return "Unknown"
+}
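+
+// Example (an illustrative note, not part of the upstream file):
+//
+// 	TType(STRUCT).String() // "STRUCT"
+// 	TType(99).String()     // "Unknown"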
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go
new file mode 100644
index 00000000..f2f07322
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go
@@ -0,0 +1,131 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied. See the License for the
+* specific language governing permissions and limitations
+* under the License.
+ */
+
+package thrift
+
+import (
+	"compress/zlib"
+	"io"
+	"log"
+)
+
+// TZlibTransportFactory is a factory for TZlibTransport instances
+type TZlibTransportFactory struct {
+	level   int
+	factory TTransportFactory
+}
+
+// TZlibTransport is a TTransport implementation that makes use of zlib compression.
+type TZlibTransport struct {
+	reader    io.ReadCloser
+	transport TTransport
+	writer    *zlib.Writer
+}
+
+// GetTransport returns a new TZlibTransport wrapping the given transport
+func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if p.factory != nil {
+		// wrap other factory
+		var err error
+		trans, err = p.factory.GetTransport(trans)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return NewTZlibTransport(trans, p.level)
+}
+
+// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
+func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
+	return &TZlibTransportFactory{level: level, factory: nil}
+}
+
+// NewTZlibTransportFactoryWithFactory constructs a new instance of
+// TZlibTransportFactory as a wrapper over an existing transport factory
+func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
+	return &TZlibTransportFactory{level: level, factory: factory}
+}
+
+// NewTZlibTransport constructs a new instance of TZlibTransport
+func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
+	w, err := zlib.NewWriterLevel(trans, level)
+	if err != nil {
+		log.Println(err)
+		return nil, err
+	}
+
+	return &TZlibTransport{
+		writer:    w,
+		transport: trans,
+	}, nil
+}
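+
+// Example (an illustrative sketch, not part of the upstream file): wrap any
+// transport in zlib at best compression.
+//
+// 	base, _ := NewTSocket("localhost:9090")
+// 	zt, _ := NewTZlibTransport(base, zlib.BestCompression)
+// 	defer zt.Close()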
+
+// Close closes the reader and writer (flushing any unwritten data) and closes
+// the underlying transport.
+func (z *TZlibTransport) Close() error {
+	if z.reader != nil {
+		if err := z.reader.Close(); err != nil {
+			return err
+		}
+	}
+	if err := z.writer.Close(); err != nil {
+		return err
+	}
+	return z.transport.Close()
+}
+
+// Flush flushes the writer and its underlying transport.
+func (z *TZlibTransport) Flush() error {
+	if err := z.writer.Flush(); err != nil {
+		return err
+	}
+	return z.transport.Flush()
+}
+
+// IsOpen returns true if the transport is open
+func (z *TZlibTransport) IsOpen() bool {
+	return z.transport.IsOpen()
+}
+
+// Open opens the transport for communication
+func (z *TZlibTransport) Open() error {
+	return z.transport.Open()
+}
+
+func (z *TZlibTransport) Read(p []byte) (int, error) {
+	if z.reader == nil {
+		r, err := zlib.NewReader(z.transport)
+		if err != nil {
+			return 0, NewTTransportExceptionFromError(err)
+		}
+		z.reader = r
+	}
+
+	return z.reader.Read(p)
+}
+
+// RemainingBytes returns the size in bytes of the data that is still to be
+// read.
+func (z *TZlibTransport) RemainingBytes() uint64 {
+	return z.transport.RemainingBytes()
+}
+
+func (z *TZlibTransport) Write(p []byte) (int, error) {
+	return z.writer.Write(p)
+}
diff --git a/vendor/github.com/apache/thrift/lib/hs/LICENSE b/vendor/github.com/apache/thrift/lib/hs/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/hs/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/apache/thrift/tutorial/erl/client.sh b/vendor/github.com/apache/thrift/tutorial/erl/client.sh
new file mode 120000
index 00000000..a417e0da
--- /dev/null
+++ b/vendor/github.com/apache/thrift/tutorial/erl/client.sh
@@ -0,0 +1 @@
+server.sh
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/tutorial/hs/LICENSE b/vendor/github.com/apache/thrift/tutorial/hs/LICENSE
new file mode 100644
index 00000000..3b6d7d74
--- /dev/null
+++ b/vendor/github.com/apache/thrift/tutorial/hs/LICENSE
@@ -0,0 +1,239 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+--------------------------------------------------
+SOFTWARE DISTRIBUTED WITH THRIFT:
+
+The Apache Thrift software includes a number of subcomponents with
+separate copyright notices and license terms. Your use of the source
+code for these subcomponents is subject to the terms and
+conditions of the following licenses.
+
+--------------------------------------------------
+Portions of the following files are licensed under the MIT License:
+
+  lib/erl/src/Makefile.am
+
+Please see doc/otp-base-license.txt for the full terms of this license.
+
+--------------------------------------------------
+For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
+
+#   Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
+#
+#   Copying and distribution of this file, with or without
+#   modification, are permitted in any medium without royalty provided
+#   the copyright notice and this notice are preserved.
+
+--------------------------------------------------
+For the lib/nodejs/lib/thrift/json_parse.js:
+
+/*
+    json_parse.js
+    2015-05-02
+    Public Domain.
+    NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+*/
+(By Douglas Crockford <douglas@crockford.com>)
+--------------------------------------------------
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..c8364161
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..8a4a6589
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields.  These values are valid before golang
+	// commit ecccf07e7f9d which changed the format.  They are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format.  Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions.  Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions).  Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind.  This code extracts the kind from the flags
+	// field and ensures it's the correct type.  When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded.  This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set.  When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
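
The visibility rule that unsafeReflectValue works around is easy to reproduce in isolation. A minimal sketch, assuming nothing beyond the standard library (the secret struct is hypothetical, not part of spew):

	package main

	import (
		"fmt"
		"reflect"
	)

	type secret struct{ hidden int }

	func main() {
		f := reflect.ValueOf(secret{hidden: 7}).Field(0)
		// reflect refuses to hand out an interface for unexported
		// fields, so Stringer/error lookups would normally fail here.
		fmt.Println(f.CanInterface()) // false
		// unsafeReflectValue rebuilds an unprotected reflect.Value from
		// the raw pointer so spew can still pretty-print such fields.
	}
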
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..1fe3cf3d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..7c519ff4
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
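
For simple key kinds the sorter compares the reflected values directly. A rough standalone sketch of the same deterministic-ordering idea, using sort.Slice in place of spew's unexported valuesSorter:

	package main

	import (
		"fmt"
		"reflect"
		"sort"
	)

	func main() {
		m := map[string]int{"b": 2, "c": 3, "a": 1}
		keys := reflect.ValueOf(m).MapKeys()
		// Order keys with a kind-specific comparison, as valueSortLess
		// does for reflect.String, so iteration order is stable.
		sort.Slice(keys, func(i, j int) bool {
			return keys[i].String() < keys[j].String()
		})
		for _, k := range keys {
			fmt.Println(k.String(), m[k.String()])
		}
	}
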
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use set this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
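
A short usage sketch for per-instance configuration, assuming the package is imported from the vendored path above; the values dumped are arbitrary:

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		// A local ConfigState leaves the global spew.Config untouched.
		cfg := spew.NewDefaultConfig()
		cfg.Indent = "\t"
		cfg.SortKeys = true // deterministic map output
		cfg.Dump(map[string]int{"b": 2, "a": 1})
	}
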
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
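
The DisablePointerAddresses and DisableCapacities options described above are what make Sdump output diff-friendly in tests. A minimal sketch (the user type is hypothetical):

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	type user struct {
		Name string
		tags []string // unexported fields are dumped as well
	}

	func main() {
		spew.Config.DisablePointerAddresses = true // stable across runs
		spew.Config.DisableCapacities = true       // drop cap= noise
		fmt.Print(spew.Sdump(&user{Name: "ada", tags: []string{"x"}}))
	}
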
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..df1d582a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
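+
+A brief usage sketch, assuming myValue is any value in scope:
+
+	spew.Dump(myValue)       // write the dump to standard out
+	s := spew.Sdump(myValue) // capture the same output as a string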
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..c49875ba
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
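+
+As a rough sketch, assuming v is any value in scope, the following two calls
+produce the same output:
+
+	fmt.Printf("%+v\n", spew.NewFormatter(v))
+	spew.Printf("%+v\n", v)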
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// It returns the number of bytes written and any write error encountered.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 00000000..698a3f51
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 00000000..2d1b3d93
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+	result := b.Run(func() error {
+		// communicate with some external service and
+		// return an error if the communication failed
+		return nil
+	})
+
+	switch result {
+	case nil:
+		// success!
+	case breaker.ErrBreakerOpen:
+		// our function wasn't run because the breaker was open
+	default:
+		// some other error
+	}
+}
+```
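+
+`Run` blocks until the wrapped function returns. If you would rather run the
+work asynchronously, `Go` executes it in its own goroutine and returns nil
+immediately; it only ever reports `ErrBreakerOpen` itself, never the
+function's own error. A minimal sketch, reusing the breaker `b` from above
+(`callExternalService` is a hypothetical stand-in):
+
+```go
+if err := b.Go(func() error {
+	// runs in the background; the error still feeds the breaker's
+	// state, but is not returned to the caller of Go
+	return callExternalService()
+}); err == breaker.ErrBreakerOpen {
+	// the function was never started because the breaker was open
+}
+```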
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 00000000..f88ca724
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+	closed uint32 = iota
+	open
+	halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+	errorThreshold, successThreshold int
+	timeout                          time.Duration
+
+	lock              sync.Mutex
+	state             uint32
+	errors, successes int
+	lastError         time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+	return &Breaker{
+		errorThreshold:   errorThreshold,
+		successThreshold: successThreshold,
+		timeout:          timeout,
+	}
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	// errcheck complains about ignoring the error return value, but
+	// that's on purpose; if you want an error from a goroutine you have
+	// to get it back over a channel or some similar mechanism
+	go b.doWork(state, work)
+
+	return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+	var panicValue interface{}
+
+	result := func() error {
+		defer func() {
+			panicValue = recover()
+		}()
+		return work()
+	}()
+
+	if result == nil && panicValue == nil && state == closed {
+		// short-circuit the normal, success path without contending
+		// on the lock
+		return nil
+	}
+
+	// oh well, I guess we have to contend on the lock
+	b.processResult(result, panicValue)
+
+	if panicValue != nil {
+		// as close as Go lets us come to a "rethrow" although unfortunately
+		// we lose the original panicking location
+		panic(panicValue)
+	}
+
+	return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if result == nil && panicValue == nil {
+		if b.state == halfOpen {
+			b.successes++
+			if b.successes == b.successThreshold {
+				b.closeBreaker()
+			}
+		}
+	} else {
+		if b.errors > 0 {
+			expiry := b.lastError.Add(b.timeout)
+			if time.Now().After(expiry) {
+				b.errors = 0
+			}
+		}
+
+		switch b.state {
+		case closed:
+			b.errors++
+			if b.errors == b.errorThreshold {
+				b.openBreaker()
+			} else {
+				b.lastError = time.Now()
+			}
+		case halfOpen:
+			b.openBreaker()
+		}
+	}
+}
+
+func (b *Breaker) openBreaker() {
+	b.changeState(open)
+	go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+	b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+	time.Sleep(b.timeout)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+	b.errors = 0
+	b.successes = 0
+	atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
new file mode 100644
index 00000000..d6cf4f1f
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+- 1.5.4
+- 1.6.1
+
+sudo: false
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
new file mode 100644
index 00000000..5bf3688d
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
new file mode 100644
index 00000000..3f2695c7
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/README.md
@@ -0,0 +1,13 @@
+# go-xerial-snappy
+
+[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy)
+
+Xerial-compatible Snappy framing support for golang.
+
+Packages using Xerial for snappy encoding use a framing format incompatible with
+basically everything else in existence. This package wraps Go's built-in snappy
+package to support it.
+
+Apps that use this format include Apache Kafka (see
+https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
+details).
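+
+A small, hedged usage sketch: `Encode` emits plain (unframed) snappy, while
+`Decode` transparently handles both plain and xerial-framed input:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+)
+
+func main() {
+	raw := []byte("hello, kafka")
+	enc := snappy.Encode(raw)      // unframed snappy
+	dec, err := snappy.Decode(enc) // also accepts xerial-framed data
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(dec)) // "hello, kafka"
+}
+```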
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 00000000..b8f8b51f
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,43 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	master "github.com/golang/snappy"
+)
+
+var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+	return master.Encode(nil, src)
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	// Inputs shorter than the xerial header cannot be framed; treat them as
+	// plain snappy rather than panicking on the slice below.
+	if len(src) < len(xerialHeader) || !bytes.Equal(src[:len(xerialHeader)], xerialHeader) {
+		return master.Decode(nil, src)
+	}
+
+	var (
+		pos   = uint32(16)
+		max   = uint32(len(src))
+		dst   = make([]byte, 0, len(src))
+		chunk []byte
+		err   error
+	)
+	for pos < max {
+		size := binary.BigEndian.Uint32(src[pos : pos+4])
+		pos += 4
+
+		chunk, err = master.Decode(chunk, src[pos:pos+size])
+		if err != nil {
+			return nil, err
+		}
+		pos += size
+		dst = append(dst, chunk...)
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
new file mode 100644
index 00000000..83656241
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
new file mode 100644
index 00000000..235a40a4
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 00000000..d5f36dbc
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
new file mode 100644
index 00000000..8e782335
--- /dev/null
+++ b/vendor/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue)
+[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
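+
+A minimal usage sketch:
+
+```go
+q := queue.New()
+q.Add("a")
+q.Add("b")
+fmt.Println(q.Length()) // 2
+fmt.Println(q.Peek())   // "a" (does not remove)
+fmt.Println(q.Get(-1))  // "b" (negative indexes count from the back)
+fmt.Println(q.Remove()) // "a"
+```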
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 00000000..71d1acdf
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is smallest capacity that queue may have.
+// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+	buf               []interface{}
+	head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+	return &Queue{
+		buf: make([]interface{}, minQueueLen),
+	}
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+	return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+	newBuf := make([]interface{}, q.count<<1)
+
+	if q.tail > q.head {
+		copy(newBuf, q.buf[q.head:q.tail])
+	} else {
+		n := copy(newBuf, q.buf[q.head:])
+		copy(newBuf[n:], q.buf[:q.tail])
+	}
+
+	q.head = 0
+	q.tail = q.count
+	q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+	if q.count == len(q.buf) {
+		q.resize()
+	}
+
+	q.buf[q.tail] = elem
+	// bitwise modulus
+	q.tail = (q.tail + 1) & (len(q.buf) - 1)
+	q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+	if q.count <= 0 {
+		panic("queue: Peek() called on empty queue")
+	}
+	return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+	// If indexing backwards, convert to positive index.
+	if i < 0 {
+		i += q.count
+	}
+	if i < 0 || i >= q.count {
+		panic("queue: Get() called with index out of range")
+	}
+	// bitwise modulus
+	return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+	if q.count <= 0 {
+		panic("queue: Remove() called on empty queue")
+	}
+	ret := q.buf[q.head]
+	q.buf[q.head] = nil
+	// bitwise modulus
+	q.head = (q.head + 1) & (len(q.buf) - 1)
+	q.count--
+	// Resize down if buffer 1/4 full.
+	if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+		q.resize()
+	}
+	return ret
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 00000000..03cacd23
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
diff --git a/vendor/github.com/opentracing-contrib/go-observer/.gitignore b/vendor/github.com/opentracing-contrib/go-observer/.gitignore
new file mode 100644
index 00000000..a1338d68
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-observer/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
diff --git a/vendor/github.com/opentracing-contrib/go-observer/LICENSE b/vendor/github.com/opentracing-contrib/go-observer/LICENSE
new file mode 100644
index 00000000..044f3dfd
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-observer/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2017 opentracing-contrib
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opentracing-contrib/go-observer/README.md b/vendor/github.com/opentracing-contrib/go-observer/README.md
new file mode 100644
index 00000000..334a9aa6
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-observer/README.md
@@ -0,0 +1,64 @@
+# An Observer API for OpenTracing-go Tracers
+
+OTObserver can be used to watch span events such as StartSpan(),
+SetOperationName(), SetTag() and Finish(). The need for observers arose
+when distributed tracers required more information about spans than just
+latency. There can be many metrics across different domains, and baking
+them into any one (client) tracer breaks cross-platform compatibility.
+There are various ways to avoid this; an observer pattern is cleaner and
+provides loose coupling between the tracer and the packages exporting
+metrics on span events.
+
+This information can take the form of hardware metrics, RPC metrics,
+metrics exported from the kernel, or other metrics profiled for a span.
+These additional metrics can enable better root-cause analysis. That said,
+the API is not just for metrics; it can be used for anything that needs to
+watch span events.
+
+## Installation and Usage
+
+The `otobserver` package provides an API to watch a span's events and define
+callbacks for them. The API is intended to be passed as a functional option
+to a tracer constructor, carrying an Observer. Third-party packages that want
+to watch span events implement this observer API.
+To do that, first fetch the package using go get:
+
+```
+   go get -v github.com/opentracing-contrib/go-observer
+```
+
+and import it:
+
+```go
+    import "github.com/opentracing-contrib/go-observer"
+```
+
+and then define the required span event callbacks. Once an observer is
+registered, these callbacks are invoked on the corresponding span events.
+A tracer may allow registering multiple observers. Have a look at
+[Jaeger's observer](https://github.com/uber/jaeger-client-go/blob/master/observer.go).
+
+With that setup implemented in the backend tracers, packages watching span
+events implement the observer API, defining what to do for each observed
+span event.
+
+## Span events
+
+An observer registered with this API can observe the following four
+span events:
+
+```go
+    StartSpan()
+    SetOperationName()
+    SetTag()
+    Finish()
+```
+
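+A hedged sketch of what an implementation might look like, assuming the
+`opentracing` and `otobserver` imports (`metricsObserver` and
+`metricsSpanObserver` are hypothetical names):
+
+```go
+type metricsObserver struct{}
+
+// OnStartSpan returns (nil, false) for spans this observer does not care
+// about; here it watches everything.
+func (metricsObserver) OnStartSpan(sp opentracing.Span, operationName string,
+	options opentracing.StartSpanOptions) (otobserver.SpanObserver, bool) {
+	return &metricsSpanObserver{name: operationName}, true
+}
+
+type metricsSpanObserver struct {
+	name string
+}
+
+func (so *metricsSpanObserver) OnSetOperationName(name string) { so.name = name }
+func (so *metricsSpanObserver) OnSetTag(key string, value interface{}) {}
+func (so *metricsSpanObserver) OnFinish(options opentracing.FinishOptions) {
+	// e.g. record a duration metric for so.name here
+}
+```
+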
+### Tradeoffs
+
+However noble the goal of fetching additional (non-latency) metrics for a
+span through an observer, it comes with some overhead. Not every observer
+needs to observe every span event, in which case some callback functions are
+left empty; the tracer still calls them, which incurs unnecessary overhead.
+To learn more about this and other tradeoffs, see this
+[discussion](https://github.com/opentracing/opentracing-go/pull/135#discussion_r105497329).
diff --git a/vendor/github.com/opentracing-contrib/go-observer/observer.go b/vendor/github.com/opentracing-contrib/go-observer/observer.go
new file mode 100644
index 00000000..c8cbf61b
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-observer/observer.go
@@ -0,0 +1,39 @@
+// This project is licensed under the Apache License 2.0, see LICENSE.
+
+package otobserver
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with a Tracer to receive notifications
+// about new Spans. Tracers are not required to support the Observer API.
+// The actual registration depends on the implementation, which might look
+// like this:
+// observer := myobserver.NewObserver()
+// tracer := client.NewTracer(..., client.WithObserver(observer))
+//
+type Observer interface {
+	// Create and return a span observer. Called when a span starts.
+	// If the Observer is not interested in the given span, it must return (nil, false).
+	// E.g.:
+	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//         var sp opentracing.Span
+	//         sso := opentracing.StartSpanOptions{}
+	//         spanObserver, ok := observer.OnStartSpan(sp, opName, sso)
+	//         if ok {
+	//             // we have a valid SpanObserver
+	//         }
+	//         ...
+	//     }
+	OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (SpanObserver, bool)
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+type SpanObserver interface {
+	// Callback called from opentracing.Span.SetOperationName()
+	OnSetOperationName(operationName string)
+	// Callback called from opentracing.Span.SetTag()
+	OnSetTag(key string, value interface{})
+	// Callback called from opentracing.Span.Finish()
+	OnFinish(options opentracing.FinishOptions)
+}
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE
new file mode 100644
index 00000000..c259d129
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2016, opentracing-contrib
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of go-stdlib nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
new file mode 100644
index 00000000..8d33bcb6
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
@@ -0,0 +1,301 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type contextKey int
+
+const (
+	keyTracer contextKey = iota
+)
+
+const defaultComponentName = "net/http"
+
+// Transport wraps a RoundTripper. If a request is being traced with
+// Tracer, Transport will inject the current span into the headers,
+// and set HTTP related tags on the span.
+type Transport struct {
+	// The actual RoundTripper to use for the request. A nil
+	// RoundTripper defaults to http.DefaultTransport.
+	http.RoundTripper
+}
+
+type clientOptions struct {
+	operationName      string
+	componentName      string
+	disableClientTrace bool
+}
+
+// ClientOption controls the behavior of TraceRequest.
+type ClientOption func(*clientOptions)
+
+// OperationName returns a ClientOption that sets the operation
+// name for the client-side span.
+func OperationName(operationName string) ClientOption {
+	return func(options *clientOptions) {
+		options.operationName = operationName
+	}
+}
+
+// ComponentName returns a ClientOption that sets the component
+// name for the client-side span.
+func ComponentName(componentName string) ClientOption {
+	return func(options *clientOptions) {
+		options.componentName = componentName
+	}
+}
+
+// ClientTrace returns a ClientOption that turns on or off
+// extra instrumentation via httptrace.WithClientTrace.
+func ClientTrace(enabled bool) ClientOption {
+	return func(options *clientOptions) {
+		options.disableClientTrace = !enabled
+	}
+}
+
+// TraceRequest adds a ClientTracer to req, tracing the request and
+// all requests caused by redirects. When tracing requests this
+// way you must also use Transport.
+//
+// Example:
+//
+// 	func AskGoogle(ctx context.Context) error {
+// 		client := &http.Client{Transport: &nethttp.Transport{}}
+// 		req, err := http.NewRequest("GET", "http://google.com", nil)
+// 		if err != nil {
+// 			return err
+// 		}
+// 		req = req.WithContext(ctx) // extend existing trace, if any
+//
+// 		req, ht := nethttp.TraceRequest(tracer, req)
+// 		defer ht.Finish()
+//
+// 		res, err := client.Do(req)
+// 		if err != nil {
+// 			return err
+// 		}
+// 		res.Body.Close()
+// 		return nil
+// 	}
+func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) {
+	opts := &clientOptions{}
+	for _, opt := range options {
+		opt(opts)
+	}
+	ht := &Tracer{tr: tr, opts: opts}
+	ctx := req.Context()
+	if !opts.disableClientTrace {
+		ctx = httptrace.WithClientTrace(ctx, ht.clientTrace())
+	}
+	req = req.WithContext(context.WithValue(ctx, keyTracer, ht))
+	return req, ht
+}
+
+type closeTracker struct {
+	io.ReadCloser
+	sp opentracing.Span
+}
+
+func (c closeTracker) Close() error {
+	err := c.ReadCloser.Close()
+	c.sp.LogFields(log.String("event", "ClosedBody"))
+	c.sp.Finish()
+	return err
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt := t.RoundTripper
+	if rt == nil {
+		rt = http.DefaultTransport
+	}
+	tracer, ok := req.Context().Value(keyTracer).(*Tracer)
+	if !ok {
+		return rt.RoundTrip(req)
+	}
+
+	tracer.start(req)
+
+	ext.HTTPMethod.Set(tracer.sp, req.Method)
+	ext.HTTPUrl.Set(tracer.sp, req.URL.String())
+
+	carrier := opentracing.HTTPHeadersCarrier(req.Header)
+	tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier)
+	resp, err := rt.RoundTrip(req)
+
+	if err != nil {
+		tracer.sp.Finish()
+		return resp, err
+	}
+	ext.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode))
+	if req.Method == "HEAD" {
+		tracer.sp.Finish()
+	} else {
+		resp.Body = closeTracker{resp.Body, tracer.sp}
+	}
+	return resp, nil
+}
+
+// Tracer holds tracing details for one HTTP request.
+type Tracer struct {
+	tr   opentracing.Tracer
+	root opentracing.Span
+	sp   opentracing.Span
+	opts *clientOptions
+}
+
+func (h *Tracer) start(req *http.Request) opentracing.Span {
+	if h.root == nil {
+		parent := opentracing.SpanFromContext(req.Context())
+		var spanctx opentracing.SpanContext
+		if parent != nil {
+			spanctx = parent.Context()
+		}
+		operationName := h.opts.operationName
+		if operationName == "" {
+			operationName = "HTTP Client"
+		}
+		root := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx))
+		h.root = root
+	}
+
+	ctx := h.root.Context()
+	h.sp = h.tr.StartSpan("HTTP "+req.Method, opentracing.ChildOf(ctx))
+	ext.SpanKindRPCClient.Set(h.sp)
+
+	componentName := h.opts.componentName
+	if componentName == "" {
+		componentName = defaultComponentName
+	}
+	ext.Component.Set(h.sp, componentName)
+
+	return h.sp
+}
+
+// Finish finishes the span of the traced request.
+func (h *Tracer) Finish() {
+	if h.root != nil {
+		h.root.Finish()
+	}
+}
+
+// Span returns the root span of the traced request. This function
+// should only be called after the request has been executed.
+func (h *Tracer) Span() opentracing.Span {
+	return h.root
+}
+
+func (h *Tracer) clientTrace() *httptrace.ClientTrace {
+	return &httptrace.ClientTrace{
+		GetConn:              h.getConn,
+		GotConn:              h.gotConn,
+		PutIdleConn:          h.putIdleConn,
+		GotFirstResponseByte: h.gotFirstResponseByte,
+		Got100Continue:       h.got100Continue,
+		DNSStart:             h.dnsStart,
+		DNSDone:              h.dnsDone,
+		ConnectStart:         h.connectStart,
+		ConnectDone:          h.connectDone,
+		WroteHeaders:         h.wroteHeaders,
+		Wait100Continue:      h.wait100Continue,
+		WroteRequest:         h.wroteRequest,
+	}
+}
+
+func (h *Tracer) getConn(hostPort string) {
+	ext.HTTPUrl.Set(h.sp, hostPort)
+	h.sp.LogFields(log.String("event", "GetConn"))
+}
+
+func (h *Tracer) gotConn(info httptrace.GotConnInfo) {
+	h.sp.SetTag("net/http.reused", info.Reused)
+	h.sp.SetTag("net/http.was_idle", info.WasIdle)
+	h.sp.LogFields(log.String("event", "GotConn"))
+}
+
+func (h *Tracer) putIdleConn(error) {
+	h.sp.LogFields(log.String("event", "PutIdleConn"))
+}
+
+func (h *Tracer) gotFirstResponseByte() {
+	h.sp.LogFields(log.String("event", "GotFirstResponseByte"))
+}
+
+func (h *Tracer) got100Continue() {
+	h.sp.LogFields(log.String("event", "Got100Continue"))
+}
+
+func (h *Tracer) dnsStart(info httptrace.DNSStartInfo) {
+	h.sp.LogFields(
+		log.String("event", "DNSStart"),
+		log.String("host", info.Host),
+	)
+}
+
+func (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) {
+	fields := []log.Field{log.String("event", "DNSDone")}
+	for _, addr := range info.Addrs {
+		fields = append(fields, log.String("addr", addr.String()))
+	}
+	if info.Err != nil {
+		fields = append(fields, log.Error(info.Err))
+	}
+	h.sp.LogFields(fields...)
+}
+
+func (h *Tracer) connectStart(network, addr string) {
+	h.sp.LogFields(
+		log.String("event", "ConnectStart"),
+		log.String("network", network),
+		log.String("addr", addr),
+	)
+}
+
+func (h *Tracer) connectDone(network, addr string, err error) {
+	if err != nil {
+		h.sp.LogFields(
+			log.String("message", "ConnectDone"),
+			log.String("network", network),
+			log.String("addr", addr),
+			log.String("event", "error"),
+			log.Error(err),
+		)
+	} else {
+		h.sp.LogFields(
+			log.String("event", "ConnectDone"),
+			log.String("network", network),
+			log.String("addr", addr),
+		)
+	}
+}
+
+func (h *Tracer) wroteHeaders() {
+	h.sp.LogFields(log.String("event", "WroteHeaders"))
+}
+
+func (h *Tracer) wait100Continue() {
+	h.sp.LogFields(log.String("event", "Wait100Continue"))
+}
+
+func (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) {
+	if info.Err != nil {
+		h.sp.LogFields(
+			log.String("message", "WroteRequest"),
+			log.String("event", "error"),
+			log.Error(info.Err),
+		)
+		ext.Error.Set(h.sp, true)
+	} else {
+		h.sp.LogFields(log.String("event", "WroteRequest"))
+	}
+}
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go
new file mode 100644
index 00000000..c853ca64
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go
@@ -0,0 +1,3 @@
+// Package nethttp provides OpenTracing instrumentation for the
+// net/http package.
+package nethttp
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
new file mode 100644
index 00000000..1a7ecfa4
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
@@ -0,0 +1,110 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"net/http"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+)
+
+type statusCodeTracker struct {
+	http.ResponseWriter
+	status int
+}
+
+func (w *statusCodeTracker) WriteHeader(status int) {
+	w.status = status
+	w.ResponseWriter.WriteHeader(status)
+}
+
+type mwOptions struct {
+	opNameFunc    func(r *http.Request) string
+	spanObserver  func(span opentracing.Span, r *http.Request)
+	componentName string
+}
+
+// MWOption controls the behavior of the Middleware.
+type MWOption func(*mwOptions)
+
+// OperationNameFunc returns a MWOption that uses given function f
+// to generate operation name for each server-side span.
+func OperationNameFunc(f func(r *http.Request) string) MWOption {
+	return func(options *mwOptions) {
+		options.opNameFunc = f
+	}
+}
+
+// MWComponentName returns a MWOption that sets the component name
+// for the server-side span.
+func MWComponentName(componentName string) MWOption {
+	return func(options *mwOptions) {
+		options.componentName = componentName
+	}
+}
+
+// MWSpanObserver returns a MWOption that sets a callback to observe
+// the server-side span, e.g. to add extra tags.
+func MWSpanObserver(f func(span opentracing.Span, r *http.Request)) MWOption {
+	return func(options *mwOptions) {
+		options.spanObserver = f
+	}
+}
+
+// Middleware wraps an http.Handler and traces incoming requests.
+// Additionally, it adds the span to the request's context.
+//
+// By default, the operation name of the spans is set to "HTTP {method}".
+// This can be overridden with options.
+//
+// Example:
+// 	 http.ListenAndServe("localhost:80", nethttp.Middleware(tracer, http.DefaultServeMux))
+//
+// The options allow fine tuning the behavior of the middleware.
+//
+// Example:
+//   mw := nethttp.Middleware(
+//      tracer,
+//      http.DefaultServeMux,
+//      nethttp.OperationNameFunc(func(r *http.Request) string {
+//	        return "HTTP " + r.Method + ":/api/customers"
+//      }),
+//      nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) {
+//			sp.SetTag("http.uri", r.URL.EscapedPath())
+//		}),
+//   )
+func Middleware(tr opentracing.Tracer, h http.Handler, options ...MWOption) http.Handler {
+	opts := mwOptions{
+		opNameFunc: func(r *http.Request) string {
+			return "HTTP " + r.Method
+		},
+		spanObserver: func(span opentracing.Span, r *http.Request) {},
+	}
+	for _, opt := range options {
+		opt(&opts)
+	}
+	fn := func(w http.ResponseWriter, r *http.Request) {
+		ctx, _ := tr.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header))
+		sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx))
+		ext.HTTPMethod.Set(sp, r.Method)
+		ext.HTTPUrl.Set(sp, r.URL.String())
+		opts.spanObserver(sp, r)
+
+		// set component name, use "net/http" if caller does not specify
+		componentName := opts.componentName
+		if componentName == "" {
+			componentName = defaultComponentName
+		}
+		ext.Component.Set(sp, componentName)
+
+		w = &statusCodeTracker{w, 200}
+		r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp))
+
+		h.ServeHTTP(w, r)
+
+		ext.HTTPStatusCode.Set(sp, uint16(w.(*statusCodeTracker).status))
+		sp.Finish()
+	}
+	return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore b/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore
new file mode 100644
index 00000000..37721f69
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore
@@ -0,0 +1,4 @@
+build/*
+.idea/
+.project
+examples/**/build
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml b/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml
new file mode 100644
index 00000000..8a05cdc0
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+  - 1.8
+  - 1.9
+  - tip
+
+install:
+  - go get -d -t ./...
+  - go get -u github.com/golang/lint/...
+script:
+  - make test vet lint bench
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE b/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE
new file mode 100644
index 00000000..66fff971
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 The OpenTracing Authors
+Copyright (c) 2016 Bas van Beek
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile b/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile
new file mode 100644
index 00000000..d0951ed4
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile
@@ -0,0 +1,26 @@
+
+.DEFAULT_GOAL := test
+
+.PHONY: test
+test:
+	go test -v -race -cover ./...
+
+.PHONY: bench
+bench:
+	go test -v -run - -bench . -benchmem ./...
+
+.PHONY: lint
+lint:
+	# Ignore grep's exit code since no match returns 1.
+	-golint ./... | grep --invert-match -E '^.*\.pb\.go|^thrift'
+	@
+	@! (golint ./... | grep --invert-match -E '^.*\.pb\.go|^thrift' | read dummy)
+
+.PHONY: vet
+vet:
+	go vet ./...
+
+.PHONY: all
+all: vet lint test bench
+
+.PHONY: example
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md b/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md
new file mode 100644
index 00000000..a3010843
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md
@@ -0,0 +1,29 @@
+# zipkin-go-opentracing
+
+[![Travis CI](https://travis-ci.org/openzipkin/zipkin-go-opentracing.svg?branch=master)](https://travis-ci.org/openzipkin/zipkin-go-opentracing)
+[![CircleCI](https://circleci.com/gh/openzipkin/zipkin-go-opentracing.svg?style=shield)](https://circleci.com/gh/openzipkin/zipkin-go-opentracing)
+[![GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing?status.svg)](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing)
+[![Go Report Card](https://goreportcard.com/badge/github.com/openzipkin/zipkin-go-opentracing)](https://goreportcard.com/report/github.com/openzipkin/zipkin-go-opentracing)
+[![Sourcegraph](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing/-/badge.svg)](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing?badge)
+
+[OpenTracing](http://opentracing.io) Tracer implementation for [Zipkin](http://zipkin.io) in Go.
+
+### Notes
+
+This package is a low-level tracing "driver" to allow OpenTracing API consumers
+to use Zipkin as their tracing backend. For details on how to work with spans
+and traces we suggest looking at the documentation and README from the
+[OpenTracing API](https://github.com/opentracing/opentracing-go).
+
+For developers interested in adding Zipkin tracing to their Go services we
+suggest looking at [Go kit](https://gokit.io), an excellent toolkit with which
+to instrument your distributed system with Zipkin and much more, offering
+clean separation of concerns such as transport, middleware / instrumentation
+and business logic.
+
+### Examples
+
+For more information on zipkin-go-opentracing, please see the
+[examples](https://github.com/openzipkin/zipkin-go-opentracing/tree/master/examples)
+directory for usage examples, as well as the documentation at
+[go doc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing).
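+
+As a minimal sketch, a tracer is wired up by creating a collector, a recorder
+and then the tracer itself (the endpoint URL, host:port and service name below
+are placeholders; see the examples directory for complete versions):
+
+```go
+collector, err := zipkintracer.NewHTTPCollector("http://localhost:9411/api/v1/spans")
+if err != nil {
+	// handle error
+}
+defer collector.Close()
+
+recorder := zipkintracer.NewRecorder(collector, false, "127.0.0.1:0", "my-service")
+tracer, err := zipkintracer.NewTracer(recorder)
+if err != nil {
+	// handle error
+}
+opentracing.SetGlobalTracer(tracer)
+```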
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml b/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml
new file mode 100644
index 00000000..30d5ef4b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml
@@ -0,0 +1,10 @@
+dependencies:
+  override:
+    - sudo rm -rf /home/ubuntu/.go_workspace/src/github.com/openzipkin
+    - mkdir -p /home/ubuntu/.go_workspace/src/github.com/openzipkin
+    - mv /home/ubuntu/zipkin-go-opentracing /home/ubuntu/.go_workspace/src/github.com/openzipkin
+    - ln -s /home/ubuntu/.go_workspace/src/github.com/openzipkin/zipkin-go-opentracing /home/ubuntu/zipkin-go-opentracing
+    - go get -u -t -v github.com/openzipkin/zipkin-go-opentracing/...
+test:
+  override:
+    - make test bench
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go
new file mode 100644
index 00000000..6fb7b8c7
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go
@@ -0,0 +1,234 @@
+package zipkintracer
+
+import (
+	"bytes"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/apache/thrift/lib/go/thrift"
+
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// defaultHTTPTimeout is the default timeout for HTTP requests.
+const defaultHTTPTimeout = time.Second * 5
+
+// defaultHTTPBatchInterval in seconds
+const defaultHTTPBatchInterval = 1
+
+const defaultHTTPBatchSize = 100
+
+const defaultHTTPMaxBacklog = 1000
+
+// HTTPCollector implements Collector by forwarding spans to a http server.
+type HTTPCollector struct {
+	logger        Logger
+	url           string
+	client        *http.Client
+	batchInterval time.Duration
+	batchSize     int
+	maxBacklog    int
+	batch         []*zipkincore.Span
+	spanc         chan *zipkincore.Span
+	quit          chan struct{}
+	shutdown      chan error
+	sendMutex     *sync.Mutex
+	batchMutex    *sync.Mutex
+	reqCallback   RequestCallback
+}
+
+// RequestCallback receives the initialized request from the Collector before
+// sending it over the wire. This allows one to plug in additional headers or
+// do other customization.
+type RequestCallback func(*http.Request)
+
+// HTTPOption sets a parameter for the HTTPCollector
+type HTTPOption func(c *HTTPCollector)
+
+// HTTPLogger sets the logger used to report errors in the collection
+// process. By default, a no-op logger is used, i.e. no errors are logged
+// anywhere. It's important to set this option in a production service.
+func HTTPLogger(logger Logger) HTTPOption {
+	return func(c *HTTPCollector) { c.logger = logger }
+}
+
+// HTTPTimeout sets maximum timeout for http request.
+func HTTPTimeout(duration time.Duration) HTTPOption {
+	return func(c *HTTPCollector) { c.client.Timeout = duration }
+}
+
+// HTTPBatchSize sets the maximum batch size, after which a collect will be
+// triggered. The default batch size is 100 spans.
+func HTTPBatchSize(n int) HTTPOption {
+	return func(c *HTTPCollector) { c.batchSize = n }
+}
+
+// HTTPMaxBacklog sets the maximum backlog size. When the batch reaches
+// this threshold, spans from the beginning of the batch will be
+// disposed of.
+func HTTPMaxBacklog(n int) HTTPOption {
+	return func(c *HTTPCollector) { c.maxBacklog = n }
+}
+
+// HTTPBatchInterval sets the maximum duration we will buffer traces before
+// emitting them to the collector. The default batch interval is 1 second.
+func HTTPBatchInterval(d time.Duration) HTTPOption {
+	return func(c *HTTPCollector) { c.batchInterval = d }
+}
+
+// HTTPClient sets a custom http client to use.
+func HTTPClient(client *http.Client) HTTPOption {
+	return func(c *HTTPCollector) { c.client = client }
+}
+
+// HTTPRequestCallback registers a callback function to adjust the collector
+// *http.Request before it sends the request to Zipkin.
+func HTTPRequestCallback(rc RequestCallback) HTTPOption {
+	return func(c *HTTPCollector) { c.reqCallback = rc }
+}
+
+// NewHTTPCollector returns a new HTTP-backed Collector. url should be the
+// HTTP endpoint to POST spans to, e.g. the span intake of a Zipkin server.
+// The timeout, batch size, backlog and logger can be customized through
+// HTTPOptions; the logger is used to report errors, such as send failures.
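+//
+// A minimal usage sketch (the endpoint URL is illustrative):
+//
+//	collector, err := NewHTTPCollector("http://localhost:9411/api/v1/spans")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer collector.Close()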
+func NewHTTPCollector(url string, options ...HTTPOption) (Collector, error) {
+	c := &HTTPCollector{
+		logger:        NewNopLogger(),
+		url:           url,
+		client:        &http.Client{Timeout: defaultHTTPTimeout},
+		batchInterval: defaultHTTPBatchInterval * time.Second,
+		batchSize:     defaultHTTPBatchSize,
+		maxBacklog:    defaultHTTPMaxBacklog,
+		batch:         []*zipkincore.Span{},
+		spanc:         make(chan *zipkincore.Span),
+		quit:          make(chan struct{}, 1),
+		shutdown:      make(chan error, 1),
+		sendMutex:     &sync.Mutex{},
+		batchMutex:    &sync.Mutex{},
+	}
+
+	for _, option := range options {
+		option(c)
+	}
+
+	go c.loop()
+	return c, nil
+}
+
+// Collect implements Collector.
+func (c *HTTPCollector) Collect(s *zipkincore.Span) error {
+	c.spanc <- s
+	return nil
+}
+
+// Close implements Collector.
+func (c *HTTPCollector) Close() error {
+	close(c.quit)
+	return <-c.shutdown
+}
+
+func httpSerialize(spans []*zipkincore.Span) *bytes.Buffer {
+	t := thrift.NewTMemoryBuffer()
+	p := thrift.NewTBinaryProtocolTransport(t)
+	if err := p.WriteListBegin(thrift.STRUCT, len(spans)); err != nil {
+		panic(err)
+	}
+	for _, s := range spans {
+		if err := s.Write(p); err != nil {
+			panic(err)
+		}
+	}
+	if err := p.WriteListEnd(); err != nil {
+		panic(err)
+	}
+	return t.Buffer
+}
+
+func (c *HTTPCollector) loop() {
+	var (
+		nextSend = time.Now().Add(c.batchInterval)
+		ticker   = time.NewTicker(c.batchInterval / 10)
+		tickc    = ticker.C
+	)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case span := <-c.spanc:
+			currentBatchSize := c.append(span)
+			if currentBatchSize >= c.batchSize {
+				nextSend = time.Now().Add(c.batchInterval)
+				go c.send()
+			}
+		case <-tickc:
+			if time.Now().After(nextSend) {
+				nextSend = time.Now().Add(c.batchInterval)
+				go c.send()
+			}
+		case <-c.quit:
+			c.shutdown <- c.send()
+			return
+		}
+	}
+}
+
+func (c *HTTPCollector) append(span *zipkincore.Span) (newBatchSize int) {
+	c.batchMutex.Lock()
+	defer c.batchMutex.Unlock()
+
+	c.batch = append(c.batch, span)
+	if len(c.batch) > c.maxBacklog {
+		dispose := len(c.batch) - c.maxBacklog
+		c.logger.Log("msg", "backlog too long, disposing spans.", "count", dispose)
+		c.batch = c.batch[dispose:]
+	}
+	newBatchSize = len(c.batch)
+	return
+}
+
+func (c *HTTPCollector) send() error {
+	// in order to prevent sending the same batch twice
+	c.sendMutex.Lock()
+	defer c.sendMutex.Unlock()
+
+	// Select all current spans in the batch to be sent
+	c.batchMutex.Lock()
+	sendBatch := c.batch[:]
+	c.batchMutex.Unlock()
+
+	// Do not send an empty batch
+	if len(sendBatch) == 0 {
+		return nil
+	}
+
+	req, err := http.NewRequest(
+		"POST",
+		c.url,
+		httpSerialize(sendBatch))
+	if err != nil {
+		c.logger.Log("err", err.Error())
+		return err
+	}
+	req.Header.Set("Content-Type", "application/x-thrift")
+	if c.reqCallback != nil {
+		c.reqCallback(req)
+	}
+	resp, err := c.client.Do(req)
+	if err != nil {
+		c.logger.Log("err", err.Error())
+		return err
+	}
+	resp.Body.Close()
+	// non 2xx code
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		c.logger.Log("err", "HTTP POST span failed", "code", resp.Status)
+	}
+
+	// Remove sent spans from the batch
+	c.batchMutex.Lock()
+	c.batch = c.batch[len(sendBatch):]
+	c.batchMutex.Unlock()
+
+	return nil
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go
new file mode 100644
index 00000000..eb18c3f3
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go
@@ -0,0 +1,95 @@
+package zipkintracer
+
+import (
+	"github.com/Shopify/sarama"
+	"github.com/apache/thrift/lib/go/thrift"
+
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// defaultKafkaTopic sets the standard Kafka topic our Collector will publish
+// on. The default topic for zipkin-receiver-kafka is "zipkin", see:
+// https://github.com/openzipkin/zipkin/tree/master/zipkin-receiver-kafka
+const defaultKafkaTopic = "zipkin"
+
+// KafkaCollector implements Collector by publishing spans to a Kafka
+// broker.
+type KafkaCollector struct {
+	producer sarama.AsyncProducer
+	logger   Logger
+	topic    string
+}
+
+// KafkaOption sets a parameter for the KafkaCollector
+type KafkaOption func(c *KafkaCollector)
+
+// KafkaLogger sets the logger used to report errors in the collection
+// process. By default, a no-op logger is used, i.e. no errors are logged
+// anywhere. It's important to set this option.
+func KafkaLogger(logger Logger) KafkaOption {
+	return func(c *KafkaCollector) { c.logger = logger }
+}
+
+// KafkaProducer sets the producer used to produce to Kafka.
+func KafkaProducer(p sarama.AsyncProducer) KafkaOption {
+	return func(c *KafkaCollector) { c.producer = p }
+}
+
+// KafkaTopic sets the kafka topic to attach the collector producer on.
+func KafkaTopic(t string) KafkaOption {
+	return func(c *KafkaCollector) { c.topic = t }
+}
+
+// NewKafkaCollector returns a new Kafka-backed Collector. addrs should be a
+// slice of TCP endpoints of the form "host:port".
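+//
+// A minimal usage sketch (broker addresses are illustrative):
+//
+//	collector, err := NewKafkaCollector(
+//		[]string{"kafka-1:9092", "kafka-2:9092"},
+//		KafkaTopic("zipkin"),
+//	)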
+func NewKafkaCollector(addrs []string, options ...KafkaOption) (Collector, error) {
+	c := &KafkaCollector{
+		logger: NewNopLogger(),
+		topic:  defaultKafkaTopic,
+	}
+
+	for _, option := range options {
+		option(c)
+	}
+	if c.producer == nil {
+		p, err := sarama.NewAsyncProducer(addrs, nil)
+		if err != nil {
+			return nil, err
+		}
+		c.producer = p
+	}
+
+	go c.logErrors()
+
+	return c, nil
+}
+
+func (c *KafkaCollector) logErrors() {
+	for pe := range c.producer.Errors() {
+		_ = c.logger.Log("msg", pe.Msg, "err", pe.Err, "result", "failed to produce msg")
+	}
+}
+
+// Collect implements Collector.
+func (c *KafkaCollector) Collect(s *zipkincore.Span) error {
+	c.producer.Input() <- &sarama.ProducerMessage{
+		Topic: c.topic,
+		Key:   nil,
+		Value: sarama.ByteEncoder(kafkaSerialize(s)),
+	}
+	return nil
+}
+
+// Close implements Collector.
+func (c *KafkaCollector) Close() error {
+	return c.producer.Close()
+}
+
+func kafkaSerialize(s *zipkincore.Span) []byte {
+	t := thrift.NewTMemoryBuffer()
+	p := thrift.NewTBinaryProtocolTransport(t)
+	if err := s.Write(p); err != nil {
+		panic(err)
+	}
+	return t.Buffer.Bytes()
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go
new file mode 100644
index 00000000..1ff353aa
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go
@@ -0,0 +1,235 @@
+package zipkintracer
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/apache/thrift/lib/go/thrift"
+
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe"
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+const defaultScribeCategory = "zipkin"
+
+// defaultScribeBatchInterval in seconds
+const defaultScribeBatchInterval = 1
+
+const defaultScribeBatchSize = 100
+
+const defaultScribeMaxBacklog = 1000
+
+// ScribeCollector implements Collector by forwarding spans to a Scribe
+// service, in batches.
+type ScribeCollector struct {
+	logger        Logger
+	category      string
+	factory       func() (scribe.Scribe, error)
+	client        scribe.Scribe
+	batchInterval time.Duration
+	batchSize     int
+	maxBacklog    int
+	batch         []*scribe.LogEntry
+	spanc         chan *zipkincore.Span
+	quit          chan struct{}
+	shutdown      chan error
+	sendMutex     *sync.Mutex
+	batchMutex    *sync.Mutex
+}
+
+// ScribeOption sets a parameter for the ScribeCollector.
+type ScribeOption func(s *ScribeCollector)
+
+// ScribeLogger sets the logger used to report errors in the collection
+// process. By default, a no-op logger is used, i.e. no errors are logged
+// anywhere. It's important to set this option in a production service.
+func ScribeLogger(logger Logger) ScribeOption {
+	return func(s *ScribeCollector) { s.logger = logger }
+}
+
+// ScribeBatchSize sets the maximum batch size, after which a collect will be
+// triggered. The default batch size is 100 spans.
+func ScribeBatchSize(n int) ScribeOption {
+	return func(s *ScribeCollector) { s.batchSize = n }
+}
+
+// ScribeMaxBacklog sets the maximum backlog size. When the batch reaches
+// this threshold, spans from the beginning of the batch will be
+// disposed of.
+func ScribeMaxBacklog(n int) ScribeOption {
+	return func(c *ScribeCollector) { c.maxBacklog = n }
+}
+
+// ScribeBatchInterval sets the maximum duration we will buffer traces before
+// emitting them to the collector. The default batch interval is 1 second.
+func ScribeBatchInterval(d time.Duration) ScribeOption {
+	return func(s *ScribeCollector) { s.batchInterval = d }
+}
+
+// ScribeCategory sets the Scribe category used to transmit the spans.
+func ScribeCategory(category string) ScribeOption {
+	return func(s *ScribeCollector) { s.category = category }
+}
+
+// NewScribeCollector returns a new Scribe-backed Collector. addr should be a
+// TCP endpoint of the form "host:port". timeout is passed to the Thrift dial
+// function NewTSocketFromAddrTimeout. The ScribeBatchSize and
+// ScribeBatchInterval options control the maximum size and age of a batch of
+// spans; as soon as either limit is reached, the batch is sent. The logger is
+// used to log errors, such as batch send failures.
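+//
+// A minimal usage sketch (address and timeout are illustrative):
+//
+//	collector, err := NewScribeCollector("scribe-host:1463", 5*time.Second,
+//		ScribeBatchSize(50))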
+func NewScribeCollector(addr string, timeout time.Duration, options ...ScribeOption) (Collector, error) {
+	factory := scribeClientFactory(addr, timeout)
+	client, err := factory()
+	if err != nil {
+		return nil, err
+	}
+	c := &ScribeCollector{
+		logger:        NewNopLogger(),
+		category:      defaultScribeCategory,
+		factory:       factory,
+		client:        client,
+		batchInterval: defaultScribeBatchInterval * time.Second,
+		batchSize:     defaultScribeBatchSize,
+		maxBacklog:    defaultScribeMaxBacklog,
+		batch:         []*scribe.LogEntry{},
+		spanc:         make(chan *zipkincore.Span),
+		quit:          make(chan struct{}),
+		shutdown:      make(chan error, 1),
+		sendMutex:     &sync.Mutex{},
+		batchMutex:    &sync.Mutex{},
+	}
+
+	for _, option := range options {
+		option(c)
+	}
+
+	go c.loop()
+	return c, nil
+}
+
+// Collect implements Collector.
+func (c *ScribeCollector) Collect(s *zipkincore.Span) error {
+	c.spanc <- s
+	return nil // accepted
+}
+
+// Close implements Collector.
+func (c *ScribeCollector) Close() error {
+	close(c.quit)
+	return <-c.shutdown
+}
+
+func scribeSerialize(s *zipkincore.Span) string {
+	t := thrift.NewTMemoryBuffer()
+	p := thrift.NewTBinaryProtocolTransport(t)
+	if err := s.Write(p); err != nil {
+		panic(err)
+	}
+	return base64.StdEncoding.EncodeToString(t.Buffer.Bytes())
+}
+
+func (c *ScribeCollector) loop() {
+	var (
+		nextSend = time.Now().Add(c.batchInterval)
+		ticker   = time.NewTicker(c.batchInterval / 10)
+		tickc    = ticker.C
+	)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case span := <-c.spanc:
+			currentBatchSize := c.append(span)
+			if currentBatchSize >= c.batchSize {
+				nextSend = time.Now().Add(c.batchInterval)
+				go c.send()
+			}
+		case <-tickc:
+			if time.Now().After(nextSend) {
+				nextSend = time.Now().Add(c.batchInterval)
+				go c.send()
+			}
+		case <-c.quit:
+			c.shutdown <- c.send()
+			return
+		}
+	}
+}
+
+func (c *ScribeCollector) append(span *zipkincore.Span) (newBatchSize int) {
+	c.batchMutex.Lock()
+	defer c.batchMutex.Unlock()
+
+	c.batch = append(c.batch, &scribe.LogEntry{
+		Category: c.category,
+		Message:  scribeSerialize(span),
+	})
+	if len(c.batch) > c.maxBacklog {
+		dispose := len(c.batch) - c.maxBacklog
+		c.logger.Log("Backlog too long, disposing spans.", "count", dispose)
+		c.batch = c.batch[dispose:]
+	}
+	newBatchSize = len(c.batch)
+	return
+}
+
+func (c *ScribeCollector) send() error {
+	// in order to prevent sending the same batch twice
+	c.sendMutex.Lock()
+	defer c.sendMutex.Unlock()
+
+	// Select all current spans in the batch to be sent
+	c.batchMutex.Lock()
+	sendBatch := c.batch[:]
+	c.batchMutex.Unlock()
+
+	// Do not send an empty batch
+	if len(sendBatch) == 0 {
+		return nil
+	}
+
+	if c.client == nil {
+		var err error
+		if c.client, err = c.factory(); err != nil {
+			_ = c.logger.Log("err", fmt.Sprintf("during reconnect: %v", err))
+			return err
+		}
+	}
+	if rc, err := c.client.Log(context.Background(), sendBatch); err != nil {
+		c.client = nil
+		_ = c.logger.Log("err", fmt.Sprintf("during Log: %v", err))
+		return err
+	} else if rc != scribe.ResultCode_OK {
+		// probably transient error; don't reset client
+		_ = c.logger.Log("err", fmt.Sprintf("remote returned %s", rc))
+	}
+
+	// Remove sent spans from the batch
+	c.batchMutex.Lock()
+	c.batch = c.batch[len(sendBatch):]
+	c.batchMutex.Unlock()
+
+	return nil
+}
+
+func scribeClientFactory(addr string, timeout time.Duration) func() (scribe.Scribe, error) {
+	return func() (scribe.Scribe, error) {
+		a, err := net.ResolveTCPAddr("tcp", addr)
+		if err != nil {
+			return nil, err
+		}
+		socket := thrift.NewTSocketFromAddrTimeout(a, timeout)
+		transport := thrift.NewTFramedTransport(socket)
+		if err := transport.Open(); err != nil {
+			_ = socket.Close()
+			return nil, err
+		}
+		proto := thrift.NewTBinaryProtocolTransport(transport)
+		client := scribe.NewScribeClientProtocol(transport, proto, proto)
+		return client, nil
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go
new file mode 100644
index 00000000..f8cfb58e
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go
@@ -0,0 +1,77 @@
+package zipkintracer
+
+import (
+	"strings"
+
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// Collector represents a Zipkin trace collector, which is probably a set of
+// remote endpoints.
+type Collector interface {
+	Collect(*zipkincore.Span) error
+	Close() error
+}
+
+// NopCollector implements Collector but performs no work.
+type NopCollector struct{}
+
+// Collect implements Collector.
+func (NopCollector) Collect(*zipkincore.Span) error { return nil }
+
+// Close implements Collector.
+func (NopCollector) Close() error { return nil }
+
+// MultiCollector implements Collector by sending spans to all collectors.
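+//
+// For example (a sketch; the individual collectors must be constructed
+// first):
+//
+//	var c Collector = MultiCollector{httpCollector, kafkaCollector}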
+type MultiCollector []Collector
+
+// Collect implements Collector.
+func (c MultiCollector) Collect(s *zipkincore.Span) error {
+	return c.aggregateErrors(func(coll Collector) error { return coll.Collect(s) })
+}
+
+// Close implements Collector.
+func (c MultiCollector) Close() error {
+	return c.aggregateErrors(func(coll Collector) error { return coll.Close() })
+}
+
+func (c MultiCollector) aggregateErrors(f func(c Collector) error) error {
+	var e *collectionError
+	for i, collector := range c {
+		if err := f(collector); err != nil {
+			if e == nil {
+				e = &collectionError{
+					errs: make([]error, len(c)),
+				}
+			}
+			e.errs[i] = err
+		}
+	}
+	return e
+}
+
+// CollectionError represents an array of errors returned by one or more
+// failed Collector methods.
+type CollectionError interface {
+	Error() string
+	GetErrors() []error
+}
+
+type collectionError struct {
+	errs []error
+}
+
+func (c *collectionError) Error() string {
+	errs := []string{}
+	for _, err := range c.errs {
+		if err != nil {
+			errs = append(errs, err.Error())
+		}
+	}
+	return strings.Join(errs, "; ")
+}
+
+// GetErrors implements CollectionError
+func (c *collectionError) GetErrors() []error {
+	return c.errs
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go
new file mode 100644
index 00000000..e9fe2991
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go
@@ -0,0 +1,61 @@
+package zipkintracer
+
+import (
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+	"github.com/openzipkin/zipkin-go-opentracing/types"
+)
+
+// SpanContext holds the basic Span metadata.
+type SpanContext struct {
+	// A probabilistically unique identifier for a [multi-span] trace.
+	TraceID types.TraceID
+
+	// A probabilistically unique identifier for a span.
+	SpanID uint64
+
+	// Whether the trace is sampled.
+	Sampled bool
+
+	// The span's associated baggage.
+	Baggage map[string]string // initialized on first use
+
+	// The SpanID of this Context's parent, or nil if there is no parent.
+	ParentSpanID *uint64
+
+	// Flags provides the ability to create and communicate feature flags.
+	Flags flag.Flags
+
+	// Whether the span is owned by the current process
+	Owner bool
+}
+
+// ForeachBaggageItem belongs to the opentracing.SpanContext interface
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+	for k, v := range c.Baggage {
+		if !handler(k, v) {
+			break
+		}
+	}
+}
+
+// WithBaggageItem returns an entirely new basictracer SpanContext with the
+// given key:value baggage pair set.
+func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
+	var newBaggage map[string]string
+	if c.Baggage == nil {
+		newBaggage = map[string]string{key: val}
+	} else {
+		newBaggage = make(map[string]string, len(c.Baggage)+1)
+		for k, v := range c.Baggage {
+			newBaggage[k] = v
+		}
+		newBaggage[key] = val
+	}
+	var parentSpanID *uint64
+	if c.ParentSpanID != nil {
+		parentSpanID = new(uint64)
+		*parentSpanID = *c.ParentSpanID
+	}
+	// Use positional parameters so the compiler will help catch new fields.
+	return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage, parentSpanID, c.Flags, c.Owner}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go
new file mode 100644
index 00000000..1ee00c8a
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go
@@ -0,0 +1,78 @@
+package zipkintracer
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+const debugGoroutineIDTag = "_initial_goroutine"
+
+type errAssertionFailed struct {
+	span *spanImpl
+	msg  string
+}
+
+// Error implements the error interface.
+func (err *errAssertionFailed) Error() string {
+	return fmt.Sprintf("%s:\n%+v", err.msg, err.span)
+}
+
+func (s *spanImpl) Lock() {
+	s.Mutex.Lock()
+	s.maybeAssertSanityLocked()
+}
+
+func (s *spanImpl) maybeAssertSanityLocked() {
+	if s.tracer == nil {
+		s.Mutex.Unlock()
+		panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"})
+	}
+	if s.tracer.options.debugAssertSingleGoroutine {
+		startID := curGoroutineID()
+		curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64)
+		if !ok {
+			// This is likely invoked in the context of the SetTag which sets
+			// debugGoroutineTag.
+			return
+		}
+		if startID != curID {
+			s.Mutex.Unlock()
+			panic(&errAssertionFailed{
+				span: s,
+				msg:  fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID),
+			})
+		}
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// Credit to @bradfitz:
+// https://github.com/golang/net/blob/master/http2/gotrack.go#L51
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := strconv.ParseUint(string(b), 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go
new file mode 100644
index 00000000..31b6a009
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go
@@ -0,0 +1,62 @@
+package zipkintracer
+
+import "github.com/opentracing/opentracing-go"
+
+// A SpanEvent is emitted when a mutating command is called on a Span.
+type SpanEvent interface{}
+
+// EventCreate is emitted when a Span is created.
+type EventCreate struct{ OperationName string }
+
+// EventTag is received when SetTag is called.
+type EventTag struct {
+	Key   string
+	Value interface{}
+}
+
+// EventBaggage is received when SetBaggageItem is called.
+type EventBaggage struct {
+	Key, Value string
+}
+
+// EventLogFields is received when LogFields or LogKV is called.
+type EventLogFields opentracing.LogRecord
+
+// EventLog is received when Log (or one of its derivatives) is called.
+//
+// DEPRECATED
+type EventLog opentracing.LogData
+
+// EventFinish is received when Finish is called.
+type EventFinish RawSpan
+
+func (s *spanImpl) onCreate(opName string) {
+	if s.event != nil {
+		s.event(EventCreate{OperationName: opName})
+	}
+}
+func (s *spanImpl) onTag(key string, value interface{}) {
+	if s.event != nil {
+		s.event(EventTag{Key: key, Value: value})
+	}
+}
+func (s *spanImpl) onLog(ld opentracing.LogData) {
+	if s.event != nil {
+		s.event(EventLog(ld))
+	}
+}
+func (s *spanImpl) onLogFields(lr opentracing.LogRecord) {
+	if s.event != nil {
+		s.event(EventLogFields(lr))
+	}
+}
+func (s *spanImpl) onBaggage(key, value string) {
+	if s.event != nil {
+		s.event(EventBaggage{Key: key, Value: value})
+	}
+}
+func (s *spanImpl) onFinish(sp RawSpan) {
+	if s.event != nil {
+		s.event(EventFinish(sp))
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go
new file mode 100644
index 00000000..05cb10ea
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go
@@ -0,0 +1,39 @@
+package flag
+
+// Flags provides the ability to create and communicate feature flags.
+type Flags uint64
+
+// Flags is a bitset
+const (
+	Debug Flags = 1 << 0
+
+	// All flags below deal with binaryPropagators. They will be discarded in the
+	// textMapPropagator (not read and not set)
+
+	// SamplingSet and Sampled handle Sampled tribool logic for interop with
+	// instrumenting libraries / propagation channels not using a separate Sampled
+	// header and potentially encoding this in flags.
+	//
+	// When we receive a flag we do this:
+	// 1. Sampled bit is set => true
+	// 2. Sampled bit is not set => inspect SamplingSet bit.
+	// 2a. SamplingSet bit is set => false
+	// 2b. SamplingSet bit is not set => null
+	// Note on 2b.: depending on the propagator having a separate Sampled header
+	// we either assume Sampling is false or unknown. In the latter case we will
+	// run our sampler even though we are not the root of the trace.
+	//
+	// When propagating to a downstream service we will always be explicit:
+	// our binary propagator always sets the SamplingSet bit together with
+	// the actual Sampled value.
+	SamplingSet Flags = 1 << 1
+	Sampled     Flags = 1 << 2
+	// When set, we can ignore the value of the parentId. This is used for binary
+	// fixed width transports or transports like proto3 that return a default
+	// value if a value has not been set (thus not enabling you to distinguish
+	// between the value being set to the default or not set at all).
+	//
+	// While many zipkin systems re-use a trace id as the root span id, we know
+	// that some don't. With this flag, we can tell for sure if the span is root
+	// as opposed to the convention trace id == span id == parent id.
+	IsRoot Flags = 1 << 3
+)
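+
+// For illustration, a receiver can recover the sampled tribool from the
+// flags like this (a sketch, not part of the package API):
+//
+//	func sampledFromFlags(f Flags) (sampled, known bool) {
+//		if f&Sampled == Sampled {
+//			return true, true
+//		}
+//		if f&SamplingSet == SamplingSet {
+//			return false, true
+//		}
+//		return false, false // unknown: we may have to run our own sampler
+//	}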
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go
new file mode 100644
index 00000000..f5695e0e
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016 Bas van Beek
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zipkintracer
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/go-logfmt/logfmt"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+var errEventLogNotFound = errors.New("event log field not found")
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+	fields := fieldsAsMap(make(map[string]string, len(logFields)))
+	for _, field := range logFields {
+		field.Marshal(fields)
+	}
+	return json.Marshal(fields)
+}
+
+// MaterializeWithLogFmt converts log Fields into LogFmt string
+func MaterializeWithLogFmt(logFields []log.Field) ([]byte, error) {
+	var (
+		buffer  = bytes.NewBuffer(nil)
+		encoder = logfmt.NewEncoder(buffer)
+	)
+	for _, field := range logFields {
+		if err := encoder.EncodeKeyval(field.Key(), field.Value()); err != nil {
+			encoder.EncodeKeyval(field.Key(), err.Error())
+		}
+	}
+	return buffer.Bytes(), nil
+}
+
+// StrictZipkinMaterializer will only record a log.Field of type "event".
+func StrictZipkinMaterializer(logFields []log.Field) ([]byte, error) {
+	for _, field := range logFields {
+		if field.Key() == "event" {
+			return []byte(fmt.Sprintf("%+v", field.Value())), nil
+		}
+	}
+	return nil, errEventLogNotFound
+}
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+	ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+	ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+	ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+	value(ml)
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go
new file mode 100644
index 00000000..643f6535
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go
@@ -0,0 +1,64 @@
+package zipkintracer
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"strings"
+)
+
+// ErrMissingValue is used as a placeholder value when Log is called with an
+// odd number of key/value parameters.
+
+// Logger is the fundamental interface for all log operations. Log creates a
+// log event from keyvals, a variadic sequence of alternating keys and values.
+// The signature is compatible with the Go kit log package.
+type Logger interface {
+	Log(keyvals ...interface{}) error
+}
+
+// NewNopLogger provides a Logger that discards all Log data sent to it.
+func NewNopLogger() Logger {
+	return &nopLogger{}
+}
+
+// LogWrapper wraps a standard library logger into a Logger compatible with this
+// package.
+func LogWrapper(l *log.Logger) Logger {
+	return &wrappedLogger{l: l}
+}
+
+// wrappedLogger implements Logger
+type wrappedLogger struct {
+	l *log.Logger
+}
+
+// Log implements Logger
+func (l *wrappedLogger) Log(k ...interface{}) error {
+	if len(k)%2 == 1 {
+		k = append(k, ErrMissingValue)
+	}
+	o := make([]string, len(k)/2)
+	for i := 0; i < len(k); i += 2 {
+		o[i/2] = fmt.Sprintf("%s=%q", k[i], k[i+1])
+	}
+	l.l.Println(strings.Join(o, " "))
+	return nil
+}
+
+// nopLogger implements Logger
+type nopLogger struct{}
+
+// Log implements Logger
+func (*nopLogger) Log(_ ...interface{}) error { return nil }
+
+// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
+// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
+// object that calls f.
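+//
+// For example (a sketch adapting the standard library logger):
+//
+//	logger := LoggerFunc(func(keyvals ...interface{}) error {
+//		log.Println(keyvals...)
+//		return nil
+//	})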
+type LoggerFunc func(...interface{}) error
+
+// Log implements Logger by calling f(keyvals...).
+func (f LoggerFunc) Log(keyvals ...interface{}) error {
+	return f(keyvals...)
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go
new file mode 100644
index 00000000..f46ff011
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go
@@ -0,0 +1,52 @@
+package zipkintracer
+
+import (
+	otobserver "github.com/opentracing-contrib/go-observer"
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// observer is a dispatcher to other observers
+type observer struct {
+	observers []otobserver.Observer
+}
+
+// spanObserver is a dispatcher to other span observers
+type spanObserver struct {
+	observers []otobserver.SpanObserver
+}
+
+func (o observer) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (otobserver.SpanObserver, bool) {
+	var spanObservers []otobserver.SpanObserver
+	for _, obs := range o.observers {
+		spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+		if ok {
+			if spanObservers == nil {
+				spanObservers = make([]otobserver.SpanObserver, 0, len(o.observers))
+			}
+			spanObservers = append(spanObservers, spanObs)
+		}
+	}
+	if len(spanObservers) == 0 {
+		return nil, false
+	}
+
+	return spanObserver{observers: spanObservers}, true
+}
+
+func (o spanObserver) OnSetOperationName(operationName string) {
+	for _, obs := range o.observers {
+		obs.OnSetOperationName(operationName)
+	}
+}
+
+func (o spanObserver) OnSetTag(key string, value interface{}) {
+	for _, obs := range o.observers {
+		obs.OnSetTag(key, value)
+	}
+}
+
+func (o spanObserver) OnFinish(options opentracing.FinishOptions) {
+	for _, obs := range o.observers {
+		obs.OnFinish(options)
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go
new file mode 100644
index 00000000..56d2d5aa
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go
@@ -0,0 +1,68 @@
+package zipkintracer
+
+import (
+	opentracing "github.com/opentracing/opentracing-go"
+
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+	"github.com/openzipkin/zipkin-go-opentracing/types"
+)
+
+type accessorPropagator struct {
+	tracer *tracerImpl
+}
+
+// DelegatingCarrier is a flexible carrier interface which can be implemented
+// by types which have a means of storing the trace metadata and already know
+// how to serialize themselves (for example, protocol buffers).
+type DelegatingCarrier interface {
+	SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
+	State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
+	SetBaggageItem(key, value string)
+	GetBaggage(func(key, value string))
+}
+
+func (p *accessorPropagator) Inject(
+	spanContext opentracing.SpanContext,
+	carrier interface{},
+) error {
+	dc, ok := carrier.(DelegatingCarrier)
+	if !ok || dc == nil {
+		return opentracing.ErrInvalidCarrier
+	}
+	sc, ok := spanContext.(SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidSpanContext
+	}
+	dc.SetState(sc.TraceID, sc.SpanID, sc.ParentSpanID, sc.Sampled, sc.Flags)
+	for k, v := range sc.Baggage {
+		dc.SetBaggageItem(k, v)
+	}
+	return nil
+}
+
+func (p *accessorPropagator) Extract(
+	carrier interface{},
+) (opentracing.SpanContext, error) {
+	dc, ok := carrier.(DelegatingCarrier)
+	if !ok || dc == nil {
+		return nil, opentracing.ErrInvalidCarrier
+	}
+
+	traceID, spanID, parentSpanID, sampled, flags := dc.State()
+	sc := SpanContext{
+		TraceID:      traceID,
+		SpanID:       spanID,
+		Sampled:      sampled,
+		Baggage:      nil,
+		ParentSpanID: parentSpanID,
+		Flags:        flags,
+	}
+	dc.GetBaggage(func(k, v string) {
+		if sc.Baggage == nil {
+			sc.Baggage = map[string]string{}
+		}
+		sc.Baggage[k] = v
+	})
+
+	return sc, nil
+}
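[Editor's note: to make the accessor propagator concrete, here is a minimal, hypothetical DelegatingCarrier that keeps the state in plain struct fields; a real carrier would typically know how to serialize itself, for example to protocol buffers, as the interface comment suggests.]

type memoryCarrier struct {
	traceID      types.TraceID
	spanID       uint64
	parentSpanID *uint64
	sampled      bool
	flags        flag.Flags
	baggage      map[string]string
}

func (c *memoryCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
	c.traceID, c.spanID, c.parentSpanID = traceID, spanID, parentSpanID
	c.sampled, c.flags = sampled, flags
}

func (c *memoryCarrier) State() (types.TraceID, uint64, *uint64, bool, flag.Flags) {
	return c.traceID, c.spanID, c.parentSpanID, c.sampled, c.flags
}

func (c *memoryCarrier) SetBaggageItem(key, value string) {
	if c.baggage == nil {
		c.baggage = map[string]string{}
	}
	c.baggage[key] = value
}

func (c *memoryCarrier) GetBaggage(f func(key, value string)) {
	for k, v := range c.baggage {
		f(k, v)
	}
}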
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go
new file mode 100644
index 00000000..7d102f90
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go
@@ -0,0 +1,257 @@
+package zipkintracer
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+	opentracing "github.com/opentracing/opentracing-go"
+
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+	"github.com/openzipkin/zipkin-go-opentracing/types"
+	"github.com/openzipkin/zipkin-go-opentracing/wire"
+)
+
+type textMapPropagator struct {
+	tracer *tracerImpl
+}
+type binaryPropagator struct {
+	tracer *tracerImpl
+}
+
+const (
+	prefixTracerState = "x-b3-" // we default to interop with non-opentracing zipkin tracers
+	prefixBaggage     = "ot-baggage-"
+
+	tracerStateFieldCount = 3 // not 5: X-B3-ParentSpanId is optional and the Sampled header may be absent
+	zipkinTraceID         = prefixTracerState + "traceid"
+	zipkinSpanID          = prefixTracerState + "spanid"
+	zipkinParentSpanID    = prefixTracerState + "parentspanid"
+	zipkinSampled         = prefixTracerState + "sampled"
+	zipkinFlags           = prefixTracerState + "flags"
+)
+
+func (p *textMapPropagator) Inject(
+	spanContext opentracing.SpanContext,
+	opaqueCarrier interface{},
+) error {
+	sc, ok := spanContext.(SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidSpanContext
+	}
+	carrier, ok := opaqueCarrier.(opentracing.TextMapWriter)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+	carrier.Set(zipkinTraceID, sc.TraceID.ToHex())
+	carrier.Set(zipkinSpanID, fmt.Sprintf("%016x", sc.SpanID))
+	if sc.Sampled {
+		carrier.Set(zipkinSampled, "1")
+	} else {
+		carrier.Set(zipkinSampled, "0")
+	}
+
+	if sc.ParentSpanID != nil {
+		// we only set the ParentSpanID header if there is a parent span
+		carrier.Set(zipkinParentSpanID, fmt.Sprintf("%016x", *sc.ParentSpanID))
+	}
+	// we only need to inject the debug flag if set. see flag package for details.
+	flags := sc.Flags & flag.Debug
+	carrier.Set(zipkinFlags, strconv.FormatUint(uint64(flags), 10))
+
+	for k, v := range sc.Baggage {
+		carrier.Set(prefixBaggage+k, v)
+	}
+	return nil
+}
+
+func (p *textMapPropagator) Extract(
+	opaqueCarrier interface{},
+) (opentracing.SpanContext, error) {
+	carrier, ok := opaqueCarrier.(opentracing.TextMapReader)
+	if !ok {
+		return nil, opentracing.ErrInvalidCarrier
+	}
+	requiredFieldCount := 0
+	var (
+		traceID      types.TraceID
+		spanID       uint64
+		sampled      bool
+		parentSpanID *uint64
+		flags        flag.Flags
+		err          error
+	)
+	decodedBaggage := make(map[string]string)
+	err = carrier.ForeachKey(func(k, v string) error {
+		switch strings.ToLower(k) {
+		case zipkinTraceID:
+			traceID, err = types.TraceIDFromHex(v)
+			if err != nil {
+				return opentracing.ErrSpanContextCorrupted
+			}
+		case zipkinSpanID:
+			spanID, err = strconv.ParseUint(v, 16, 64)
+			if err != nil {
+				return opentracing.ErrSpanContextCorrupted
+			}
+		case zipkinParentSpanID:
+			var id uint64
+			id, err = strconv.ParseUint(v, 16, 64)
+			if err != nil {
+				return opentracing.ErrSpanContextCorrupted
+			}
+			parentSpanID = &id
+		case zipkinSampled:
+			sampled, err = strconv.ParseBool(v)
+			if err != nil {
+				return opentracing.ErrSpanContextCorrupted
+			}
+			// Sampled header was explicitly set
+			flags |= flag.SamplingSet
+		case zipkinFlags:
+			var f uint64
+			f, err = strconv.ParseUint(v, 10, 64)
+			if err != nil {
+				return opentracing.ErrSpanContextCorrupted
+			}
+			if flag.Flags(f)&flag.Debug == flag.Debug {
+				flags |= flag.Debug
+			}
+		default:
+			lowercaseK := strings.ToLower(k)
+			if strings.HasPrefix(lowercaseK, prefixBaggage) {
+				decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v
+			}
+			// Balance off the requiredFieldCount++ just below...
+			requiredFieldCount--
+		}
+		requiredFieldCount++
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if requiredFieldCount < tracerStateFieldCount {
+		if requiredFieldCount == 0 {
+			return nil, opentracing.ErrSpanContextNotFound
+		}
+		return nil, opentracing.ErrSpanContextCorrupted
+	}
+
+	// check if Sample state was communicated through the Flags bitset
+	if !sampled && flags&flag.Sampled == flag.Sampled {
+		sampled = true
+	}
+
+	return SpanContext{
+		TraceID:      traceID,
+		SpanID:       spanID,
+		Sampled:      sampled,
+		Baggage:      decodedBaggage,
+		ParentSpanID: parentSpanID,
+		Flags:        flags,
+	}, nil
+}
+
+func (p *binaryPropagator) Inject(
+	spanContext opentracing.SpanContext,
+	opaqueCarrier interface{},
+) error {
+	sc, ok := spanContext.(SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidSpanContext
+	}
+	carrier, ok := opaqueCarrier.(io.Writer)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	state := wire.TracerState{}
+	state.TraceId = sc.TraceID.Low
+	state.TraceIdHigh = sc.TraceID.High
+	state.SpanId = sc.SpanID
+	state.Sampled = sc.Sampled
+	state.BaggageItems = sc.Baggage
+
+	// encode the debug bit
+	flags := sc.Flags & flag.Debug
+	if sc.ParentSpanID != nil {
+		state.ParentSpanId = *sc.ParentSpanID
+	} else {
+		// root span...
+		state.ParentSpanId = 0
+		flags |= flag.IsRoot
+	}
+
+	// we explicitly inform our sampling state downstream
+	flags |= flag.SamplingSet
+	if sc.Sampled {
+		flags |= flag.Sampled
+	}
+	state.Flags = uint64(flags)
+
+	b, err := proto.Marshal(&state)
+	if err != nil {
+		return err
+	}
+
+	// Write the length of the marshalled binary to the writer.
+	length := uint32(len(b))
+	if err = binary.Write(carrier, binary.BigEndian, &length); err != nil {
+		return err
+	}
+
+	_, err = carrier.Write(b)
+	return err
+}
+
+func (p *binaryPropagator) Extract(
+	opaqueCarrier interface{},
+) (opentracing.SpanContext, error) {
+	carrier, ok := opaqueCarrier.(io.Reader)
+	if !ok {
+		return nil, opentracing.ErrInvalidCarrier
+	}
+
+	// Read the length of the marshalled binary first. io.ReadAll isn't that
+	// performant since it keeps resizing the underlying buffer as it encounters
+	// more bytes to read. By reading the length, we can allocate a fixed-size
+	// buf and read the exact number of bytes into it.
+	var length uint32
+	if err := binary.Read(carrier, binary.BigEndian, &length); err != nil {
+		return nil, opentracing.ErrSpanContextCorrupted
+	}
+	buf := make([]byte, length)
+	if n, err := carrier.Read(buf); err != nil {
+		if n > 0 {
+			return nil, opentracing.ErrSpanContextCorrupted
+		}
+		return nil, opentracing.ErrSpanContextNotFound
+	}
+
+	ctx := wire.TracerState{}
+	if err := proto.Unmarshal(buf, &ctx); err != nil {
+		return nil, opentracing.ErrSpanContextCorrupted
+	}
+
+	flags := flag.Flags(ctx.Flags)
+	if flags&flag.Sampled == flag.Sampled {
+		ctx.Sampled = true
+	}
+	// this propagator expects sampling state to be explicitly propagated by the
+	// upstream service, so we set this flag to identify to the tracer that it
+	// should not run its sampler when it is not the root of the trace.
+	flags |= flag.SamplingSet
+
+	return SpanContext{
+		TraceID:      types.TraceID{Low: ctx.TraceId, High: ctx.TraceIdHigh},
+		SpanID:       ctx.SpanId,
+		Sampled:      ctx.Sampled,
+		Baggage:      ctx.BaggageItems,
+		ParentSpanID: &ctx.ParentSpanId,
+		Flags:        flags,
+	}, nil
+}
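[Editor's note: a usage sketch for the text map propagator. `tracer` is assumed to be a tracer built by this package with an active `span`; HTTPHeadersCarrier, HTTPHeaders and ext.RPCServerOption are standard opentracing-go pieces.]

// Client side: write the x-b3-* headers onto an outbound request.
req, _ := http.NewRequest("GET", "http://downstream/api", nil)
if err := tracer.Inject(
	span.Context(),
	opentracing.HTTPHeaders,
	opentracing.HTTPHeadersCarrier(req.Header),
); err != nil {
	log.Println("inject failed:", err)
}

// Server side: read the headers back and parent the server span on them.
wireCtx, err := tracer.Extract(
	opentracing.HTTPHeaders,
	opentracing.HTTPHeadersCarrier(req.Header),
)
if err == nil {
	serverSpan := tracer.StartSpan("handle /api", ext.RPCServerOption(wireCtx))
	defer serverSpan.Finish()
}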
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
new file mode 100644
index 00000000..03bc15b2
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
@@ -0,0 +1,30 @@
+package zipkintracer
+
+import (
+	"time"
+
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// RawSpan encapsulates all state associated with a (finished) Span.
+type RawSpan struct {
+	// Those recording the RawSpan should also record the contents of its
+	// SpanContext.
+	Context SpanContext
+
+	// The name of the "operation" this span is an instance of. (Called a "span
+	// name" in some implementations)
+	Operation string
+
+	// We store <start, duration> rather than <start, end> so that only
+	// one of the timestamps has global clock uncertainty issues.
+	Start    time.Time
+	Duration time.Duration
+
+	// Essentially an extension mechanism. Can be used for many purposes,
+	// not to be enumerated here.
+	Tags opentracing.Tags
+
+	// The span's "microlog".
+	Logs []opentracing.LogRecord
+}
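[Editor's note: RawSpan is the payload handed to a SpanRecorder (see recorder.go below) when a span finishes; a hypothetical recorder that just prints spans could look like this.]

// stdoutRecorder is a hypothetical SpanRecorder implementation.
type stdoutRecorder struct{}

func (stdoutRecorder) RecordSpan(span zipkintracer.RawSpan) {
	fmt.Printf("%s took %s (trace %s, sampled=%t)\n",
		span.Operation, span.Duration,
		span.Context.TraceID.ToHex(), span.Context.Sampled)
}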
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
new file mode 100644
index 00000000..0b8eeb7f
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
@@ -0,0 +1,60 @@
+package zipkintracer
+
+import "sync"
+
+// A SpanRecorder handles all of the `RawSpan` data generated via an
+// associated `Tracer` (see `NewTracer`) instance, decoupling the collection
+// of finished spans from the tracer that produced them.
+type SpanRecorder interface {
+	// Implementations must determine whether and where to store `span`.
+	RecordSpan(span RawSpan)
+}
+
+// InMemorySpanRecorder is a simple thread-safe implementation of
+// SpanRecorder that stores all reported spans in memory, accessible
+// via reporter.GetSpans(). It is primarily intended for testing purposes.
+type InMemorySpanRecorder struct {
+	sync.RWMutex
+	spans []RawSpan
+}
+
+// NewInMemoryRecorder creates a new InMemorySpanRecorder.
+func NewInMemoryRecorder() *InMemorySpanRecorder {
+	return new(InMemorySpanRecorder)
+}
+
+// RecordSpan implements the respective method of SpanRecorder.
+func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) {
+	r.Lock()
+	defer r.Unlock()
+	r.spans = append(r.spans, span)
+}
+
+// GetSpans returns a copy of the array of spans accumulated so far.
+func (r *InMemorySpanRecorder) GetSpans() []RawSpan {
+	r.RLock()
+	defer r.RUnlock()
+	spans := make([]RawSpan, len(r.spans))
+	copy(spans, r.spans)
+	return spans
+}
+
+// GetSampledSpans returns a slice of spans accumulated so far which were sampled.
+func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan {
+	r.RLock()
+	defer r.RUnlock()
+	spans := make([]RawSpan, 0, len(r.spans))
+	for _, span := range r.spans {
+		if span.Context.Sampled {
+			spans = append(spans, span)
+		}
+	}
+	return spans
+}
+
+// Reset clears the internal array of spans.
+func (r *InMemorySpanRecorder) Reset() {
+	r.Lock()
+	defer r.Unlock()
+	r.spans = nil
+}
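[Editor's note: a typical test-side use of the in-memory recorder. NewTracer taking the recorder as its first argument matches this package's constructor, but treat the wiring as a sketch.]

func TestTracedWork(t *testing.T) {
	recorder := zipkintracer.NewInMemoryRecorder()
	tracer, err := zipkintracer.NewTracer(recorder)
	if err != nil {
		t.Fatal(err)
	}

	span := tracer.StartSpan("do-work")
	span.Finish()

	if got := len(recorder.GetSpans()); got != 1 {
		t.Fatalf("expected 1 recorded span, got %d", got)
	}
}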
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
new file mode 100644
index 00000000..bb7ff0a5
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
@@ -0,0 +1,99 @@
+package zipkintracer
+
+import (
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Sampler functions return whether a Zipkin span should be sampled, based on
+// its traceID.
+type Sampler func(id uint64) bool
+
+func neverSample(_ uint64) bool { return false }
+
+func alwaysSample(_ uint64) bool { return true }
+
+// ModuloSampler provides a typical OpenTracing type Sampler.
+func ModuloSampler(mod uint64) Sampler {
+	if mod < 2 {
+		return alwaysSample
+	}
+	return func(id uint64) bool {
+		return (id % mod) == 0
+	}
+}
+
+// NewBoundarySampler is appropriate for high-traffic instrumentation that
+// provisions random trace ids and makes the sampling decision only once.
+// It defends against nodes in the cluster selecting exactly the same ids.
+func NewBoundarySampler(rate float64, salt int64) Sampler {
+	if rate <= 0 {
+		return neverSample
+	}
+	if rate >= 1.0 {
+		return alwaysSample
+	}
+	var (
+		boundary = int64(rate * 10000)
+		usalt    = uint64(salt)
+	)
+	return func(id uint64) bool {
+		return int64(math.Abs(float64(id^usalt)))%10000 < boundary
+	}
+}
+
+// NewCountingSampler is appropriate for low-traffic instrumentation or
+// deployments that do not provision random trace ids. It is not appropriate
+// for collectors, as the sampling decision isn't idempotent (consistent
+// based on trace id).
+func NewCountingSampler(rate float64) Sampler {
+	if rate <= 0 {
+		return neverSample
+	}
+	if rate >= 1.0 {
+		return alwaysSample
+	}
+	var (
+		i         = 0
+		outOf100  = int(rate*100 + math.Copysign(0.5, rate*100)) // round to the nearest int instead of truncating
+		decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano())))
+		mtx       = &sync.Mutex{}
+	)
+
+	return func(_ uint64) bool {
+		mtx.Lock()
+		defer mtx.Unlock()
+		result := decisions[i]
+		i++
+		if i == 100 {
+			i = 0
+		}
+		return result
+	}
+}
+
+/**
+ * Reservoir sampling algorithm borrowed from Stack Overflow.
+ *
+ * http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s
+ */
+func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool {
+	result := make([]bool, size)
+	chosen := make([]int, cardinality)
+	var i int
+	for i = 0; i < cardinality; i++ {
+		chosen[i] = i
+		result[i] = true
+	}
+	for ; i < size; i++ {
+		j := rnd.Intn(i + 1)
+		if j < cardinality {
+			result[chosen[j]] = false
+			result[i] = true
+			chosen[j] = i
+		}
+	}
+	return result
+}
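[Editor's note: wiring a sampler into the tracer, as a sketch. NewBoundarySampler comes from this file, while WithSampler is assumed to be the corresponding tracer option and `recorder` to exist already.]

// Keep roughly 1 in 100 traces. The salt decorrelates nodes that would
// otherwise make identical decisions for the same trace ids.
sampler := zipkintracer.NewBoundarySampler(0.01, 42)

// WithSampler is assumed here to be the option that installs it.
tracer, err := zipkintracer.NewTracer(recorder, zipkintracer.WithSampler(sampler))
if err != nil {
	panic(err)
}
_ = tracer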
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go
new file mode 100644
index 00000000..4850a94d
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go
@@ -0,0 +1,290 @@
+package zipkintracer
+
+import (
+	"sync"
+	"time"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+
+	otobserver "github.com/opentracing-contrib/go-observer"
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// Span provides access to the essential details of the span, for use
+// by zipkintracer consumers.  These methods may only be called prior
+// to (*opentracing.Span).Finish().
+type Span interface {
+	opentracing.Span
+
+	// Operation names the work done by this span instance
+	Operation() string
+
+	// Start indicates when the span began
+	Start() time.Time
+}
+
+// Implements the `Span` interface. Created via tracerImpl (see
+// `zipkintracer.NewTracer()`).
+type spanImpl struct {
+	tracer     *tracerImpl
+	event      func(SpanEvent)
+	observer   otobserver.SpanObserver
+	sync.Mutex // protects the fields below
+	raw        RawSpan
+	// The number of logs dropped because of MaxLogsPerSpan.
+	numDroppedLogs int
+	Endpoint       *zipkincore.Endpoint
+}
+
+var spanPool = &sync.Pool{New: func() interface{} {
+	return &spanImpl{}
+}}
+
+func (s *spanImpl) reset() {
+	s.tracer, s.event = nil, nil
+	// Note: Would like to do the following, but then the consumer of RawSpan
+	// (the recorder) needs to make sure that they're not holding on to the
+	// baggage or logs when they return (i.e. they need to copy if they care):
+	//
+	//     logs, baggage := s.raw.Logs[:0], s.raw.Baggage
+	//     for k := range baggage {
+	//         delete(baggage, k)
+	//     }
+	//     s.raw.Logs, s.raw.Baggage = logs, baggage
+	//
+	// That's likely too much to ask for. But there is some magic we should
+	// be able to do with `runtime.SetFinalizer` to reclaim that memory into
+	// a buffer pool when GC considers them unreachable, which should ease
+	// some of the load. Hard to say how quickly that would be in practice
+	// though.
+	s.raw = RawSpan{
+		Context: SpanContext{},
+	}
+}
+
+func (s *spanImpl) SetOperationName(operationName string) opentracing.Span {
+	if s.observer != nil {
+		s.observer.OnSetOperationName(operationName)
+	}
+	s.Lock()
+	defer s.Unlock()
+	s.raw.Operation = operationName
+	return s
+}
+
+func (s *spanImpl) trim() bool {
+	return !s.raw.Context.Sampled && s.tracer.options.trimUnsampledSpans
+}
+
+func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span {
+	defer s.onTag(key, value)
+	if s.observer != nil {
+		s.observer.OnSetTag(key, value)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+	if key == string(ext.SamplingPriority) {
+		if v, ok := value.(uint16); ok {
+			s.raw.Context.Sampled = v != 0
+			return s
+		}
+	}
+	if s.trim() {
+		return s
+	}
+
+	if s.raw.Tags == nil {
+		s.raw.Tags = opentracing.Tags{}
+	}
+	s.raw.Tags[key] = value
+	return s
+}
+
+func (s *spanImpl) LogKV(keyValues ...interface{}) {
+	fields, err := log.InterleavedKVToFields(keyValues...)
+	if err != nil {
+		s.LogFields(log.Error(err), log.String("function", "LogKV"))
+		return
+	}
+	s.LogFields(fields...)
+}
+
+func (s *spanImpl) appendLog(lr opentracing.LogRecord) {
+	maxLogs := s.tracer.options.maxLogsPerSpan
+	if maxLogs == 0 || len(s.raw.Logs) < maxLogs {
+		s.raw.Logs = append(s.raw.Logs, lr)
+		return
+	}
+
+	// We have too many logs. We don't touch the first numOld logs; we treat the
+	// rest as a circular buffer and overwrite the oldest log among those.
+	numOld := (maxLogs - 1) / 2
+	numNew := maxLogs - numOld
+	s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr
+	s.numDroppedLogs++
+}
+
+func (s *spanImpl) LogFields(fields ...log.Field) {
+	lr := opentracing.LogRecord{
+		Fields: fields,
+	}
+	defer s.onLogFields(lr)
+	s.Lock()
+	defer s.Unlock()
+	if s.trim() || s.tracer.options.dropAllLogs {
+		return
+	}
+	if lr.Timestamp.IsZero() {
+		lr.Timestamp = time.Now()
+	}
+	s.appendLog(lr)
+}
+
+func (s *spanImpl) LogEvent(event string) {
+	s.Log(opentracing.LogData{
+		Event: event,
+	})
+}
+
+func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) {
+	s.Log(opentracing.LogData{
+		Event:   event,
+		Payload: payload,
+	})
+}
+
+func (s *spanImpl) Log(ld opentracing.LogData) {
+	defer s.onLog(ld)
+	s.Lock()
+	defer s.Unlock()
+	if s.trim() || s.tracer.options.dropAllLogs {
+		return
+	}
+
+	if ld.Timestamp.IsZero() {
+		ld.Timestamp = time.Now()
+	}
+
+	s.appendLog(ld.ToLogRecord())
+}
+
+func (s *spanImpl) Finish() {
+	s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move
+// to the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+	// This algorithm is described in:
+	//    http://www.cplusplus.com/reference/algorithm/rotate
+	for first, middle, next := 0, pos, pos; first != middle; {
+		buf[first], buf[next] = buf[next], buf[first]
+		first++
+		next++
+		if next == len(buf) {
+			next = middle
+		} else if first == middle {
+			middle = next
+		}
+	}
+}
+
+func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
+	finishTime := opts.FinishTime
+	if finishTime.IsZero() {
+		finishTime = time.Now()
+	}
+	duration := finishTime.Sub(s.raw.Start)
+
+	if s.observer != nil {
+		s.observer.OnFinish(opts)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	for _, lr := range opts.LogRecords {
+		s.appendLog(lr)
+	}
+	for _, ld := range opts.BulkLogData {
+		s.appendLog(ld.ToLogRecord())
+	}
+
+	if s.numDroppedLogs > 0 {
+		// We dropped some log events, which means that we used part of Logs as a
+		// circular buffer (see appendLog). De-circularize it.
+		numOld := (len(s.raw.Logs) - 1) / 2
+		numNew := len(s.raw.Logs) - numOld
+		rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew)
+
+		// Replace the log in the middle (the oldest "new" log) with information
+		// about the dropped logs. This means that we are effectively dropping one
+		// more "new" log.
+		numDropped := s.numDroppedLogs + 1
+		s.raw.Logs[numOld] = opentracing.LogRecord{
+			// Keep the timestamp of the last dropped event.
+			Timestamp: s.raw.Logs[numOld].Timestamp,
+			Fields: []log.Field{
+				log.String("event", "dropped Span logs"),
+				log.Int("dropped_log_count", numDropped),
+				log.String("component", "zipkintracer"),
+			},
+		}
+	}
+
+	s.raw.Duration = duration
+
+	s.onFinish(s.raw)
+	s.tracer.options.recorder.RecordSpan(s.raw)
+
+	// Last chance to get options before the span is possibly reset.
+	poolEnabled := s.tracer.options.enableSpanPool
+	if s.tracer.options.debugAssertUseAfterFinish {
+		// This makes it much more likely to catch a panic on any subsequent
+		// operation since s.tracer is accessed on every call to `Lock`.
+		// We don't call `reset()` here to preserve the logs in the Span
+		// which are printed when the assertion triggers.
+		s.tracer = nil
+	}
+
+	if poolEnabled {
+		spanPool.Put(s)
+	}
+}
+
+func (s *spanImpl) Tracer() opentracing.Tracer {
+	return s.tracer
+}
+
+func (s *spanImpl) Context() opentracing.SpanContext {
+	return s.raw.Context
+}
+
+func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span {
+	s.onBaggage(key, val)
+	if s.trim() {
+		return s
+	}
+
+	s.Lock()
+	defer s.Unlock()
+	s.raw.Context = s.raw.Context.WithBaggageItem(key, val)
+	return s
+}
+
+func (s *spanImpl) BaggageItem(key string) string {
+	s.Lock()
+	defer s.Unlock()
+	return s.raw.Context.Baggage[key]
+}
+
+func (s *spanImpl) Operation() string {
+	return s.raw.Operation
+}
+
+func (s *spanImpl) Start() time.Time {
+	return s.raw.Start
+}
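[Editor's note: putting the span methods above together; tracer construction is omitted and ext is opentracing-go's ext package.]

span := tracer.StartSpan("charge-card")
span.SetTag("customer.id", "c-42") // lands in raw.Tags unless the span is trimmed
span.LogKV("event", "authorized", "amount", 1999)

// SetTag special-cases ext.SamplingPriority: any non-zero uint16 forces
// Sampled on for this span's context, zero forces it off.
ext.SamplingPriority.Set(span, 1)

span.Finish() // hands the finished RawSpan to the configured SpanRecorder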
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go
new file mode 100644
index 00000000..9b51d874
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go
@@ -0,0 +1,7 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package scribe
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go
new file mode 100644
index 00000000..be5fbae0
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go
@@ -0,0 +1,24 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package scribe
+
+import (
+	"bytes"
+	"reflect"
+	"context"
+	"fmt"
+	"github.com/apache/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = reflect.DeepEqual
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go
new file mode 100644
index 00000000..3767688b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go
@@ -0,0 +1,550 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package scribe
+
+import (
+	"bytes"
+	"reflect"
+	"database/sql/driver"
+	"errors"
+	"context"
+	"fmt"
+	"github.com/apache/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = reflect.DeepEqual
+var _ = bytes.Equal
+
+type ResultCode int64
+const (
+  ResultCode_OK ResultCode = 0
+  ResultCode_TRY_LATER ResultCode = 1
+)
+
+func (p ResultCode) String() string {
+  switch p {
+  case ResultCode_OK: return "OK"
+  case ResultCode_TRY_LATER: return "TRY_LATER"
+  }
+  return "<UNSET>"
+}
+
+func ResultCodeFromString(s string) (ResultCode, error) {
+  switch s {
+  case "OK": return ResultCode_OK, nil 
+  case "TRY_LATER": return ResultCode_TRY_LATER, nil 
+  }
+  return ResultCode(0), fmt.Errorf("not a valid ResultCode string")
+}
+
+
+func ResultCodePtr(v ResultCode) *ResultCode { return &v }
+
+func (p ResultCode) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *ResultCode) UnmarshalText(text []byte) error {
+q, err := ResultCodeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *ResultCode) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = ResultCode(v)
+return nil
+}
+
+func (p * ResultCode) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+//  - Category
+//  - Message
+type LogEntry struct {
+  Category string `thrift:"category,1" db:"category" json:"category"`
+  Message string `thrift:"message,2" db:"message" json:"message"`
+}
+
+func NewLogEntry() *LogEntry {
+  return &LogEntry{}
+}
+
+
+func (p *LogEntry) GetCategory() string {
+  return p.Category
+}
+
+func (p *LogEntry) GetMessage() string {
+  return p.Message
+}
+func (p *LogEntry) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *LogEntry)  ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Category = v
+}
+  return nil
+}
+
+func (p *LogEntry)  ReadField2(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Message = v
+}
+  return nil
+}
+
+func (p *LogEntry) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("LogEntry"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *LogEntry) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("category", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:category: ", p), err) }
+  if err := oprot.WriteString(string(p.Category)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.category (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:category: ", p), err) }
+  return err
+}
+
+func (p *LogEntry) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("message", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:message: ", p), err) }
+  if err := oprot.WriteString(string(p.Message)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.message (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:message: ", p), err) }
+  return err
+}
+
+func (p *LogEntry) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("LogEntry(%+v)", *p)
+}
+
+type Scribe interface {
+  // Parameters:
+  //  - Messages
+  Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error)
+}
+
+type ScribeClient struct {
+  c thrift.TClient
+}
+
+// Deprecated: Use NewScribe instead
+func NewScribeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ScribeClient {
+  return &ScribeClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+// Deprecated: Use NewScribe instead
+func NewScribeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ScribeClient {
+  return &ScribeClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewScribeClient(c thrift.TClient) *ScribeClient {
+  return &ScribeClient{
+    c: c,
+  }
+}
+
+// Parameters:
+//  - Messages
+func (p *ScribeClient) Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error) {
+  var _args0 ScribeLogArgs
+  _args0.Messages = messages
+  var _result1 ScribeLogResult
+  if err = p.c.Call(ctx, "Log", &_args0, &_result1); err != nil {
+    return
+  }
+  return _result1.GetSuccess(), nil
+}
+
+type ScribeProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler Scribe
+}
+
+func (p *ScribeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *ScribeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *ScribeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewScribeProcessor(handler Scribe) *ScribeProcessor {
+
+  self2 := &ScribeProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self2.processorMap["Log"] = &scribeProcessorLog{handler:handler}
+return self2
+}
+
+func (p *ScribeProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err := iprot.ReadMessageBegin()
+  if err != nil { return false, err }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(thrift.STRUCT)
+  iprot.ReadMessageEnd()
+  x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+  x3.Write(oprot)
+  oprot.WriteMessageEnd()
+  oprot.Flush()
+  return false, x3
+
+}
+
+type scribeProcessorLog struct {
+  handler Scribe
+}
+
+func (p *scribeProcessorLog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := ScribeLogArgs{}
+  if err = args.Read(iprot); err != nil {
+    iprot.ReadMessageEnd()
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+    oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
+    x.Write(oprot)
+    oprot.WriteMessageEnd()
+    oprot.Flush()
+    return false, err
+  }
+
+  iprot.ReadMessageEnd()
+  result := ScribeLogResult{}
+var retval ResultCode
+  var err2 error
+  if retval, err2 = p.handler.Log(ctx, args.Messages); err2 != nil {
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Log: " + err2.Error())
+    oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
+    x.Write(oprot)
+    oprot.WriteMessageEnd()
+    oprot.Flush()
+    return true, err2
+  } else {
+    result.Success = &retval
+}
+  if err2 = oprot.WriteMessageBegin("Log", thrift.REPLY, seqId); err2 != nil {
+    err = err2
+  }
+  if err2 = result.Write(oprot); err == nil && err2 != nil {
+    err = err2
+  }
+  if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+    err = err2
+  }
+  if err2 = oprot.Flush(); err == nil && err2 != nil {
+    err = err2
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Messages
+type ScribeLogArgs struct {
+  Messages []*LogEntry `thrift:"messages,1" db:"messages" json:"messages"`
+}
+
+func NewScribeLogArgs() *ScribeLogArgs {
+  return &ScribeLogArgs{}
+}
+
+
+func (p *ScribeLogArgs) GetMessages() []*LogEntry {
+  return p.Messages
+}
+func (p *ScribeLogArgs) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ScribeLogArgs)  ReadField1(iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin()
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*LogEntry, 0, size)
+  p.Messages =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &LogEntry{}
+    if err := _elem4.Read(iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Messages = append(p.Messages, _elem4)
+  }
+  if err := iprot.ReadListEnd(); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ScribeLogArgs) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Log_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ScribeLogArgs) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("messages", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:messages: ", p), err) }
+  if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Messages)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Messages {
+    if err := v.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:messages: ", p), err) }
+  return err
+}
+
+func (p *ScribeLogArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ScribeLogArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type ScribeLogResult struct {
+  Success *ResultCode `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewScribeLogResult() *ScribeLogResult {
+  return &ScribeLogResult{}
+}
+
+var ScribeLogResult_Success_DEFAULT ResultCode
+func (p *ScribeLogResult) GetSuccess() ResultCode {
+  if !p.IsSetSuccess() {
+    return ScribeLogResult_Success_DEFAULT
+  }
+return *p.Success
+}
+func (p *ScribeLogResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *ScribeLogResult) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField0(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ScribeLogResult)  ReadField0(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(); err != nil {
+  return thrift.PrependError("error reading field 0: ", err)
+} else {
+  temp := ResultCode(v)
+  p.Success = &temp
+}
+  return nil
+}
+
+func (p *ScribeLogResult) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Log_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ScribeLogResult) writeField0(oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin("success", thrift.I32, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteI32(int32(*p.Success)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *ScribeLogResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ScribeLogResult(%+v)", *p)
+}
+
+
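[Editor's note: the generated Scribe interface above is what a collector endpoint implements; a minimal, hypothetical handler might look like this.]

// countingScribe acknowledges every batch and tallies delivered entries.
type countingScribe struct{ received int }

func (s *countingScribe) Log(ctx context.Context, messages []*scribe.LogEntry) (scribe.ResultCode, error) {
	s.received += len(messages)
	return scribe.ResultCode_OK, nil
}

// Server wiring then goes through NewScribeProcessor(&countingScribe{}).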
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 00000000..2d5ebe7f
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,7 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go
new file mode 100644
index 00000000..6a662eae
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go
@@ -0,0 +1,45 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+	"bytes"
+	"reflect"
+	"context"
+	"fmt"
+	"github.com/apache/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = reflect.DeepEqual
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const HTTP_HOST = "http.host"
+const HTTP_METHOD = "http.method"
+const HTTP_PATH = "http.path"
+const HTTP_URL = "http.url"
+const HTTP_STATUS_CODE = "http.status_code"
+const HTTP_REQUEST_SIZE = "http.request.size"
+const HTTP_RESPONSE_SIZE = "http.response.size"
+const LOCAL_COMPONENT = "lc"
+const ERROR = "error"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+
+func init() {
+}
+
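[Editor's note: these constants are the standard zipkin annotation values and binary-annotation keys. As an illustration (the Annotation struct is defined in zipkinCore.go below; `host` is assumed built elsewhere), a client-send annotation is assembled like so.]

// Zipkin timestamps are microseconds since the Unix epoch.
ann := &zipkincore.Annotation{
	Timestamp: time.Now().UnixNano() / int64(time.Microsecond),
	Value:     zipkincore.CLIENT_SEND,
	Host:      host, // *zipkincore.Endpoint, assumed built elsewhere
}
_ = ann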
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go
new file mode 100644
index 00000000..42f048a3
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go
@@ -0,0 +1,1285 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+	"bytes"
+	"reflect"
+	"database/sql/driver"
+	"errors"
+	"context"
+	"fmt"
+	"github.com/apache/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = reflect.DeepEqual
+var _ = bytes.Equal
+
+//A subset of thrift base types, except BYTES.
+type AnnotationType int64
+const (
+  AnnotationType_BOOL AnnotationType = 0
+  AnnotationType_BYTES AnnotationType = 1
+  AnnotationType_I16 AnnotationType = 2
+  AnnotationType_I32 AnnotationType = 3
+  AnnotationType_I64 AnnotationType = 4
+  AnnotationType_DOUBLE AnnotationType = 5
+  AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+  switch p {
+  case AnnotationType_BOOL: return "BOOL"
+  case AnnotationType_BYTES: return "BYTES"
+  case AnnotationType_I16: return "I16"
+  case AnnotationType_I32: return "I32"
+  case AnnotationType_I64: return "I64"
+  case AnnotationType_DOUBLE: return "DOUBLE"
+  case AnnotationType_STRING: return "STRING"
+  }
+  return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+  switch s {
+  case "BOOL": return AnnotationType_BOOL, nil 
+  case "BYTES": return AnnotationType_BYTES, nil 
+  case "I16": return AnnotationType_I16, nil 
+  case "I32": return AnnotationType_I32, nil 
+  case "I64": return AnnotationType_I64, nil 
+  case "DOUBLE": return AnnotationType_DOUBLE, nil 
+  case "STRING": return AnnotationType_STRING, nil 
+  }
+  return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+q, err := AnnotationTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = AnnotationType(v)
+return nil
+}
+
+func (p * AnnotationType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+// 
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+// 
+// Attributes:
+//  - Ipv4: IPv4 host address packed into 4 bytes.
+// 
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+//  - Port: IPv4 port or 0, if unknown.
+// 
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//  - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web".
+// 
+// This is the primary parameter for trace lookup, so should be intuitive as
+// possible, for example, matching names in service discovery.
+// 
+// Conventionally, when the service name isn't known, service_name = "unknown".
+// However, it is also permissible to set service_name = "" (empty string).
+// The difference in the latter usage is that the span will not be queryable
+// by service name unless more information is added to the span with non-empty
+// service name, e.g. an additional annotation from the server.
+// 
+// Particularly clients may not have a reliable service name at ingest. One
+// approach is to set service_name to "" at ingest, and later assign a
+// better label based on binary annotations, such as user agent.
+//  - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
+type Endpoint struct {
+  Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+  Port int16 `thrift:"port,2" db:"port" json:"port"`
+  ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+  Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+  return &Endpoint{}
+}
+
+
+func (p *Endpoint) GetIpv4() int32 {
+  return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+  return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+  return p.ServiceName
+}
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+  return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+  return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField2(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Endpoint)  ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ipv4 = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField2(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Port = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField3(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField4(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.Ipv6 = v
+}
+  return nil
+}
+
+func (p *Endpoint) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Endpoint"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+    if err := p.writeField3(oprot); err != nil { return err }
+    if err := p.writeField4(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+  if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+  if err := oprot.WriteI16(int16(p.Port)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+  if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
+  if p.IsSetIpv6() {
+    if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+    if err := oprot.WriteBinary(p.Ipv6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+  }
+  return err
+}
+
+func (p *Endpoint) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// Associates an event that explains latency with a timestamp.
+// 
+// Unlike log statements, annotations are often codes: for example "sr".
+// 
+// Attributes:
+//  - Timestamp: Microseconds from epoch.
+// 
+// This value should use the most precise value possible. For example,
+// gettimeofday or multiplying currentTimeMillis by 1000.
+//  - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry".
+//  - Host: The host that recorded the value, primarily for query by service name.
+type Annotation struct {
+  Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+  Value string `thrift:"value,2" db:"value" json:"value"`
+  Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+  return &Annotation{}
+}
+
+
+func (p *Annotation) GetTimestamp() int64 {
+  return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+  return p.Value
+}
+var Annotation_Host_DEFAULT *Endpoint
+func (p *Annotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return Annotation_Host_DEFAULT
+  }
+return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *Annotation) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Annotation)  ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Timestamp = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField2(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Value = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField3(iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *Annotation) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Annotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+    if err := p.writeField3(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+  if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteString(string(p.Value)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+    if err := p.Host.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *Annotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of HTTP_PATH ("http.path") could be the path
+// to a resource in an RPC call.
+// 
+// Binary annotations of type STRING are always queryable, though this is
+// more a historical implementation detail than a structural concern.
+// 
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.path" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
+// you can see the different points of view, which often help in debugging.
+// 
+// Attributes:
+//  - Key: Name used to lookup spans, such as "http.path" or "finagle.version".
+//  - Value: Serialized thrift bytes, in TBinaryProtocol format.
+// 
+// For legacy reasons, byte order is big-endian. See THRIFT-3217.
+//  - AnnotationType: The thrift type of value, most often STRING.
+// 
+// annotation_type shouldn't vary for the same key.
+//  - Host: The host that recorded the value, allowing query by service name or address.
+// 
+// There are two exceptions: when key is "ca" or "sa", this is the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, such as browsers or databases.
+type BinaryAnnotation struct {
+  Key string `thrift:"key,1" db:"key" json:"key"`
+  Value []byte `thrift:"value,2" db:"value" json:"value"`
+  AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+  Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+  return &BinaryAnnotation{}
+}
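+
+// A minimal usage sketch (not part of the generated code): tagging a span
+// with an "http.path" string value. AnnotationType_STRING is assumed to be
+// the STRING value of the AnnotationType enum generated in this package.
+//
+//   b := NewBinaryAnnotation()
+//   b.Key = "http.path"
+//   b.Value = []byte("/api/v1/myresource")
+//   b.AnnotationType = AnnotationType_STRING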
+
+
+func (p *BinaryAnnotation) GetKey() string {
+  return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+  return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+  return p.AnnotationType
+}
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return BinaryAnnotation_Host_DEFAULT
+  }
+  return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Key = v
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) ReadField2(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.Value = v
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) ReadField3(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    temp := AnnotationType(v)
+    p.AnnotationType = temp
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) ReadField4(iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+    if err := p.writeField3(oprot); err != nil { return err }
+    if err := p.writeField4(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(string(p.Key)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteBinary(p.Value); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+  if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+    if err := p.Host.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *BinaryAnnotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+// 
+// Spans are usually created by instrumentation in RPC clients or servers, but
+// can also represent in-process activity. Annotations in spans are similar to
+// log statements, and are sometimes created directly by application developers
+// to indicate events of interest, such as a cache miss.
+// 
+// The root span is where parent_id = Nil; it usually has the longest duration
+// in the trace.
+// 
+// Span identifiers are packed into i64s, but should be treated opaquely.
+// String encoding is fixed-width lower-hex, to avoid signed interpretation.
+// 
+// Attributes:
+//  - TraceID: Unique 8-byte identifier for a trace, set on all spans within it.
+//  - Name: Span name in lowercase, rpc method for example. Conventionally, when the
+// span name isn't known, name = "unknown".
+//  - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely
+// identified in storage by (trace_id, id).
+//  - ParentID: The parent's Span.id; absent if this is the root span in a trace.
+//  - Annotations: Associates events that explain latency with a timestamp. Unlike log
+// statements, annotations are often codes: for example SERVER_RECV("sr").
+// Annotations are sorted ascending by timestamp.
+//  - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For
+// example, a binary annotation key could be "http.path".
+//  - Debug: True denotes a request to store this span even if it overrides the sampling policy.
+//  - Timestamp: Epoch microseconds of the start of this span, absent if this is an incomplete
+// span.
+// 
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+// 
+// Timestamp is nullable for input only. Spans without a timestamp cannot be
+// presented in a timeline: Span stores should not output spans missing a
+// timestamp.
+// 
+// There are two known edge-cases where this could be absent: both cases
+// exist when a collector receives a span in parts and a binary annotation
+// precedes a timestamp. This is possible when:
+//  - The span is in-flight (e.g. it has not yet received a timestamp)
+//  - The span's start event was lost
+//  - Duration: Measurement in microseconds of the critical path, if known. Durations of
+// less than one microsecond must be rounded up to 1 microsecond.
+// 
+// This value should be set directly, as opposed to implicitly via annotation
+// timestamps. Doing so encourages precision decoupled from problems of
+// clocks, such as skew or NTP updates causing time to move backwards.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+// 
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+// 
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
+// means the trace uses 128-bit trace IDs instead of 64-bit ones.
+type Span struct {
+  TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+  // unused field # 2
+  Name string `thrift:"name,3" db:"name" json:"name"`
+  ID int64 `thrift:"id,4" db:"id" json:"id"`
+  ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+  Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+  // unused field # 7
+  BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+  Debug bool `thrift:"debug,9" db:"debug" json:"debug,omitempty"`
+  Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+  Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+  TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+  return &Span{}
+}
+
+
+func (p *Span) GetTraceID() int64 {
+  return p.TraceID
+}
+
+func (p *Span) GetName() string {
+  return p.Name
+}
+
+func (p *Span) GetID() int64 {
+  return p.ID
+}
+var Span_ParentID_DEFAULT int64
+func (p *Span) GetParentID() int64 {
+  if !p.IsSetParentID() {
+    return Span_ParentID_DEFAULT
+  }
+  return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+  return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+  return p.BinaryAnnotations
+}
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+  return p.Debug
+}
+var Span_Timestamp_DEFAULT int64
+func (p *Span) GetTimestamp() int64 {
+  if !p.IsSetTimestamp() {
+    return Span_Timestamp_DEFAULT
+  }
+  return *p.Timestamp
+}
+var Span_Duration_DEFAULT int64
+func (p *Span) GetDuration() int64 {
+  if !p.IsSetDuration() {
+    return Span_Duration_DEFAULT
+  }
+  return *p.Duration
+}
+var Span_TraceIDHigh_DEFAULT int64
+func (p *Span) GetTraceIDHigh() int64 {
+  if !p.IsSetTraceIDHigh() {
+    return Span_TraceIDHigh_DEFAULT
+  }
+  return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+  return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+  return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+  return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+  return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+  return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField5(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField6(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField8(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 9:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField9(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 10:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField10(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 11:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField11(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 12:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField12(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Span) ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.TraceID = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField3(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.Name = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField4(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.ID = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField5(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+  } else {
+    p.ParentID = &v
+  }
+  return nil
+}
+
+func (p *Span) ReadField6(iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin()
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Annotation, 0, size)
+  p.Annotations = tSlice
+  for i := 0; i < size; i++ {
+    _elem0 := &Annotation{}
+    if err := _elem0.Read(iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.Annotations = append(p.Annotations, _elem0)
+  }
+  if err := iprot.ReadListEnd(); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span) ReadField8(iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin()
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BinaryAnnotation, 0, size)
+  p.BinaryAnnotations = tSlice
+  for i := 0; i < size; i++ {
+    _elem1 := &BinaryAnnotation{}
+    if err := _elem1.Read(iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+    }
+    p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+  }
+  if err := iprot.ReadListEnd(); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span) ReadField9(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(); err != nil {
+    return thrift.PrependError("error reading field 9: ", err)
+  } else {
+    p.Debug = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField10(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 10: ", err)
+  } else {
+    p.Timestamp = &v
+  }
+  return nil
+}
+
+func (p *Span) ReadField11(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 11: ", err)
+  } else {
+    p.Duration = &v
+  }
+  return nil
+}
+
+func (p *Span) ReadField12(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(); err != nil {
+    return thrift.PrependError("error reading field 12: ", err)
+  } else {
+    p.TraceIDHigh = &v
+  }
+  return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Span"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField3(oprot); err != nil { return err }
+    if err := p.writeField4(oprot); err != nil { return err }
+    if err := p.writeField5(oprot); err != nil { return err }
+    if err := p.writeField6(oprot); err != nil { return err }
+    if err := p.writeField8(oprot); err != nil { return err }
+    if err := p.writeField9(oprot); err != nil { return err }
+    if err := p.writeField10(oprot); err != nil { return err }
+    if err := p.writeField11(oprot); err != nil { return err }
+    if err := p.writeField12(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
+  if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
+  if err := oprot.WriteString(string(p.Name)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
+  if err := oprot.WriteI64(int64(p.ID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+  if p.IsSetParentID() {
+    if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
+    if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
+  if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Annotations {
+    if err := v.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
+  if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.BinaryAnnotations {
+    if err := v.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+  if p.IsSetDebug() {
+    if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
+    if err := oprot.WriteBool(bool(p.Debug)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+  if p.IsSetTimestamp() {
+    if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
+    if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+  if p.IsSetDuration() {
+    if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
+    if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
+  if p.IsSetTraceIDHigh() {
+    if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
+    if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Span(%+v)", *p)
+}
+
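+// A minimal usage sketch (not part of the generated code): assembling a
+// root span by hand. Timestamp and Duration are epoch microseconds and
+// microseconds respectively, per the struct doc above.
+//
+//   ts := time.Now().UnixNano() / 1e3
+//   dur := int64(1500) // 1.5ms
+//   span := NewSpan()
+//   span.TraceID = 1
+//   span.ID = 2
+//   span.Name = "get"
+//   span.Timestamp = &ts
+//   span.Duration = &dur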
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go
new file mode 100644
index 00000000..5754d5fc
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go
@@ -0,0 +1,440 @@
+package zipkintracer
+
+import (
+	"errors"
+	"time"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+
+	otobserver "github.com/opentracing-contrib/go-observer"
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+)
+
+// ErrInvalidEndpoint will be returned if the hostPort parameter is malformed
+// or the host can't be resolved.
+var ErrInvalidEndpoint = errors.New("Invalid Endpoint. Please check hostPort parameter")
+
+// Tracer extends the opentracing.Tracer interface with methods to
+// probe implementation state, for use by zipkintracer consumers.
+type Tracer interface {
+	opentracing.Tracer
+
+	// Options gets the Options used in New() or NewWithOptions().
+	Options() TracerOptions
+}
+
+// TracerOptions allows creating a customized Tracer.
+type TracerOptions struct {
+	// shouldSample is a function which is called when creating a new Span and
+	// determines whether that Span is sampled. The randomized TraceID is supplied
+	// to allow deterministic sampling decisions to be made across different nodes.
+	shouldSample func(traceID uint64) bool
+	// trimUnsampledSpans turns potentially expensive operations on unsampled
+	// Spans into no-ops. More precisely, tags and log events are silently
+	// discarded. If NewSpanEventListener is set, the callbacks will still fire.
+	trimUnsampledSpans bool
+	// recorder receives Spans which have been finished.
+	recorder SpanRecorder
+	// newSpanEventListener can be used to enhance the tracer by effectively
+	// attaching external code to trace events. See NetTraceIntegrator for a
+	// practical example, and event.go for the list of possible events.
+	newSpanEventListener func() func(SpanEvent)
+	// dropAllLogs turns log events on all Spans into no-ops.
+	// If NewSpanEventListener is set, the callbacks will still fire.
+	dropAllLogs bool
+	// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+	// value). If a span has more logs than this value, logs are dropped as
+	// necessary (and replaced with a log describing how many were dropped).
+	//
+	// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+	// half are the newest logs.
+	//
+	// If NewSpanEventListener is set, the callbacks will still fire for all log
+	// events. This value is ignored if DropAllLogs is true.
+	maxLogsPerSpan int
+	// debugAssertSingleGoroutine internally records the ID of the goroutine
+	// creating each Span and verifies that no operation is carried out on
+	// it on a different goroutine.
+	// Provided strictly for development purposes.
+	// Passing Spans between goroutines without proper synchronization often
+	// results in use-after-Finish() errors. For a simple example, consider the
+	// following pseudocode:
+	//
+	//  func (s *Server) Handle(req http.Request) error {
+	//    sp := s.StartSpan("server")
+	//    defer sp.Finish()
+	//    wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req)
+	//    select {
+	//    case resp := <-wait:
+	//      return resp.Error
+	//    case <-time.After(10*time.Second):
+	//      sp.LogEvent("timed out waiting for processing")
+	//      return ErrTimedOut
+	//    }
+	//  }
+	//
+	// This looks reasonable at first, but a request which spends more than ten
+	// seconds in the queue is abandoned by the main goroutine and its trace
+	// finished, leading to use-after-finish when the request is finally
+	// processed. Note also that even joining on to a finished Span via
+	// StartSpanWithOptions constitutes an illegal operation.
+	//
+	// Code bases which do not require (or decide they do not want) Spans to
+	// be passed across goroutine boundaries can run with this flag enabled in
+	// tests to increase their chances of spotting wrong-doers.
+	debugAssertSingleGoroutine bool
+	// debugAssertUseAfterFinish is provided strictly for development purposes.
+	// When set, it attempts to exacerbate issues emanating from use of Spans
+	// after calling Finish by running additional assertions.
+	debugAssertUseAfterFinish bool
+	// enableSpanPool enables the use of a pool, so that the tracer reuses spans
+	// after Finish has been called on them. This adds a slight performance gain
+	// as it reduces allocations. However, if you have any use-after-finish race
+	// conditions the code may panic.
+	enableSpanPool bool
+	// logger is used to log internal tracer errors and diagnostics.
+	logger Logger
+	// clientServerSameSpan allows for Zipkin V1 style span per RPC. This places
+	// both the client end and server end of an RPC call into the same span.
+	clientServerSameSpan bool
+	// debugMode activates Zipkin's debug request allowing for all Spans originating
+	// from this tracer to pass through and bypass sampling. Use with extreme care
+	// as it might flood your system if you have many traces starting from the
+	// service you are instrumenting.
+	debugMode bool
+	// traceID128Bit enables the generation of 128 bit traceIDs in case the tracer
+	// needs to create a root span. By default regular 64 bit traceIDs are used.
+	// Regardless of this setting, the library will propagate and support both
+	// 64 and 128 bit incoming traces from upstream sources.
+	traceID128Bit bool
+
+	observer otobserver.Observer
+}
+
+// TracerOption allows for functional options.
+// See: http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type TracerOption func(opts *TracerOptions) error
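+
+// A usage sketch, assuming a SpanRecorder implementation named recorder is
+// in scope: options are applied in order by NewTracer below.
+//
+//   tracer, err := NewTracer(
+//     recorder,
+//     ClientServerSameSpan(true),
+//     TraceID128Bit(true),
+//   )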
+
+// WithSampler allows one to add a Sampler function
+func WithSampler(sampler Sampler) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.shouldSample = sampler
+		return nil
+	}
+}
+
+// TrimUnsampledSpans option
+func TrimUnsampledSpans(trim bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.trimUnsampledSpans = trim
+		return nil
+	}
+}
+
+// DropAllLogs option
+func DropAllLogs(dropAllLogs bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.dropAllLogs = dropAllLogs
+		return nil
+	}
+}
+
+// WithLogger option
+func WithLogger(logger Logger) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.logger = logger
+		return nil
+	}
+}
+
+// DebugAssertSingleGoroutine option
+func DebugAssertSingleGoroutine(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.debugAssertSingleGoroutine = val
+		return nil
+	}
+}
+
+// DebugAssertUseAfterFinish option
+func DebugAssertUseAfterFinish(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.debugAssertUseAfterFinish = val
+		return nil
+	}
+}
+
+// TraceID128Bit option
+func TraceID128Bit(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.traceID128Bit = val
+		return nil
+	}
+}
+
+// ClientServerSameSpan allows placing client-side and server-side annotations
+// for an RPC call in the same span (Zipkin V1 behavior) or in different spans
+// (more in line with other tracing solutions). By default this Tracer
+// uses shared host spans (so client-side and server-side in the same span).
+// If using separate spans you might run into trouble with Zipkin V1, as clock
+// skew issues can't be remedied on the Zipkin server side.
+func ClientServerSameSpan(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.clientServerSameSpan = val
+		return nil
+	}
+}
+
+// DebugMode allows setting the tracer to Zipkin debug mode
+func DebugMode(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.debugMode = val
+		return nil
+	}
+}
+
+// EnableSpanPool option; see TracerOptions.enableSpanPool for details.
+func EnableSpanPool(val bool) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.enableSpanPool = val
+		return nil
+	}
+}
+
+// NewSpanEventListener option
+func NewSpanEventListener(f func() func(SpanEvent)) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.newSpanEventListener = f
+		return nil
+	}
+}
+
+// WithMaxLogsPerSpan option
+func WithMaxLogsPerSpan(limit int) TracerOption {
+	return func(opts *TracerOptions) error {
+		if limit < 5 || limit > 10000 {
+			return errors.New("invalid MaxLogsPerSpan limit. Should be between 5 and 10000")
+		}
+		opts.maxLogsPerSpan = limit
+		return nil
+	}
+}
+
+// NewTracer creates a new OpenTracing compatible Zipkin Tracer.
+func NewTracer(recorder SpanRecorder, options ...TracerOption) (opentracing.Tracer, error) {
+	opts := &TracerOptions{
+		recorder:             recorder,
+		shouldSample:         alwaysSample,
+		trimUnsampledSpans:   false,
+		newSpanEventListener: func() func(SpanEvent) { return nil },
+		logger:               &nopLogger{},
+		debugAssertSingleGoroutine: false,
+		debugAssertUseAfterFinish:  false,
+		clientServerSameSpan:       true,
+		debugMode:                  false,
+		traceID128Bit:              false,
+		maxLogsPerSpan:             10000,
+		observer:                   nil,
+	}
+	for _, o := range options {
+		err := o(opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+	rval := &tracerImpl{options: *opts}
+	rval.textPropagator = &textMapPropagator{rval}
+	rval.binaryPropagator = &binaryPropagator{rval}
+	rval.accessorPropagator = &accessorPropagator{rval}
+	return rval, nil
+}
+
+// Implements the `Tracer` interface.
+type tracerImpl struct {
+	options            TracerOptions
+	textPropagator     *textMapPropagator
+	binaryPropagator   *binaryPropagator
+	accessorPropagator *accessorPropagator
+}
+
+func (t *tracerImpl) StartSpan(
+	operationName string,
+	opts ...opentracing.StartSpanOption,
+) opentracing.Span {
+	sso := opentracing.StartSpanOptions{}
+	for _, o := range opts {
+		o.Apply(&sso)
+	}
+	return t.startSpanWithOptions(operationName, sso)
+}
+
+func (t *tracerImpl) getSpan() *spanImpl {
+	if t.options.enableSpanPool {
+		sp := spanPool.Get().(*spanImpl)
+		sp.reset()
+		return sp
+	}
+	return &spanImpl{}
+}
+
+func (t *tracerImpl) startSpanWithOptions(
+	operationName string,
+	opts opentracing.StartSpanOptions,
+) opentracing.Span {
+	// Start time.
+	startTime := opts.StartTime
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	// Tags.
+	tags := opts.Tags
+
+	// Build the new span. This is the only allocation: We'll return this as
+	// an opentracing.Span.
+	sp := t.getSpan()
+
+	if t.options.observer != nil {
+		sp.observer, _ = t.options.observer.OnStartSpan(sp, operationName, opts)
+	}
+
+	// Look for a parent in the list of References.
+	//
+	// TODO: would be nice if basictracer did something with all
+	// References, not just the first one.
+ReferencesLoop:
+	for _, ref := range opts.References {
+		switch ref.Type {
+		case opentracing.ChildOfRef:
+			refCtx := ref.ReferencedContext.(SpanContext)
+			sp.raw.Context.TraceID = refCtx.TraceID
+			sp.raw.Context.ParentSpanID = &refCtx.SpanID
+			sp.raw.Context.Sampled = refCtx.Sampled
+			sp.raw.Context.Flags = refCtx.Flags
+			sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
+
+			if t.options.clientServerSameSpan &&
+				tags[string(ext.SpanKind)] == ext.SpanKindRPCServer.Value {
+				sp.raw.Context.SpanID = refCtx.SpanID
+				sp.raw.Context.ParentSpanID = refCtx.ParentSpanID
+				sp.raw.Context.Owner = false
+			} else {
+				sp.raw.Context.SpanID = randomID()
+				sp.raw.Context.ParentSpanID = &refCtx.SpanID
+				sp.raw.Context.Owner = true
+			}
+
+			if l := len(refCtx.Baggage); l > 0 {
+				sp.raw.Context.Baggage = make(map[string]string, l)
+				for k, v := range refCtx.Baggage {
+					sp.raw.Context.Baggage[k] = v
+				}
+			}
+			break ReferencesLoop
+		case opentracing.FollowsFromRef:
+			refCtx := ref.ReferencedContext.(SpanContext)
+			sp.raw.Context.TraceID = refCtx.TraceID
+			sp.raw.Context.ParentSpanID = &refCtx.SpanID
+			sp.raw.Context.Sampled = refCtx.Sampled
+			sp.raw.Context.Flags = refCtx.Flags
+			sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
+
+			sp.raw.Context.SpanID = randomID()
+			sp.raw.Context.ParentSpanID = &refCtx.SpanID
+			sp.raw.Context.Owner = true
+
+			if l := len(refCtx.Baggage); l > 0 {
+				sp.raw.Context.Baggage = make(map[string]string, l)
+				for k, v := range refCtx.Baggage {
+					sp.raw.Context.Baggage[k] = v
+				}
+			}
+			break ReferencesLoop
+		}
+	}
+	if sp.raw.Context.TraceID.Empty() {
+		// No parent Span found; allocate new trace and span ids and determine
+		// the Sampled status.
+		if t.options.traceID128Bit {
+			sp.raw.Context.TraceID.High = randomID()
+		}
+		sp.raw.Context.TraceID.Low, sp.raw.Context.SpanID = randomID2()
+		sp.raw.Context.Sampled = t.options.shouldSample(sp.raw.Context.TraceID.Low)
+		sp.raw.Context.Flags = flag.IsRoot
+		sp.raw.Context.Owner = true
+	}
+	if t.options.debugMode {
+		sp.raw.Context.Flags |= flag.Debug
+	}
+	return t.startSpanInternal(
+		sp,
+		operationName,
+		startTime,
+		tags,
+	)
+}
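+
+// A usage sketch (assuming tracer was built via NewTracer): a ChildOf
+// reference makes the new span join the parent's trace, following the
+// ReferencesLoop above.
+//
+//   parent := tracer.StartSpan("parent")
+//   child := tracer.StartSpan("child", opentracing.ChildOf(parent.Context()))
+//   child.Finish()
+//   parent.Finish()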
+
+func (t *tracerImpl) startSpanInternal(
+	sp *spanImpl,
+	operationName string,
+	startTime time.Time,
+	tags opentracing.Tags,
+) opentracing.Span {
+	sp.tracer = t
+	if t.options.newSpanEventListener != nil {
+		sp.event = t.options.newSpanEventListener()
+	}
+	sp.raw.Operation = operationName
+	sp.raw.Start = startTime
+	sp.raw.Duration = -1
+	sp.raw.Tags = tags
+
+	if t.options.debugAssertSingleGoroutine {
+		sp.SetTag(debugGoroutineIDTag, curGoroutineID())
+	}
+	defer sp.onCreate(operationName)
+	return sp
+}
+
+type delegatorType struct{}
+
+// Delegator is the format to use for DelegatingCarrier.
+var Delegator delegatorType
+
+func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	switch format {
+	case opentracing.TextMap, opentracing.HTTPHeaders:
+		return t.textPropagator.Inject(sc, carrier)
+	case opentracing.Binary:
+		return t.binaryPropagator.Inject(sc, carrier)
+	}
+	if _, ok := format.(delegatorType); ok {
+		return t.accessorPropagator.Inject(sc, carrier)
+	}
+	return opentracing.ErrUnsupportedFormat
+}
+
+func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	switch format {
+	case opentracing.TextMap, opentracing.HTTPHeaders:
+		return t.textPropagator.Extract(carrier)
+	case opentracing.Binary:
+		return t.binaryPropagator.Extract(carrier)
+	}
+	if _, ok := format.(delegatorType); ok {
+		return t.accessorPropagator.Extract(carrier)
+	}
+	return nil, opentracing.ErrUnsupportedFormat
+}
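+
+// A propagation sketch, assuming an *http.Request named req and a span in
+// scope: the TextMap/HTTPHeaders branch above handles both directions.
+//
+//   carrier := opentracing.HTTPHeadersCarrier(req.Header)
+//   err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
+//   // ... and on the receiving side:
+//   spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, carrier)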
+
+func (t *tracerImpl) Options() TracerOptions {
+	return t.options
+}
+
+// WithObserver assigns an initialized observer to opts.observer
+func WithObserver(observer otobserver.Observer) TracerOption {
+	return func(opts *TracerOptions) error {
+		opts.observer = observer
+		return nil
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go
new file mode 100644
index 00000000..a8058ba4
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go
@@ -0,0 +1,38 @@
+package types
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// TraceID is a 128 bit number internally stored as 2x uint64 (high & low).
+type TraceID struct {
+	High uint64
+	Low  uint64
+}
+
+// TraceIDFromHex returns the TraceID from a Hex string.
+func TraceIDFromHex(h string) (t TraceID, err error) {
+	if len(h) > 16 {
+		if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil {
+			return
+		}
+		t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64)
+		return
+	}
+	t.Low, err = strconv.ParseUint(h, 16, 64)
+	return
+}
+
+// ToHex outputs the 128-bit traceID as a hex string.
+func (t TraceID) ToHex() string {
+	if t.High == 0 {
+		return fmt.Sprintf("%016x", t.Low)
+	}
+	return fmt.Sprintf("%016x%016x", t.High, t.Low)
+}
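+
+// A round-trip sketch: a 32-character hex string fills both halves, while 16
+// characters or fewer fill Low only.
+//
+//   t, _ := TraceIDFromHex("463ac35c9f6413ad48485a3953bb6124")
+//   // t.High == 0x463ac35c9f6413ad, t.Low == 0x48485a3953bb6124
+//   s := t.ToHex() // "463ac35c9f6413ad48485a3953bb6124"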
+
+// Empty reports whether the TraceID has the zero value.
+func (t TraceID) Empty() bool {
+	return t.Low == 0 && t.High == 0
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go
new file mode 100644
index 00000000..27066150
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go
@@ -0,0 +1,25 @@
+package zipkintracer
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
+	// The golang rand generators are *not* intrinsically thread-safe.
+	seededIDLock sync.Mutex
+)
+
+func randomID() uint64 {
+	seededIDLock.Lock()
+	defer seededIDLock.Unlock()
+	return uint64(seededIDGen.Int63())
+}
+
+func randomID2() (uint64, uint64) {
+	seededIDLock.Lock()
+	defer seededIDLock.Unlock()
+	return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63())
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go
new file mode 100644
index 00000000..79364998
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go
@@ -0,0 +1,65 @@
+package wire
+
+import (
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+	"github.com/openzipkin/zipkin-go-opentracing/types"
+)
+
+// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the
+// underlying data structure. The reason for implementing DelegatingCarrier
+// is to allow end users to serialize the underlying protocol buffers using
+// jsonpb or any other serialization form they want.
+type ProtobufCarrier TracerState
+
+// SetState sets the tracer state.
+func (p *ProtobufCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
+	p.TraceId = traceID.Low
+	p.TraceIdHigh = traceID.High
+	p.SpanId = spanID
+	if parentSpanID == nil {
+		flags |= flag.IsRoot
+		p.ParentSpanId = 0
+	} else {
+		flags &^= flag.IsRoot
+		p.ParentSpanId = *parentSpanID
+	}
+	flags |= flag.SamplingSet
+	if sampled {
+		flags |= flag.Sampled
+		p.Sampled = sampled
+	} else {
+		flags &^= flag.Sampled
+	}
+	p.Flags = uint64(flags)
+}
+
+// State returns the tracer state.
+func (p *ProtobufCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
+	traceID.Low = p.TraceId
+	traceID.High = p.TraceIdHigh
+	spanID = p.SpanId
+	sampled = p.Sampled
+	flags = flag.Flags(p.Flags)
+	if flags&flag.IsRoot == 0 {
+		parentSpanID = &p.ParentSpanId
+	}
+	return traceID, spanID, parentSpanID, sampled, flags
+}
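+
+// A round-trip sketch, assuming a types.TraceID value named tid: State
+// returns what SetState stored, and parentSpanID comes back nil for root
+// spans because SetState raises flag.IsRoot when no parent is given.
+//
+//   var c ProtobufCarrier
+//   c.SetState(tid, 1, nil, true, 0)
+//   _, spanID, parent, sampled, _ := c.State() // spanID == 1, parent == nil, sampled == true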
+
+// SetBaggageItem sets a baggage item.
+func (p *ProtobufCarrier) SetBaggageItem(key, value string) {
+	if p.BaggageItems == nil {
+		p.BaggageItems = map[string]string{key: value}
+		return
+	}
+
+	p.BaggageItems[key] = value
+}
+
+// GetBaggage iterates over each baggage item and executes the callback with
+// the key:value pair.
+func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) {
+	for k, v := range p.BaggageItems {
+		f(k, v)
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go
new file mode 100644
index 00000000..0eb355ba
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go
@@ -0,0 +1,6 @@
+package wire
+
+//go:generate protoc --gogofaster_out=$GOPATH/src wire.proto
+
+// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the
+// gogofaster generator binary.
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto
new file mode 100644
index 00000000..df425d5b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+package zipkintracer_go.wire;
+option go_package = "github.com/openzipkin/zipkin-go-opentracing/wire";
+
+message TracerState {
+  fixed64 trace_id = 1;
+  fixed64 span_id = 2;
+  bool    sampled = 3;
+  map<string, string> baggage_items = 4;
+  fixed64 trace_id_high = 20;
+  fixed64 parent_span_id = 21;
+  fixed64 flags = 22;
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go
new file mode 100644
index 00000000..e06ca4cb
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go
@@ -0,0 +1,72 @@
+package zipkintracer
+
+import (
+	"encoding/binary"
+	"net"
+	"strconv"
+	"strings"
+
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// makeEndpoint takes the hostport and service name that represent this Zipkin
+// service, and returns an endpoint that's embedded into the Zipkin core Span
+// type. If the input parameters are malformed or unresolvable, the affected
+// endpoint fields are left at their zero values.
+func makeEndpoint(hostport, serviceName string) (ep *zipkincore.Endpoint) {
+	ep = zipkincore.NewEndpoint()
+
+	// Set the ServiceName
+	ep.ServiceName = serviceName
+
+	if strings.IndexByte(hostport, ':') < 0 {
+		// "<host>" becomes "<host>:0"
+		hostport = hostport + ":0"
+	}
+
+	// try to parse provided "<host>:<port>"
+	host, port, err := net.SplitHostPort(hostport)
+	if err != nil {
+		// if unparsable, return as "undefined:0"
+		return
+	}
+
+	// try to set port number
+	p, _ := strconv.ParseUint(port, 10, 16)
+	ep.Port = int16(p)
+
+	// if <host> is a domain name, look it up
+	addrs, err := net.LookupIP(host)
+	if err != nil {
+		// return as "undefined:<port>"
+		return
+	}
+
+	var addr4, addr16 net.IP
+	for i := range addrs {
+		addr := addrs[i].To4()
+		if addr == nil {
+			// IPv6
+			if addr16 == nil {
+				addr16 = addrs[i].To16() // IPv6 - 16 bytes
+			}
+		} else {
+			// IPv4
+			if addr4 == nil {
+				addr4 = addr // IPv4 - 4 bytes
+			}
+		}
+		if addr16 != nil && addr4 != nil {
+			// IPv4 & IPv6 have been set, we can stop looking further
+			break
+		}
+	}
+	// default to a zero-filled 4-byte array for IPv4 if an IPv6-only host was found
+	if addr4 == nil {
+		addr4 = make([]byte, 4)
+	}
+
+	// set IPv4 and IPv6 addresses
+	ep.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4))
+	ep.Ipv6 = []byte(addr16)
+	return
+}
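+
+// A usage sketch: a resolvable host yields a populated endpoint, while an
+// unresolvable one leaves the address fields at their zero values.
+//
+//   ep := makeEndpoint("localhost:8080", "serviceA")
+//   // ep.ServiceName == "serviceA", ep.Port == 8080, ep.Ipv4/Ipv6 from DNS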
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go
new file mode 100644
index 00000000..42600a99
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go
@@ -0,0 +1,218 @@
+package zipkintracer
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	otext "github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+
+	"github.com/openzipkin/zipkin-go-opentracing/flag"
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+var (
+	// SpanKindResource will be regarded as an SA annotation by Zipkin.
+	SpanKindResource = otext.SpanKindEnum("resource")
+)
+
+// Recorder implements the SpanRecorder interface.
+type Recorder struct {
+	collector    Collector
+	debug        bool
+	endpoint     *zipkincore.Endpoint
+	materializer func(logFields []log.Field) ([]byte, error)
+}
+
+// RecorderOption allows for functional options.
+type RecorderOption func(r *Recorder)
+
+// WithLogFmtMaterializer will convert OpenTracing Log fields to a LogFmt representation.
+func WithLogFmtMaterializer() RecorderOption {
+	return func(r *Recorder) {
+		r.materializer = MaterializeWithLogFmt
+	}
+}
+
+// WithJSONMaterializer will convert OpenTracing Log fields to a JSON representation.
+func WithJSONMaterializer() RecorderOption {
+	return func(r *Recorder) {
+		r.materializer = MaterializeWithJSON
+	}
+}
+
+// WithStrictMaterializer will only record event Log fields and discard the rest.
+func WithStrictMaterializer() RecorderOption {
+	return func(r *Recorder) {
+		r.materializer = StrictZipkinMaterializer
+	}
+}
+
+// NewRecorder creates a new Zipkin Recorder backed by the provided Collector.
+//
+// hostPort and serviceName allow you to set the default Zipkin endpoint
+// information which will be added to the application's standard core
+// annotations. hostPort will be resolved into an IPv4 and/or IPv6 address and
+// Port number, serviceName will be used as the application's service
+// identifier.
+//
+// If the application does not listen for incoming requests, or an endpoint
+// context does not involve a network address and/or port, these cases can be
+// handled like this:
+//  # port is not applicable:
+//  NewRecorder(c, debug, "192.168.1.12:0", "ServiceA")
+//
+//  # network address and port are not applicable:
+//  NewRecorder(c, debug, "0.0.0.0:0", "ServiceB")
+func NewRecorder(c Collector, debug bool, hostPort, serviceName string, options ...RecorderOption) SpanRecorder {
+	r := &Recorder{
+		collector:    c,
+		debug:        debug,
+		endpoint:     makeEndpoint(hostPort, serviceName),
+		materializer: MaterializeWithLogFmt,
+	}
+	for _, opts := range options {
+		opts(r)
+	}
+	return r
+}
+
+// RecordSpan converts a RawSpan into the Zipkin representation of a span
+// and records it to the underlying collector.
+func (r *Recorder) RecordSpan(sp RawSpan) {
+	if !sp.Context.Sampled {
+		return
+	}
+
+	var parentSpanID *int64
+	if sp.Context.ParentSpanID != nil {
+		id := int64(*sp.Context.ParentSpanID)
+		parentSpanID = &id
+	}
+
+	var traceIDHigh *int64
+	if sp.Context.TraceID.High > 0 {
+		tidh := int64(sp.Context.TraceID.High)
+		traceIDHigh = &tidh
+	}
+
+	span := &zipkincore.Span{
+		Name:        sp.Operation,
+		ID:          int64(sp.Context.SpanID),
+		TraceID:     int64(sp.Context.TraceID.Low),
+		TraceIDHigh: traceIDHigh,
+		ParentID:    parentSpanID,
+		Debug:       r.debug || (sp.Context.Flags&flag.Debug == flag.Debug),
+	}
+	// only send timestamp and duration if this process owns the current span.
+	if sp.Context.Owner {
+		timestamp := sp.Start.UnixNano() / 1e3
+		duration := sp.Duration.Nanoseconds() / 1e3
+		// Since we always time our spans, round the duration up to 1 microsecond
+		// if the span took less.
+		if duration == 0 {
+			duration = 1
+		}
+		span.Timestamp = &timestamp
+		span.Duration = &duration
+	}
+	if kind, ok := sp.Tags[string(otext.SpanKind)]; ok {
+		switch kind {
+		case otext.SpanKindRPCClient, otext.SpanKindRPCClientEnum:
+			annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
+			annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
+		case otext.SpanKindRPCServer, otext.SpanKindRPCServerEnum:
+			annotate(span, sp.Start, zipkincore.SERVER_RECV, r.endpoint)
+			annotate(span, sp.Start.Add(sp.Duration), zipkincore.SERVER_SEND, r.endpoint)
+		case SpanKindResource:
+			serviceName, ok := sp.Tags[string(otext.PeerService)]
+			if !ok {
+				serviceName = r.endpoint.GetServiceName()
+			}
+			host, ok := sp.Tags[string(otext.PeerHostname)].(string)
+			if !ok {
+				if r.endpoint.GetIpv4() > 0 {
+					ip := make([]byte, 4)
+					binary.BigEndian.PutUint32(ip, uint32(r.endpoint.GetIpv4()))
+					host = net.IP(ip).To4().String()
+				} else {
+					ip := r.endpoint.GetIpv6()
+					host = net.IP(ip).String()
+				}
+			}
+			var sPort string
+			port, ok := sp.Tags[string(otext.PeerPort)]
+			if !ok {
+				sPort = strconv.FormatInt(int64(r.endpoint.GetPort()), 10)
+			} else {
+				sPort = strconv.FormatInt(int64(port.(uint16)), 10)
+			}
+			re := makeEndpoint(net.JoinHostPort(host, sPort), serviceName.(string))
+			if re != nil {
+				annotateBinary(span, zipkincore.SERVER_ADDR, serviceName, re)
+			} else {
+				fmt.Printf("endpoint creation failed: host: %q port: %q", host, sPort)
+			}
+			annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
+			annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
+		default:
+			annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
+		}
+		delete(sp.Tags, string(otext.SpanKind))
+	} else {
+		annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
+	}
+
+	for key, value := range sp.Tags {
+		annotateBinary(span, key, value, r.endpoint)
+	}
+
+	for _, spLog := range sp.Logs {
+		if len(spLog.Fields) == 1 && spLog.Fields[0].Key() == "event" {
+			// proper Zipkin annotation
+			annotate(span, spLog.Timestamp, fmt.Sprintf("%+v", spLog.Fields[0].Value()), r.endpoint)
+			continue
+		}
+		// OpenTracing Log with key-value pair(s). Try to materialize using the
+		// materializer chosen for the recorder.
+		if logs, err := r.materializer(spLog.Fields); err != nil {
+			fmt.Printf("Materialization of OpenTracing LogFields failed: %+v", err)
+		} else {
+			annotate(span, spLog.Timestamp, string(logs), r.endpoint)
+		}
+	}
+	_ = r.collector.Collect(span)
+}
+
+// annotate annotates the span with the given value.
+func annotate(span *zipkincore.Span, timestamp time.Time, value string, host *zipkincore.Endpoint) {
+	if timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+	span.Annotations = append(span.Annotations, &zipkincore.Annotation{
+		Timestamp: timestamp.UnixNano() / 1e3,
+		Value:     value,
+		Host:      host,
+	})
+}
+
+// annotateBinary annotates the span with a key and a value that will be []byte
+// encoded.
+func annotateBinary(span *zipkincore.Span, key string, value interface{}, host *zipkincore.Endpoint) {
+	if b, ok := value.(bool); ok {
+		if b {
+			value = "true"
+		} else {
+			value = "false"
+		}
+	}
+	span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{
+		Key:            key,
+		Value:          []byte(fmt.Sprintf("%+v", value)),
+		AnnotationType: zipkincore.AnnotationType_STRING,
+		Host:           host,
+	})
+}
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 00000000..c2bb6e4a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,31 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
new file mode 100644
index 00000000..78be21cc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+  - 1.x
+
+script: 
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 00000000..bd899d83
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 00000000..dd3c9d47
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,31 @@
+[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+
+# lz4
+LZ4 compression and decompression in pure Go
+
+## Usage
+
+```go
+import "github.com/pierrec/lz4"
+```
+
+## Description
+
+Package lz4 implements reading and writing lz4 compressed data (a frame),
+as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+using an io.Reader (decompression) and io.Writer (compression).
+It is designed to minimize memory usage while maximizing throughput by being able to
+[de]compress data concurrently.
+
+The Reader and the Writer support concurrent processing provided the supplied buffers are
+large enough (in multiples of BlockMaxSize) and there is no block dependency.
+Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+The runtime.GOMAXPROCS() value determines whether concurrency is applied.
+
+Although the block level compression and decompression functions are exposed and are fully compatible
+with the lz4 block format definition, they are low level and should not be used directly.
+For a complete description of an lz4 compressed block, see:
+http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+
+See https://github.com/Cyan4973/lz4 for the reference C implementation.
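+
+As an illustrative sketch (error handling elided; assumes the standard
+`bytes`, `io` and `os` imports), a frame round trip looks like:
+
+```go
+var buf bytes.Buffer
+zw := lz4.NewWriter(&buf)       // compress into buf
+zw.Write([]byte("hello world"))
+zw.Close()                      // flush and write the frame footer
+
+zr := lz4.NewReader(&buf)       // decompress from buf
+io.Copy(os.Stdout, zr)
+```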
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 00000000..44e3eaaa
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,454 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// block represents a frame data block.
+// Used when compressing or decompressing frame blocks concurrently.
+type block struct {
+	compressed bool
+	zdata      []byte // compressed data
+	data       []byte // decompressed data
+	offset     int    // offset within the data; with block dependency, the 64Kb window is prepended to it
+	checksum   uint32 // compressed data checksum
+	err        error  // error while [de]compressing
+}
+
+var (
+	// ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted.
+	ErrInvalidSource = errors.New("lz4: invalid source")
+	// ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when
+	// the supplied buffer for [de]compression is too small.
+	ErrShortBuffer = errors.New("lz4: short buffer")
+)
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+// UncompressBlock decompresses the source buffer into the destination one,
+// starting at the di index and returning the decompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte, di int) (int, error) {
+	si, sn, di0 := 0, len(src), di
+	if sn == 0 {
+		return 0, nil
+	}
+
+	for {
+		// literals and match lengths (token)
+		lLen := int(src[si] >> 4)
+		mLen := int(src[si] & 0xF)
+		if si++; si == sn {
+			return di, ErrInvalidSource
+		}
+
+		// literals
+		if lLen > 0 {
+			if lLen == 0xF {
+				for src[si] == 0xFF {
+					lLen += 0xFF
+					if si++; si == sn {
+						return di - di0, ErrInvalidSource
+					}
+				}
+				lLen += int(src[si])
+				if si++; si == sn {
+					return di - di0, ErrInvalidSource
+				}
+			}
+			if len(dst)-di < lLen || si+lLen > sn {
+				return di - di0, ErrShortBuffer
+			}
+			di += copy(dst[di:], src[si:si+lLen])
+
+			if si += lLen; si >= sn {
+				return di - di0, nil
+			}
+		}
+
+		if si += 2; si >= sn {
+			return di, ErrInvalidSource
+		}
+		offset := int(src[si-2]) | int(src[si-1])<<8
+		if di-offset < 0 || offset == 0 {
+			return di - di0, ErrInvalidSource
+		}
+
+		// match
+		if mLen == 0xF {
+			for src[si] == 0xFF {
+				mLen += 0xFF
+				if si++; si == sn {
+					return di - di0, ErrInvalidSource
+				}
+			}
+			mLen += int(src[si])
+			if si++; si == sn {
+				return di - di0, ErrInvalidSource
+			}
+		}
+		// minimum match length is 4
+		mLen += 4
+		if len(dst)-di <= mLen {
+			return di - di0, ErrShortBuffer
+		}
+
+		// copy the match (NB. match is at least 4 bytes long)
+		if mLen >= offset {
+			bytesToCopy := offset * (mLen / offset)
+			// Efficiently copy the match dst[di-offset:di] into the slice
+			// dst[di:di+bytesToCopy]
+			expanded := dst[di-offset : di+bytesToCopy]
+			n := offset
+			for n <= bytesToCopy+offset {
+				copy(expanded[n:], expanded[:n])
+				n *= 2
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+
+		di += copy(dst[di:], dst[di-offset:di-offset+mLen])
+	}
+}
+
+// CompressBlock compresses the source buffer starting at soffset into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, soffset int) (int, error) {
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 || soffset >= sn {
+		return 0, nil
+	}
+	var si, di int
+
+	// fast scan strategy:
+	// we only need a hash table to store the last sequences (4 bytes)
+	var hashTable [1 << hashLog]int
+	var hashShift = uint((minMatch * 8) - hashLog)
+
+	// Initialise the hash table with the first 64Kb of the input buffer
+	// (used when compressing dependent blocks)
+	for si < soffset {
+		h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+		si++
+		hashTable[h] = si
+	}
+
+	anchor := si
+	fma := 1 << skipStrength
+	for si < sn-minMatch {
+		// hash the next 4 bytes (sequence)...
+		h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+		// -1 to separate existing entries from new ones
+		ref := hashTable[h] - 1
+		// ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
+		hashTable[h] = si + 1
+		// no need to check the last 3 bytes in the first literal 4 bytes as
+		// this guarantees that the next match, if any, is compressed with
+		// a lower size, since to have some compression we must have:
+		// ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size)
+		// => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap
+		// and by definition we do have:
+		// ll >= 1, ml >= 4
+		// => ll+ml >= 5
+		// => so overlap must be 0
+
+		// the sequence is new, out of bounds (64Kb) or not valid: try next sequence
+		if ref < 0 || fma&(1<<skipStrength-1) < 4 ||
+			(si-ref)>>winSizeLog > 0 ||
+			src[ref] != src[si] ||
+			src[ref+1] != src[si+1] ||
+			src[ref+2] != src[si+2] ||
+			src[ref+3] != src[si+3] {
+			// variable step: improves performance on non-compressible data
+			si += fma >> skipStrength
+			fma++
+			continue
+		}
+		// match found
+		fma = 1 << skipStrength
+		lLen := si - anchor
+		offset := si - ref
+
+		// encode match length part 1
+		si += minMatch
+		mLen := si // match length has minMatch already
+		for si <= sn && src[si] == src[si-offset] {
+			si++
+		}
+		mLen = si - mLen
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// encode literals length
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				if di++; di == dn {
+					return di, ErrShortBuffer
+				}
+			}
+			dst[di] = byte(l)
+		}
+		if di++; di == dn {
+			return di, ErrShortBuffer
+		}
+
+		// literals
+		if di+lLen >= dn {
+			return di, ErrShortBuffer
+		}
+		di += copy(dst[di:], src[anchor:anchor+lLen])
+		anchor = si
+
+		// encode offset
+		if di += 2; di >= dn {
+			return di, ErrShortBuffer
+		}
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// encode match length part 2
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				if di++; di == dn {
+					return di, ErrShortBuffer
+				}
+			}
+			dst[di] = byte(mLen)
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+		}
+	}
+
+	if anchor == 0 {
+		// incompressible
+		return 0, nil
+	}
+
+	// last literals
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		if di++; di == dn {
+			return di, ErrShortBuffer
+		}
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+		}
+		dst[di] = byte(lLen)
+	}
+	if di++; di == dn {
+		return di, ErrShortBuffer
+	}
+
+	// write literals
+	src = src[anchor:]
+	switch n := di + len(src); {
+	case n > dn:
+		return di, ErrShortBuffer
+	case n >= sn:
+		// incompressible
+		return 0, nil
+	}
+	di += copy(dst[di:], src)
+	return di, nil
+}
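+
+// An illustrative round trip (editor's sketch, not upstream code), assuming
+// `data` holds the bytes to compress:
+//
+//	zbuf := make([]byte, CompressBlockBound(len(data)))
+//	zn, _ := CompressBlock(data, zbuf, 0) // zn == 0 means incompressible
+//	out := make([]byte, len(data))
+//	n, _ := UncompressBlock(zbuf[:zn], out, 0)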
+
+// CompressBlockHC compresses the source buffer starting at soffset into the destination one.
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, soffset int) (int, error) {
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 || soffset >= sn {
+		return 0, nil
+	}
+	var si, di int
+
+	// Hash Chain strategy:
+	// we need a hash table and a chain table
+	// the chain table cannot contain more entries than the window size (64Kb entries)
+	var hashTable [1 << hashLog]int
+	var chainTable [winSize]int
+	var hashShift = uint((minMatch * 8) - hashLog)
+
+	// Initialise the hash table with the first 64Kb of the input buffer
+	// (used when compressing dependent blocks)
+	for si < soffset {
+		h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+		chainTable[si&winMask] = hashTable[h]
+		si++
+		hashTable[h] = si
+	}
+
+	anchor := si
+	for si < sn-minMatch {
+		// hash the next 4 bytes (sequence)...
+		h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+
+		// follow the chain until out of window and give the longest match
+		mLen := 0
+		offset := 0
+		for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 {
+			// the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length
+			if src[next+mLen] == src[si+mLen] {
+				for ml := 0; ; ml++ {
+					if src[next+ml] != src[si+ml] || si+ml > sn {
+						// found a longer match, keep its position and length
+						if mLen < ml && ml >= minMatch {
+							mLen = ml
+							offset = si - next
+						}
+						break
+					}
+				}
+			}
+		}
+		chainTable[si&winMask] = hashTable[h]
+		hashTable[h] = si + 1
+
+		// no match found
+		if mLen == 0 {
+			si++
+			continue
+		}
+
+		// match found
+	// update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length
+		for si, ml := si+1, si+mLen; si < ml; {
+			h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+			chainTable[si&winMask] = hashTable[h]
+			si++
+			hashTable[h] = si
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // match length does not include minMatch
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// encode literals length
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				if di++; di == dn {
+					return di, ErrShortBuffer
+				}
+			}
+			dst[di] = byte(l)
+		}
+		if di++; di == dn {
+			return di, ErrShortBuffer
+		}
+
+		// literals
+		if di+lLen >= dn {
+			return di, ErrShortBuffer
+		}
+		di += copy(dst[di:], src[anchor:anchor+lLen])
+		anchor = si
+
+		// encode offset
+		if di += 2; di >= dn {
+			return di, ErrShortBuffer
+		}
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// encode match length part 2
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				if di++; di == dn {
+					return di, ErrShortBuffer
+				}
+			}
+			dst[di] = byte(mLen)
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+		}
+	}
+
+	if anchor == 0 {
+		// incompressible
+		return 0, nil
+	}
+
+	// last literals
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		if di++; di == dn {
+			return di, ErrShortBuffer
+		}
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+		}
+		dst[di] = byte(lLen)
+	}
+	if di++; di == dn {
+		return di, ErrShortBuffer
+	}
+
+	// write literals
+	src = src[anchor:]
+	switch n := di + len(src); {
+	case n > dn:
+		return di, ErrShortBuffer
+	case n >= sn:
+		// incompressible
+		return 0, nil
+	}
+	di += copy(dst[di:], src)
+	return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 00000000..ddb82f66
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,105 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// using an io.Reader (decompression) and io.Writer (compression).
+// It is designed to minimize memory usage while maximizing throughput by being able to
+// [de]compress data concurrently.
+//
+// The Reader and the Writer support concurrent processing provided the supplied buffers are
+// large enough (in multiples of BlockMaxSize) and there is no block dependency.
+// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+// The runtime.GOMAXPROCS() value determines whether concurrency is applied.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+package lz4
+
+import (
+	"hash"
+	"sync"
+
+	"github.com/pierrec/xxHash/xxHash32"
+)
+
+const (
+	// Extension is the LZ4 frame file name extension
+	Extension = ".lz4"
+	// Version is the LZ4 frame format version
+	Version = 1
+
+	frameMagic     = uint32(0x184D2204)
+	frameSkipMagic = uint32(0x184D2A50)
+
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum size of the match sequence size (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise.
+	hashLog       = 16
+	hashTableSize = 1 << hashLog
+	hashShift     = uint((minMatch * 8) - hashLog)
+
+	mfLimit      = 8 + minMatch // The last match cannot start within the last 12 bytes.
+	skipStrength = 6            // variable step for fast scan
+
+	hasher = uint32(2654435761) // prime number used to hash minMatch
+)
+
+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
+var bsMapValue = map[int]byte{}
+
+// Build the reverse mapping of bsMapID.
+func init() {
+	for i, v := range bsMapID {
+		bsMapValue[v] = i
+	}
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary (typically when using the Reader concurrency).
+type Header struct {
+	BlockDependency bool   // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
+	BlockChecksum   bool   // compressed blocks are checksummed
+	NoChecksum      bool   // frame checksum
+	BlockMaxSize    int    // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+	Size            uint64 // the frame total size. It is _not_ computed by the Writer.
+	HighCompression bool   // use high compression (only for the Writer)
+	done            bool   // whether the descriptor was processed (Read or Write and checked)
+	// Removed as not supported
+	// 	Dict            bool   // a dictionary id is to be used
+	// 	DictID          uint32 // the dictionary id read from the frame, if any.
+}
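+
+// As a usage sketch (not part of the upstream file; `w` is assumed to be an
+// io.Writer), a Writer could be configured for checksummed 256Kb blocks by
+// setting the embedded Header before the first Write:
+//
+//	zw := NewWriter(w)
+//	zw.Header.BlockMaxSize = 256 << 10
+//	zw.Header.BlockChecksum = true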
+
+// xxhPool wraps the standard pool for xxHash items.
+// Putting items back in the pool automatically resets them.
+type xxhPool struct {
+	sync.Pool
+}
+
+func (p *xxhPool) Get() hash.Hash32 {
+	return p.Pool.Get().(hash.Hash32)
+}
+
+func (p *xxhPool) Put(h hash.Hash32) {
+	h.Reset()
+	p.Pool.Put(h)
+}
+
+// hashPool is used by readers and writers and contains xxHash items.
+var hashPool = xxhPool{
+	Pool: sync.Pool{
+		New: func() interface{} { return xxHash32.New(0) },
+	},
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 00000000..9f7fd604
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,364 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrInvalid is returned when the data being read is not an LZ4 archive
+// (LZ4 magic number detection failed).
+var ErrInvalid = errors.New("invalid lz4 data")
+
+// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
+// It is not an error.
+var errEndOfBlock = errors.New("end of block")
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+	Pos int64 // position within the source
+	Header
+	src      io.Reader
+	checksum hash.Hash32    // frame hash
+	wg       sync.WaitGroup // decompressing go routine wait group
+	data     []byte         // buffered decompressed data
+	window   []byte         // 64Kb decompressed data window
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+	return &Reader{
+		src:      src,
+		checksum: hashPool.Get(),
+	}
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+	defer z.checksum.Reset()
+
+	for {
+		var magic uint32
+		if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
+			if !first && err == io.ErrUnexpectedEOF {
+				return io.EOF
+			}
+			return err
+		}
+		z.Pos += 4
+		if magic>>8 == frameSkipMagic>>8 {
+			var skipSize uint32
+			if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
+				return err
+			}
+			z.Pos += 4
+			m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+			z.Pos += m
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		if magic != frameMagic {
+			return ErrInvalid
+		}
+		break
+	}
+
+	// header
+	var buf [8]byte
+	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+		return err
+	}
+	z.Pos += 2
+
+	b := buf[0]
+	if b>>6 != Version {
+		return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
+	}
+	z.BlockDependency = b>>5&1 == 0
+	z.BlockChecksum = b>>4&1 > 0
+	frameSize := b>>3&1 > 0
+	z.NoChecksum = b>>2&1 == 0
+	// 	z.Dict = b&1 > 0
+
+	bmsID := buf[1] >> 4 & 0x7
+	bSize, ok := bsMapID[bmsID]
+	if !ok {
+		return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
+	}
+	z.BlockMaxSize = bSize
+
+	z.checksum.Write(buf[0:2])
+
+	if frameSize {
+		if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
+			return err
+		}
+		z.Pos += 8
+		binary.LittleEndian.PutUint64(buf[:], z.Size)
+		z.checksum.Write(buf[0:8])
+	}
+
+	// 	if z.Dict {
+	// 		if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
+	// 			return err
+	// 		}
+	// 		z.Pos += 4
+	// 		binary.LittleEndian.PutUint32(buf[:], z.DictID)
+	// 		z.checksum.Write(buf[0:4])
+	// 	}
+
+	// header checksum
+	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+		return err
+	}
+	z.Pos++
+	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+		return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
+	}
+
+	z.Header.done = true
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+//
+// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
+//
+// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
+// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
+func (z *Reader) Read(buf []byte) (n int, err error) {
+	if !z.Header.done {
+		if err = z.readHeader(true); err != nil {
+			return
+		}
+	}
+
+	if len(buf) == 0 {
+		return
+	}
+
+	// exhaust remaining data from previous Read()
+	if len(z.data) > 0 {
+		n = copy(buf, z.data)
+		z.data = z.data[n:]
+		if len(z.data) == 0 {
+			z.data = nil
+		}
+		return
+	}
+
+	// Break up the input buffer into BlockMaxSize blocks with at least one block.
+	// Then decompress into each of them concurrently if possible (no dependency).
+	// In case of dependency, the first block will be missing the window (except on the
+	// very first call), the rest will have it already since it comes from the previous block.
+	wbuf := buf
+	zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
+	zblocks := make([]block, zn)
+	for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
+		zb := &zblocks[zi]
+		// last block may be too small
+		if len(wbuf) < z.BlockMaxSize+len(z.window) {
+			wbuf = make([]byte, z.BlockMaxSize+len(z.window))
+		}
+		copy(wbuf, z.window)
+		if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
+			break
+		}
+		wbuf = wbuf[z.BlockMaxSize:]
+		if !z.BlockDependency {
+			z.wg.Add(1)
+			go z.decompressBlock(zb, &abort)
+			continue
+		}
+		// cannot decompress concurrently when dealing with block dependency
+		z.decompressBlock(zb, nil)
+		// the last block may not contain enough data
+		if len(z.window) == 0 {
+			z.window = make([]byte, winSize)
+		}
+		if len(zb.data) >= winSize {
+			copy(z.window, zb.data[len(zb.data)-winSize:])
+		} else {
+			copy(z.window, z.window[len(zb.data):])
+			copy(z.window[len(zb.data)+1:], zb.data)
+		}
+	}
+	z.wg.Wait()
+
+	// since a block size may be less than BlockMaxSize, trim the decompressed buffers
+	for _, zb := range zblocks {
+		if zb.err != nil {
+			if zb.err == errEndOfBlock {
+				return n, z.close()
+			}
+			return n, zb.err
+		}
+		bLen := len(zb.data)
+		if !z.NoChecksum {
+			z.checksum.Write(zb.data)
+		}
+		m := copy(buf[n:], zb.data)
+		// buffer the remaining data (this is necessarily the last block)
+		if m < bLen {
+			z.data = zb.data[m:]
+		}
+		n += m
+	}
+
+	return
+}
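+
+// For instance (editor's sketch, `zr` assumed to be a *Reader): with the
+// default 4Mb block max size and no block dependency, a caller wanting up to
+// four blocks decompressed concurrently would supply a 16Mb buffer:
+//
+//	buf := make([]byte, 4*(4<<20))
+//	n, err := zr.Read(buf)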
+
+// readBlock reads an entire frame block from the frame.
+// The input buffer is the one that will receive the decompressed data.
+// If the end of the frame is detected, it returns the errEndOfBlock error.
+func (z *Reader) readBlock(buf []byte, b *block) error {
+	var bLen uint32
+	if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
+		return err
+	}
+	atomic.AddInt64(&z.Pos, 4)
+
+	switch {
+	case bLen == 0:
+		return errEndOfBlock
+	case bLen&(1<<31) == 0:
+		b.compressed = true
+		b.data = buf
+		b.zdata = make([]byte, bLen)
+	default:
+		bLen = bLen & (1<<31 - 1)
+		if int(bLen) > len(buf) {
+			return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
+		}
+		b.data = buf[:bLen]
+		b.zdata = buf[:bLen]
+	}
+	if _, err := io.ReadFull(z.src, b.zdata); err != nil {
+		return err
+	}
+
+	if z.BlockChecksum {
+		if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
+			return err
+		}
+		xxh := hashPool.Get()
+		defer hashPool.Put(xxh)
+		xxh.Write(b.zdata)
+		if h := xxh.Sum32(); h != b.checksum {
+			return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
+		}
+	}
+
+	return nil
+}
+
+// decompressBlock decompresses a frame block.
+// In case of an error, the block err is set with it and abort is set to 1.
+func (z *Reader) decompressBlock(b *block, abort *uint32) {
+	if abort != nil {
+		defer z.wg.Done()
+	}
+	if b.compressed {
+		n := len(z.window)
+		m, err := UncompressBlock(b.zdata, b.data, n)
+		if err != nil {
+			if abort != nil {
+				atomic.StoreUint32(abort, 1)
+			}
+			b.err = err
+			return
+		}
+		b.data = b.data[n : n+m]
+	}
+	atomic.AddInt64(&z.Pos, int64(len(b.data)))
+}
+
+// close validates the frame checksum (if any) and checks the next frame (if any).
+func (z *Reader) close() error {
+	if !z.NoChecksum {
+		var checksum uint32
+		if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
+			return err
+		}
+		if checksum != z.checksum.Sum32() {
+			return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
+		}
+	}
+
+	// get ready for the next concatenated frame, but do not change the position
+	pos := z.Pos
+	z.Reset(z.src)
+	z.Pos = pos
+
+	// since multiple frames can be concatenated, check for another one
+	return z.readHeader(false)
+}
+
+// Reset discards the Reader's state and makes it equivalent to its initial
+// state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.Pos = 0
+	z.src = r
+	z.checksum.Reset()
+	z.data = nil
+	z.window = nil
+}
+
+// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
+// Returns the number of bytes written.
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	cpus := runtime.GOMAXPROCS(0)
+	var buf []byte
+
+	// Since the initial buffer is nil, the first Read will only read the compressed frame options.
+	// The buffer can then be sized appropriately to support maximum concurrency decompression.
+	// If multiple frames are concatenated, Read() will return with no data decompressed but with
+	// potentially changed options. The buffer will be resized accordingly, always trying to
+	// maximize concurrency.
+	for {
+		nsize := 0
+		// the block max size can change if multiple streams are concatenated.
+		// Check it after every Read().
+		if z.BlockDependency {
+			// in case of dependency, we cannot decompress concurrently,
+			// so allocate the minimum buffer + window size
+			nsize = len(z.window) + z.BlockMaxSize
+		} else {
+			// if no dependency, allocate a buffer large enough for concurrent decompression
+			nsize = cpus * z.BlockMaxSize
+		}
+		if nsize != len(buf) {
+			buf = make([]byte, nsize)
+		}
+
+		m, er := z.Read(buf)
+		if er != nil && er != io.EOF {
+			return n, er
+		}
+		m, err = w.Write(buf[:m])
+		n += int64(m)
+		if err != nil || er == io.EOF {
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 00000000..b1b712fe
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,377 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"io"
+	"runtime"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+	Header
+	dst      io.Writer
+	checksum hash.Hash32 // frame checksum
+	data     []byte      // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
+	window   []byte      // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
+
+	zbCompressBuf []byte // buffer for compressing lz4 blocks
+	writeSizeBuf  []byte // four-byte slice for writing checksums and sizes in writeblock
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change the Header before the first Write, but not afterwards until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+	return &Writer{
+		dst:      dst,
+		checksum: hashPool.Get(),
+		Header: Header{
+			BlockMaxSize: 4 << 20,
+		},
+		writeSizeBuf: make([]byte, 4),
+	}
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+	// Default to 4Mb if BlockMaxSize is not set
+	if z.Header.BlockMaxSize == 0 {
+		z.Header.BlockMaxSize = 4 << 20
+	}
+	// the only option that needs to be validated
+	bSize, ok := bsMapValue[z.Header.BlockMaxSize]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
+	}
+
+	// magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+	// Size and DictID are optional
+	var buf [19]byte
+
+	// set the fixed size data: magic number, block max size and flags
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(Version << 6)
+	if !z.Header.BlockDependency {
+		flg |= 1 << 5
+	}
+	if z.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if z.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	if !z.Header.NoChecksum {
+		flg |= 1 << 2
+	}
+	//  if z.Header.Dict {
+	//      flg |= 1
+	//  }
+	buf[4] = flg
+	buf[5] = bSize << 4
+
+	// current buffer size: magic(4) + flags(1) + block max size (1)
+	n := 6
+	// optional items
+	if z.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+		n += 8
+	}
+	//  if z.Header.Dict {
+	//      binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
+	//      n += 4
+	//  }
+
+	// header checksum includes the flags, block max size and optional Size and DictID
+	z.checksum.Write(buf[4:n])
+	buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
+	z.checksum.Reset()
+
+	// header ready, write it out
+	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+		return err
+	}
+	z.Header.done = true
+
+	// initialize buffers dependent on header info
+	z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
+
+	return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+//
+// If the input buffer is large enough (typically in multiples of BlockMaxSize)
+// the data will be compressed concurrently.
+//
+// Write never buffers any data, except in BlockDependency mode, where it may
+// buffer up to 64Kb of data; after that it never buffers any.
+func (z *Writer) Write(buf []byte) (n int, err error) {
+	if !z.Header.done {
+		if err = z.writeHeader(); err != nil {
+			return
+		}
+	}
+
+	if len(buf) == 0 {
+		return
+	}
+
+	if !z.NoChecksum {
+		z.checksum.Write(buf)
+	}
+
+	// with block dependency, require at least 64Kb of data to work with
+	// not having 64Kb only matters initially, to set up the first window
+	bl := 0
+	if z.BlockDependency && len(z.window) == 0 {
+		bl = len(z.data)
+		z.data = append(z.data, buf...)
+		if len(z.data) < winSize {
+			return len(buf), nil
+		}
+		buf = z.data
+		z.data = nil
+	}
+
+	// Break up the input buffer into BlockMaxSize blocks, provisioning the leftover block.
+	// Then compress into each of them concurrently if possible (no dependency).
+	var (
+		zb       block
+		wbuf     = buf
+		zn       = len(wbuf) / z.BlockMaxSize
+		zi       = 0
+		leftover = len(buf) % z.BlockMaxSize
+	)
+
+loop:
+	for zi < zn {
+		if z.BlockDependency {
+			if zi == 0 {
+				// first block does not have the window
+				zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
+				zb.offset = len(z.window)
+				wbuf = wbuf[z.BlockMaxSize-winSize:]
+			} else {
+				// set the uncompressed data including the window from previous block
+				zb.data = wbuf[:z.BlockMaxSize+winSize]
+				zb.offset = winSize
+				wbuf = wbuf[z.BlockMaxSize:]
+			}
+		} else {
+			zb.data = wbuf[:z.BlockMaxSize]
+			wbuf = wbuf[z.BlockMaxSize:]
+		}
+
+		goto write
+	}
+
+	// left over
+	if leftover > 0 {
+		zb = block{data: wbuf}
+		if z.BlockDependency {
+			if zn == 0 {
+				zb.data = append(z.window, zb.data...)
+				zb.offset = len(z.window)
+			} else {
+				zb.offset = winSize
+			}
+		}
+
+		leftover = 0
+		goto write
+	}
+
+	if z.BlockDependency {
+		if len(z.window) == 0 {
+			z.window = make([]byte, winSize)
+		}
+		// last buffer may be shorter than the window
+		if len(buf) >= winSize {
+			copy(z.window, buf[len(buf)-winSize:])
+		} else {
+			copy(z.window, z.window[len(buf):])
+			copy(z.window[len(buf)+1:], buf)
+		}
+	}
+
+	return
+
+write:
+	zb = z.compressBlock(zb)
+	_, err = z.writeBlock(zb)
+
+	written := len(zb.data)
+	if bl > 0 {
+		if written >= bl {
+			written -= bl
+			bl = 0
+		} else {
+			bl -= written
+			written = 0
+		}
+	}
+
+	n += written
+	// remove the window in zb.data
+	if z.BlockDependency {
+		if zi == 0 {
+			n -= len(z.window)
+		} else {
+			n -= winSize
+		}
+	}
+	if err != nil {
+		return
+	}
+	zi++
+	goto loop
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(zb block) block {
+	// compressed block size cannot exceed the input's
+	var (
+		n    int
+		err  error
+		zbuf = z.zbCompressBuf
+	)
+	if z.HighCompression {
+		n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
+	} else {
+		n, err = CompressBlock(zb.data, zbuf, zb.offset)
+	}
+
+	// compressible and compressed size smaller than decompressed: ok!
+	if err == nil && n > 0 && len(zb.zdata) < len(zb.data) {
+		zb.compressed = true
+		zb.zdata = zbuf[:n]
+	} else {
+		zb.compressed = false
+		zb.zdata = zb.data[zb.offset:]
+	}
+
+	if z.BlockChecksum {
+		xxh := hashPool.Get()
+		xxh.Write(zb.zdata)
+		zb.checksum = xxh.Sum32()
+		hashPool.Put(xxh)
+	}
+
+	return zb
+}
+
+// writeBlock writes a frame block to the underlying io.Writer (size, data).
+func (z *Writer) writeBlock(zb block) (int, error) {
+	bLen := uint32(len(zb.zdata))
+	if !zb.compressed {
+		bLen |= 1 << 31
+	}
+
+	n := 0
+
+	binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
+	n, err := z.dst.Write(z.writeSizeBuf)
+	if err != nil {
+		return n, err
+	}
+
+	m, err := z.dst.Write(zb.zdata)
+	n += m
+	if err != nil {
+		return n, err
+	}
+
+	if z.BlockChecksum {
+		binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
+		m, err := z.dst.Write(z.writeSizeBuf)
+		n += m
+
+		if err != nil {
+			return n, err
+		}
+	}
+
+	return n, nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// Flush is only required when in BlockDependency mode and the total of
+// data written is less than 64Kb.
+func (z *Writer) Flush() error {
+	if len(z.data) == 0 {
+		return nil
+	}
+
+	zb := z.compressBlock(block{data: z.data})
+	if _, err := z.writeBlock(zb); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return err
+		}
+	}
+
+	// buffered data for the block dependency window
+	if z.BlockDependency && len(z.data) > 0 {
+		zb := block{data: z.data}
+		if _, err := z.writeBlock(z.compressBlock(zb)); err != nil {
+			return err
+		}
+	}
+
+	if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil {
+		return err
+	}
+	if !z.NoChecksum {
+		if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+	z.Header = Header{}
+	z.dst = w
+	z.checksum.Reset()
+	z.data = nil
+	z.window = nil
+}
+
+// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer.
+// Returns the number of bytes read.
+// It does not close the Writer.
+func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	cpus := runtime.GOMAXPROCS(0)
+	buf := make([]byte, cpus*z.BlockMaxSize)
+	for {
+		m, er := io.ReadFull(r, buf)
+		n += int64(m)
+		if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF {
+			if _, err = z.Write(buf[:m]); err != nil {
+				return
+			}
+			if er == nil {
+				continue
+			}
+			return
+		}
+		return n, er
+	}
+}
diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE
new file mode 100644
index 00000000..c1418f3f
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2014, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
new file mode 100644
index 00000000..411504e4
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
@@ -0,0 +1,205 @@
+// Package xxHash32 implements the very fast xxHash hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/xxHash/)
+package xxHash32
+
+import "hash"
+
+const (
+	prime32_1 = 2654435761
+	prime32_2 = 2246822519
+	prime32_3 = 3266489917
+	prime32_4 = 668265263
+	prime32_5 = 374761393
+)
+
+type xxHash struct {
+	seed     uint32
+	v1       uint32
+	v2       uint32
+	v3       uint32
+	v4       uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// New returns a new Hash32 instance.
+func New(seed uint32) hash.Hash32 {
+	xxh := &xxHash{seed: seed}
+	xxh.Reset()
+	return xxh
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh xxHash) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *xxHash) Reset() {
+	xxh.v1 = xxh.seed + prime32_1 + prime32_2
+	xxh.v2 = xxh.seed + prime32_2
+	xxh.v3 = xxh.seed
+	xxh.v4 = xxh.seed - prime32_1
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *xxHash) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *xxHash) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *xxHash) Write(input []byte) (int, error) {
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	p := 0
+	if m > 0 {
+		// some data left from previous update
+		copy(xxh.buf[xxh.bufused:], input[:r])
+		xxh.bufused += len(input) - r
+
+		// fast rotl(13)
+		p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+		xxh.v1 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+		xxh.v2 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+		xxh.v3 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+		xxh.v4 = (p32<<13 | p32>>19) * prime32_1
+
+		p = r
+		xxh.bufused = 0
+	}
+
+	for n := n - 16; p <= n; {
+		p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+		xxh.v1 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+		xxh.v2 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+		xxh.v3 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+		p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+		xxh.v4 = (p32<<13 | p32>>19) * prime32_1
+		p += 4
+	}
+
+	copy(xxh.buf[xxh.bufused:], input[p:])
+	xxh.bufused += len(input) - p
+
+	return n, nil
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *xxHash) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if xxh.totalLen >= 16 {
+		h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) +
+			((xxh.v2 << 7) | (xxh.v2 >> 25)) +
+			((xxh.v3 << 12) | (xxh.v3 >> 20)) +
+			((xxh.v4 << 18) | (xxh.v4 >> 14))
+	} else {
+		h32 += xxh.seed + prime32_5
+	}
+
+	p := 0
+	n := xxh.bufused
+	for n := n - 4; p <= n; p += 4 {
+		h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3
+		h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(xxh.buf[p]) * prime32_5
+		h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime32_2
+	h32 ^= h32 >> 13
+	h32 *= prime32_3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Checksum returns the 32-bit hash value.
+func Checksum(input []byte, seed uint32) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += seed + prime32_5
+	} else {
+		v1 := seed + prime32_1 + prime32_2
+		v2 := seed + prime32_2
+		v3 := seed
+		v4 := seed - prime32_1
+		p := 0
+		for p <= n-16 {
+			v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+			v1 = (v1<<13 | v1>>19) * prime32_1
+			p += 4
+			v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+			v2 = (v2<<13 | v2>>19) * prime32_1
+			p += 4
+			v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+			v3 = (v3<<13 | v3>>19) * prime32_1
+			p += 4
+			v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+			v4 = (v4<<13 | v4>>19) * prime32_1
+			p += 4
+		}
+		input = input[p:]
+		n -= p
+		h32 += ((v1 << 1) | (v1 >> 31)) +
+			((v2 << 7) | (v2 >> 25)) +
+			((v3 << 12) | (v3 >> 20)) +
+			((v4 << 18) | (v4 >> 14))
+	}
+
+	p := 0
+	for p <= n-4 {
+		h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3
+		h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
+		p += 4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime32_5
+		h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime32_2
+	h32 ^= h32 >> 13
+	h32 *= prime32_3
+	h32 ^= h32 >> 16
+
+	return h32
+}
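+
+// Illustration (editor's sketch, not upstream code): the streaming and
+// one-shot APIs agree for the same input and seed.
+//
+//	h := New(0)
+//	h.Write(data)
+//	sum := h.Sum32() // equals Checksum(data, 0)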
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 00000000..83c8f823
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 00000000..117763e6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+
+script:
+    - ./validate.sh
+
+# this should give us faster builds according to 
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 00000000..363fa9ee
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    1.  Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+
+    2.  Redistributions in binary form must reproduce the above
+        copyright notice, this list of conditions and the following
+        disclaimer in the documentation and/or other materials provided
+        with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 00000000..17cea76d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,167 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := metrics.NewRegistry()
+fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not thread-safe. For thread-safe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers; otherwise they
+will leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+  10e9,
+  "127.0.0.1:8086",
+  "database-name",
+  "username",
+  "password",
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+    10e9,                  // interval
+    "example@example.com", // account owner email address
+    "token",               // Librato API token
+    "hostname",            // source
+    []float64{0.95},       // percentiles to send
+    time.Millisecond,      // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but is exposed under `/debug/metrics`, which shows a JSON representation of all
+your usual expvars as well as all your go-metrics.
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
+* Honeycomb - https://github.com/getspine/go-metrics-honeycomb
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 00000000..bb7b039c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+	Clear()
+	Count() int64
+	Dec(int64)
+	Inc(int64)
+	Snapshot() Counter
+}
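+
+// A minimal usage sketch (illustrative only, not part of the vendored
+// API surface); the metric name "jobs.completed" is arbitrary:
+//
+//	c := metrics.NewCounter()
+//	metrics.Register("jobs.completed", c)
+//	c.Inc(1)
+//	total := c.Count() // 1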
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+	if UseNilMetrics {
+		return NilCounter{}
+	}
+	return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+	c := NewCounter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+	count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+	atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+	return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+	atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+	atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+	return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 00000000..043ccefa
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+	"runtime/debug"
+	"time"
+)
+
+var (
+	debugMetrics struct {
+		GCStats struct {
+			LastGC Gauge
+			NumGC  Gauge
+			Pause  Histogram
+			//PauseQuantiles Histogram
+			PauseTotal Gauge
+		}
+		ReadGCStats Timer
+	}
+	gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+	for _ = range time.Tick(d) {
+		CaptureDebugGCStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called in a background goroutine.
+// Calling it with a registry that has not been passed to
+// RegisterDebugGCStats will panic.
+//
+// Be somewhat careful with this: debug.ReadGCStats calls the C function
+// runtime·lock(runtime·mheap) which, while not a stop-the-world operation,
+// isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+	lastGC := gcStats.LastGC
+	t := time.Now()
+	debug.ReadGCStats(&gcStats)
+	debugMetrics.ReadGCStats.UpdateSince(t)
+
+	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+	}
+	//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+	debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats.  The metrics are named by their fully-qualified Go symbols,
+// e.g. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+	debugMetrics.GCStats.LastGC = NewGauge()
+	debugMetrics.GCStats.NumGC = NewGauge()
+	debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+	//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+	debugMetrics.GCStats.PauseTotal = NewGauge()
+	debugMetrics.ReadGCStats = NewTimer()
+
+	r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+	r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+	r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+	//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+	r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+	r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
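+
+// Typical wiring (an illustrative sketch, not prescribed by this
+// package): register the metrics once, then capture periodically in a
+// background goroutine.
+//
+//	r := metrics.NewRegistry()
+//	metrics.RegisterDebugGCStats(r)
+//	go metrics.CaptureDebugGCStats(r, 5*time.Second)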
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+	gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 00000000..a8183dd7
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,138 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+	Rate() float64
+	Snapshot() EWMA
+	Tick()
+	Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+	if UseNilMetrics {
+		return NilEWMA{}
+	}
+	return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
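+
+// The alpha values above follow the standard EWMA derivation assuming a
+// five-second tick interval: alpha = 1 - e^(-interval/window), with the
+// interval (5s) expressed in minutes, i.e. 5.0/60.0, and a window of 1,
+// 5, or 15 minutes.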
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+	alpha     float64
+	rate      uint64
+	init      uint32
+	mutex     sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// fetchInstantRate() uses atomic loads, which is unnecessary in this
+			// critical section, but this branch is only taken on the first successful Tick().
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
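+	// 5e9 is the assumed tick interval (five seconds) expressed in
+	// nanoseconds; Rate() multiplies by 1e9 to convert the stored
+	// per-nanosecond rate back to events per second.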
+	instantRate := float64(count) / float64(5e9)
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 00000000..cb57a938
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Snapshot() Gauge
+	Update(int64)
+	Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
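+
+// An illustrative sketch of a functional gauge sampling a live value on
+// each read (runtime.NumGoroutine is just an example source; it requires
+// importing "runtime"):
+//
+//	g := metrics.NewRegisteredFunctionalGauge("goroutines", nil, func() int64 {
+//		return int64(runtime.NumGoroutine())
+//	})
+//	_ = g.Value() // invokes the function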
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 00000000..3962e6db
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and
+// uses the sync/atomic package to manage a single float64 value stored as
+// its IEEE 754 bit pattern in a uint64.
+type StandardGaugeFloat64 struct {
+	value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+	panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 00000000..abd0a7d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+	Percentiles   []float64     // Percentiles to export from timers and histograms
+}
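+
+// Each flush writes Graphite's plaintext protocol, one sample per line
+// of the form "<prefix>.<name>.<field> <value> <unix-timestamp>", as
+// produced by the Fprintf calls in graphite below.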
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := graphite(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+		case Gauge:
+			fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 00000000..445131ca
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+	Check()
+	Error() error
+	Healthy()
+	Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+	if UseNilMetrics {
+		return NilHealthcheck{}
+	}
+	return &StandardHealthcheck{nil, f}
+}
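+
+// An illustrative sketch (pingDatabase is a hypothetical dependency
+// check, not part of this package):
+//
+//	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
+//		if err := pingDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	hc.Check()     // runs the function above
+//	_ = hc.Error() // nil when healthy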
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+	err error
+	f   func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+	h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+	return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+	h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy.  The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+	h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 00000000..dbc837fe
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Sample() Sample
+	Snapshot() Histogram
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
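+
+// An illustrative sketch pairing a histogram with an
+// exponentially-decaying sample (the 1028/0.015 parameters mirror the
+// defaults used elsewhere in this library):
+//
+//	h := metrics.GetOrRegisterHistogram("req.bytes", nil,
+//		metrics.NewExpDecaySample(1028, 0.015))
+//	h.Update(512)
+//	p99 := h.Percentile(0.99)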
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+	if UseNilMetrics {
+		return NilHistogram{}
+	}
+	return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+	c := NewHistogram(s)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+	sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 00000000..174b9477
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(r.GetAll())
+}
+
+// WriteJSON periodically writes metrics from the given registry to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+	for _ = range time.Tick(d) {
+		WriteJSONOnce(r, w)
+	}
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+	json.NewEncoder(w).Encode(r)
+}
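+
+// An illustrative one-shot dump of the default registry to stdout
+// (assumes the caller imports "os"):
+//
+//	metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stdout)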
+
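+// MarshalJSON returns a byte slice containing a JSON representation of
+// all the metrics in the PrefixedRegistry.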
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 00000000..f8074c04
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+	"time"
+)
+
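+// Logger is the minimal logging interface used by Log and LogScaled; a
+// *log.Logger satisfies it.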
+type Logger interface {
+	Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+	LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// LogScaled outputs each metric in the given registry periodically using the
+// given logger. Timings are printed in `scale` units (e.g. time.Millisecond)
+// rather than nanoseconds.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+	du := float64(scale)
+	duSuffix := scale.String()[1:]
+
+	for _ = range time.Tick(freq) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				l.Printf("counter %s\n", name)
+				l.Printf("  count:       %9d\n", metric.Count())
+			case Gauge:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %9d\n", metric.Value())
+			case GaugeFloat64:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %f\n", metric.Value())
+			case Healthcheck:
+				metric.Check()
+				l.Printf("healthcheck %s\n", name)
+				l.Printf("  error:       %v\n", metric.Error())
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("histogram %s\n", name)
+				l.Printf("  count:       %9d\n", h.Count())
+				l.Printf("  min:         %9d\n", h.Min())
+				l.Printf("  max:         %9d\n", h.Max())
+				l.Printf("  mean:        %12.2f\n", h.Mean())
+				l.Printf("  stddev:      %12.2f\n", h.StdDev())
+				l.Printf("  median:      %12.2f\n", ps[0])
+				l.Printf("  75%%:         %12.2f\n", ps[1])
+				l.Printf("  95%%:         %12.2f\n", ps[2])
+				l.Printf("  99%%:         %12.2f\n", ps[3])
+				l.Printf("  99.9%%:       %12.2f\n", ps[4])
+			case Meter:
+				m := metric.Snapshot()
+				l.Printf("meter %s\n", name)
+				l.Printf("  count:       %9d\n", m.Count())
+				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", m.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("timer %s\n", name)
+				l.Printf("  count:       %9d\n", t.Count())
+				l.Printf("  min:         %12.2f%s\n", float64(t.Min())/du, duSuffix)
+				l.Printf("  max:         %12.2f%s\n", float64(t.Max())/du, duSuffix)
+				l.Printf("  mean:        %12.2f%s\n", t.Mean()/du, duSuffix)
+				l.Printf("  stddev:      %12.2f%s\n", t.StdDev()/du, duSuffix)
+				l.Printf("  median:      %12.2f%s\n", ps[0]/du, duSuffix)
+				l.Printf("  75%%:         %12.2f%s\n", ps[1]/du, duSuffix)
+				l.Printf("  95%%:         %12.2f%s\n", ps[2]/du, duSuffix)
+				l.Printf("  99%%:         %12.2f%s\n", ps[3]/du, duSuffix)
+				l.Printf("  99.9%%:       %12.2f%s\n", ps[4]/du, duSuffix)
+				l.Printf("  1-min rate:  %12.2f\n", t.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", t.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", t.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", t.RateMean())
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 00000000..47454f54
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+	time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak:    42604 kB
+VmSize:    42604 kB
+VmLck:         0 kB
+VmHWM:      1120 kB
+VmRSS:      1120 kB
+VmData:    35460 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        36 kB
+VmSwap:        0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+	"fmt"
+	"metrics"
+	"time"
+)
+
+func main() {
+	fmt.Sprintf("foo")
+	metrics.NewRegistry()
+	time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak:    44016 kB
+VmSize:    44016 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak:    55024 kB
+VmSize:    55024 kB
+VmLck:         0 kB
+VmHWM:     12440 kB
+VmRSS:     12440 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak:    44012 kB
+VmSize:    44012 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak:    55020 kB
+VmSize:    55020 kB
+VmLck:         0 kB
+VmHWM:     12432 kB
+VmRSS:     12432 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        60 kB
+VmSwap:        0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak:    72272 kB
+VmSize:    72272 kB
+VmLck:         0 kB
+VmHWM:     16204 kB
+VmRSS:     16204 kB
+VmData:    65100 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        80 kB
+VmSwap:        0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   256912 kB
+VmSize:   256912 kB
+VmLck:         0 kB
+VmHWM:    146204 kB
+VmRSS:    146204 kB
+VmData:   249740 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       448 kB
+VmSwap:        0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   908112 kB
+VmSize:   908112 kB
+VmLck:         0 kB
+VmHWM:    645832 kB
+VmRSS:    645588 kB
+VmData:   900940 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1716 kB
+VmSwap:     1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak:    62480 kB
+VmSize:    62480 kB
+VmLck:         0 kB
+VmHWM:     11572 kB
+VmRSS:     11572 kB
+VmData:    55308 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   153296 kB
+VmSize:   153296 kB
+VmLck:         0 kB
+VmHWM:    101176 kB
+VmRSS:    101176 kB
+VmData:   146124 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       240 kB
+VmSwap:        0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   557264 kB
+VmSize:   557264 kB
+VmLck:         0 kB
+VmHWM:    501056 kB
+VmRSS:    501056 kB
+VmData:   550092 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1032 kB
+VmSwap:        0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak:    74504 kB
+VmSize:    74504 kB
+VmLck:         0 kB
+VmHWM:     24124 kB
+VmRSS:     24124 kB
+VmData:    67340 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:        92 kB
+VmSwap:        0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak:   278920 kB
+VmSize:   278920 kB
+VmLck:         0 kB
+VmHWM:    227300 kB
+VmRSS:    227300 kB
+VmData:   271756 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:       488 kB
+VmSwap:        0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 00000000..7807406a
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,257 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	// Only used on stop.
+	lock        sync.Mutex
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter; once stopped, Mark() becomes a no-op.
+func (m *StandardMeter) Stop() {
+	m.lock.Lock()
+	stopped := m.stopped
+	m.stopped = 1
+	m.lock.Unlock()
+	if stopped != 1 {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
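+// The 5e9 ticker interval is five seconds expressed in nanoseconds,
+// matching the five-second tick assumption baked into the EWMA alpha
+// values.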
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// tick ticks all registered meters on the scheduled interval.
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 00000000..b97a49ed
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics.  If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
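+
+// An illustrative sketch: flip the kill-switch before constructing any
+// metrics and every constructor returns a no-op stub.
+//
+//	metrics.UseNilMetrics = true
+//	c := metrics.NewCounter() // NilCounter{}
+//	c.Inc(1)                  // does nothing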
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 00000000..266b6c93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
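+
+// Each flush writes OpenTSDB's telnet-style put protocol, one sample
+// per line of the form
+// "put <prefix>.<name>.<field> <unix-timestamp> <value> host=<short-hostname>".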
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 00000000..b3bab64e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,363 @@
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists.  If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+	return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+	// Call the given function for each registered metric.
+	Each(func(string, interface{}))
+
+	// Get the metric by the given name or nil if none is registered.
+	Get(string) interface{}
+
+	// GetAll metrics in the Registry.
+	GetAll() map[string]map[string]interface{}
+
+	// Gets an existing metric or registers the given one.
+	// The interface can be the metric to register if not found in registry,
+	// or a function returning the metric for lazy instantiation.
+	GetOrRegister(string, interface{}) interface{}
+
+	// Register the given metric under the given name.
+	Register(string, interface{}) error
+
+	// Run all registered healthchecks.
+	RunHealthchecks()
+
+	// Unregister the metric with the given name.
+	Unregister(string)
+
+	// Unregister all metrics.  (Mostly for testing.)
+	UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+	metrics map[string]interface{}
+	mutex   sync.RWMutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+	return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+	for name, i := range r.registered() {
+		f(name, i)
+	}
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// take the (shared) read lock first; concurrent readers do not block each other
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if metric, ok := r.metrics[name]; ok {
+		return metric
+	}
+	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+		i = v.Call(nil)[0].Interface()
+	}
+	r.register(name, i)
+	return i
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	for _, i := range r.metrics {
+		if h, ok := i.(Healthcheck); ok {
+			h.Check()
+		}
+	}
+}
+
+// GetAll metrics in the Registry
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = metric.Error().Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.stop(name)
+	delete(r.metrics, name)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		r.stop(name)
+		delete(r.metrics, name)
+	}
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+	if _, ok := r.metrics[name]; ok {
+		return DuplicateMetric(name)
+	}
+	switch i.(type) {
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+		r.metrics[name] = i
+	}
+	return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	metrics := make(map[string]interface{}, len(r.metrics))
+	for name, i := range r.metrics {
+		metrics[name] = i
+	}
+	return metrics
+}
+
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable defines metrics that have to be stopped.
+type Stoppable interface {
+	Stop()
+}
+
+// PrefixedRegistry wraps an underlying Registry, prefixing all metric names.
+type PrefixedRegistry struct {
+	underlying Registry
+	prefix     string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: NewRegistry(),
+		prefix:     prefix,
+	}
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: parent,
+		prefix:     prefix,
+	}
+}
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			}
+		}
+	}
+
+	baseRegistry, prefix := findPrefix(r, "")
+	baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+	switch r := registry.(type) {
+	case *PrefixedRegistry:
+		return findPrefix(r.underlying, r.prefix+prefix)
+	case *StandardRegistry:
+		return r, prefix
+	}
+	return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+	realName := r.prefix + name
+	return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	realName := r.prefix + name
+	return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+	realName := r.prefix + name
+	return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+	r.underlying.RunHealthchecks()
+}
+
+// GetAll metrics in the Registry
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+	realName := r.prefix + name
+	r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+	r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+	DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+	return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+	return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+	return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name.  Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+	if err := Register(name, i); err != nil {
+		panic(err)
+	}
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+	DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+	DefaultRegistry.Unregister(name)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 00000000..11c6b785
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+	"runtime"
+	"runtime/pprof"
+	"time"
+)
+
+var (
+	memStats       runtime.MemStats
+	runtimeMetrics struct {
+		MemStats struct {
+			Alloc         Gauge
+			BuckHashSys   Gauge
+			DebugGC       Gauge
+			EnableGC      Gauge
+			Frees         Gauge
+			HeapAlloc     Gauge
+			HeapIdle      Gauge
+			HeapInuse     Gauge
+			HeapObjects   Gauge
+			HeapReleased  Gauge
+			HeapSys       Gauge
+			LastGC        Gauge
+			Lookups       Gauge
+			Mallocs       Gauge
+			MCacheInuse   Gauge
+			MCacheSys     Gauge
+			MSpanInuse    Gauge
+			MSpanSys      Gauge
+			NextGC        Gauge
+			NumGC         Gauge
+			GCCPUFraction GaugeFloat64
+			PauseNs       Histogram
+			PauseTotalNs  Gauge
+			StackInuse    Gauge
+			StackSys      Gauge
+			Sys           Gauge
+			TotalAlloc    Gauge
+		}
+		NumCgoCall   Gauge
+		NumGoroutine Gauge
+		NumThread    Gauge
+		ReadMemStats Timer
+	}
+	frees       uint64
+	lookups     uint64
+	mallocs     uint64
+	numGC       uint32
+	numCgoCalls int64
+
+	threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+		CaptureRuntimeMemStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called in a background
+// goroutine.  Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+	t := time.Now()
+	runtime.ReadMemStats(&memStats) // This takes 50-200us.
+	runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+	runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+	runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+	if memStats.DebugGC {
+		runtimeMetrics.MemStats.DebugGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.DebugGC.Update(0)
+	}
+	if memStats.EnableGC {
+		runtimeMetrics.MemStats.EnableGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.EnableGC.Update(0)
+	}
+
+	runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+	runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+	runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+	runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+	runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+	runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+	runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+	runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+	runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+	runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+	runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+	runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+	runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+	runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+	i := numGC % uint32(len(memStats.PauseNs))
+	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	} else {
+		if i > ii {
+			for ; i < uint32(len(memStats.PauseNs)); i++ {
+				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+			}
+			i = 0
+		}
+		for ; i < ii; i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	}
+	frees = memStats.Frees
+	lookups = memStats.Lookups
+	mallocs = memStats.Mallocs
+	numGC = memStats.NumGC
+
+	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+	runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+	runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+	currentNumCgoCalls := numCgoCall()
+	runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+	numCgoCalls = currentNumCgoCalls
+
+	runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+	runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats.  The runtimeMetrics are named by their
+// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+	runtimeMetrics.MemStats.Alloc = NewGauge()
+	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+	runtimeMetrics.MemStats.DebugGC = NewGauge()
+	runtimeMetrics.MemStats.EnableGC = NewGauge()
+	runtimeMetrics.MemStats.Frees = NewGauge()
+	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+	runtimeMetrics.MemStats.HeapIdle = NewGauge()
+	runtimeMetrics.MemStats.HeapInuse = NewGauge()
+	runtimeMetrics.MemStats.HeapObjects = NewGauge()
+	runtimeMetrics.MemStats.HeapReleased = NewGauge()
+	runtimeMetrics.MemStats.HeapSys = NewGauge()
+	runtimeMetrics.MemStats.LastGC = NewGauge()
+	runtimeMetrics.MemStats.Lookups = NewGauge()
+	runtimeMetrics.MemStats.Mallocs = NewGauge()
+	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+	runtimeMetrics.MemStats.MCacheSys = NewGauge()
+	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+	runtimeMetrics.MemStats.MSpanSys = NewGauge()
+	runtimeMetrics.MemStats.NextGC = NewGauge()
+	runtimeMetrics.MemStats.NumGC = NewGauge()
+	runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+	runtimeMetrics.MemStats.StackInuse = NewGauge()
+	runtimeMetrics.MemStats.StackSys = NewGauge()
+	runtimeMetrics.MemStats.Sys = NewGauge()
+	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+	runtimeMetrics.NumCgoCall = NewGauge()
+	runtimeMetrics.NumGoroutine = NewGauge()
+	runtimeMetrics.NumThread = NewGauge()
+	runtimeMetrics.ReadMemStats = NewTimer()
+
+	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+	r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+	r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 00000000..e3391f4e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+	return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 00000000..ca12c05b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 00000000..616a3b47
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 00000000..be96aa6f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 00000000..fecee5ef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Values() []int64
+	Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
+type ExpDecaySample struct {
+	alpha         float64
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	t0, t1        time.Time
+	values        *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	s := &ExpDecaySample{
+		alpha:         alpha,
+		reservoirSize: reservoirSize,
+		t0:            time.Now(),
+		values:        newExpDecaySampleHeap(reservoirSize),
+	}
+	s.t1 = s.t0.Add(rescaleThreshold)
+	return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.t0 = time.Now()
+	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// NewSampleSnapshot constructs a read-only sample from the given count and values.
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
+type UniformSample struct {
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	values        []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	return &UniformSample{
+		reservoirSize: reservoirSize,
+		values:        make([]int64, 0, reservoirSize),
+	}
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if len(s.values) < s.reservoirSize {
+		s.values = append(s.values, v)
+	} else {
+		r := rand.Int63n(s.count)
+		if r < int64(len(s.values)) {
+			s.values[int(r)] = v
+		}
+	}
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+	k float64
+	v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap.
+type expDecaySampleHeap struct {
+	s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+	h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+	n := len(h.s)
+	h.s = h.s[0 : n+1]
+	h.s[n] = s
+	h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+	n := len(h.s) - 1
+	h.s[0], h.s[n] = h.s[n], h.s[0]
+	h.down(0, n)
+
+	n = len(h.s)
+	s := h.s[n-1]
+	h.s = h.s[0 : n-1]
+	return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+	return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+	return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+	for {
+		i := (j - 1) / 2 // parent
+		if i == j || !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		j = i
+	}
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+	for {
+		j1 := 2*i + 1
+		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+			break
+		}
+		j := j1 // left child
+		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+			j = j2 // = 2*i + 2  // right child
+		}
+		if !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		i = j
+	}
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 00000000..693f1908
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+	"fmt"
+	"log/syslog"
+	"time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+			case Gauge:
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+			case GaugeFloat64:
+				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+			case Healthcheck:
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+					name,
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+				))
+			case Meter:
+				m := metric.Snapshot()
+				w.Info(fmt.Sprintf(
+					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+					name,
+					m.Count(),
+					m.Rate1(),
+					m.Rate5(),
+					m.Rate15(),
+					m.RateMean(),
+				))
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+					name,
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
+				))
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 00000000..d6ec4c62
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Timer
+	StdDev() float64
+	Stop()
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+	c := NewTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+		meter:     NewMeter(),
+	}
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+	h Histogram
+	m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+	return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+	return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+	return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+	return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+	return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+	return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+	return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:     t.meter.Snapshot().(*MeterSnapshot),
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+	return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+	t.meter.Stop()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+	return t.histogram.Sum()
+}
+
+// Record the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Record the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
+}
+
+// Record the duration of an event that started at a time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100755
index 00000000..c4ae91e6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 00000000..091e971d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to the
+// given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+	var namedMetrics namedMetricSlice
+	r.Each(func(name string, i interface{}) {
+		namedMetrics = append(namedMetrics, namedMetric{name, i})
+	})
+
+	sort.Sort(namedMetrics)
+	for _, namedMetric := range namedMetrics {
+		switch metric := namedMetric.m.(type) {
+		case Counter:
+			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
+		case Gauge:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
+		case GaugeFloat64:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %f\n", metric.Value())
+		case Healthcheck:
+			metric.Check()
+			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
+		}
+	}
+}
+
+type namedMetric struct {
+	name string
+	m    interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+	return nms[i].name < nms[j].name
+}
diff --git a/vendor/github.com/sercand/kuberesolver/.gitignore b/vendor/github.com/sercand/kuberesolver/.gitignore
new file mode 100644
index 00000000..24846343
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/.gitignore
@@ -0,0 +1,2 @@
+.idea
+kuberesolver.iml
diff --git a/vendor/github.com/sercand/kuberesolver/README.md b/vendor/github.com/sercand/kuberesolver/README.md
new file mode 100644
index 00000000..ba3a8044
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/README.md
@@ -0,0 +1,18 @@
+# kuberesolver
+gRPC client-side load balancer with a Kubernetes name resolver
+
+```go
+// New balancer for the default namespace.
+balancer := kuberesolver.New()
+// Dials with the RoundRobin load balancer and the Kubernetes name resolver;
+// if the URL scheme is not 'kubernetes', it falls back to DNS.
+cc, err := balancer.Dial("kubernetes://service-name:portname", opts...)
+// Or add the balancer as a dial option, but this does not fall back to DNS
+// if the scheme is not 'kubernetes'.
+cc, err := grpc.Dial("kubernetes://service-name:portname", balancer.DialOption(), opts...)
+```
+A URL can be one of the following:
+```
+kubernetes://service-name:portname    uses kubernetes api to fetch endpoints and port names
+kubernetes://service-name:8080        uses kubernetes api to fetch endpoints but uses given port
+dns://service-name:8080               does not use lb
+service-name:8080
+```
diff --git a/vendor/github.com/sercand/kuberesolver/balancer.go b/vendor/github.com/sercand/kuberesolver/balancer.go
new file mode 100644
index 00000000..d03162de
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/balancer.go
@@ -0,0 +1,135 @@
+package kuberesolver
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/naming"
+)
+
+// Balancer dials gRPC targets, resolving kubernetes:// URLs through the
+// Kubernetes API and load-balancing across the resulting endpoints.
+type Balancer struct {
+	Namespace string
+	client    *k8sClient
+	resolvers []*kubeResolver
+}
+
+// TargetUrlType distinguishes kubernetes:// targets from plain DNS targets.
+type TargetUrlType int32
+
+const (
+	TargetTypeDNS        TargetUrlType = 0
+	TargetTypeKubernetes TargetUrlType = 1
+	kubernetesSchema                   = "kubernetes"
+	dnsSchema                          = "dns"
+)
+
+type targetInfo struct {
+	urlType           TargetUrlType
+	target            string
+	port              string
+	resolveByPortName bool
+	useFirstPort      bool
+}
+
+func parseTarget(target string) (targetInfo, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return targetInfo{}, err
+	}
+	ti := targetInfo{}
+	if u.Scheme == kubernetesSchema {
+		ti.urlType = TargetTypeKubernetes
+		spl := strings.Split(u.Host, ":")
+		if len(spl) == 2 {
+			ti.target = spl[0]
+			ti.port = spl[1]
+			ti.useFirstPort = false
+			if _, err := strconv.Atoi(ti.port); err != nil {
+				ti.resolveByPortName = true
+			} else {
+				ti.resolveByPortName = false
+			}
+		} else {
+			ti.target = spl[0]
+			ti.useFirstPort = true
+		}
+	} else if u.Scheme == dnsSchema {
+		ti.urlType = TargetTypeDNS
+		ti.target = u.Host
+	} else {
+		ti.urlType = TargetTypeDNS
+		ti.target = target
+	}
+	return ti, nil
+}
+
+// Resolver returns a naming.Resolver for gRPC.
+func (b *Balancer) Resolver() naming.Resolver {
+	return newResolver(b.client, b.Namespace)
+}
+
+// DialOption returns a grpc.DialOption with a RoundRobin balancer and the Kubernetes resolver.
+func (b *Balancer) DialOption() grpc.DialOption {
+	rs := newResolver(b.client, b.Namespace)
+	return grpc.WithBalancer(grpc.RoundRobin(rs))
+}
+
+// Dial parses the target and calls grpc.Dial, attaching the load balancer when the target is a kubernetes:// URL.
+func (b *Balancer) Dial(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	pt, err := parseTarget(target)
+	if err != nil {
+		return nil, err
+	}
+	switch pt.urlType {
+	case TargetTypeKubernetes:
+		if b.client == nil {
+			return nil, errors.New("application is not running inside kubernetes")
+		}
+		grpclog.Printf("kuberesolver: using kubernetes resolver target=%s", pt.target)
+		rs := newResolver(b.client, b.Namespace)
+		b.resolvers = append(b.resolvers, rs)
+		opts := append(opts, grpc.WithBalancer(grpc.RoundRobin(rs)))
+		return grpc.Dial(target, opts...)
+	case TargetTypeDNS:
+		return grpc.Dial(pt.target, opts...)
+	default:
+		return nil, errors.New("Unknown target type")
+	}
+}
+
+func (b *Balancer) Healthy() error {
+	for _, r := range b.resolvers {
+		if r.watcher != nil {
+			if len(r.watcher.endpoints) == 0 {
+				return fmt.Errorf("target does not have endpoints")
+			}
+		}
+	}
+	return nil
+}
+
+// IsInCluster returns true if the application is running inside a Kubernetes cluster.
+func (b *Balancer) IsInCluster() bool {
+	return b.client != nil
+}
+
+// New creates a Balancer with "default" namespace
+func New() *Balancer {
+	return NewWithNamespace("default")
+}
+
+// NewWithNamespace creates a Balancer with given namespace.
+func NewWithNamespace(namespace string) *Balancer {
+	client, err := newInClusterClient()
+	if err != nil {
+		grpclog.Printf("kuberesolver: application is not running inside kubernetes")
+	}
+	return &Balancer{
+		Namespace: namespace,
+		client:    client,
+	}
+}
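Since `parseTarget` is unexported, here is an in-package test sketch (file and test names are hypothetical) pinning down the three URL forms handled above:

```go
package kuberesolver

import "testing"

func TestParseTargetForms(t *testing.T) {
	// A named port resolves by port name via the Kubernetes API.
	ti, err := parseTarget("kubernetes://service-name:portname")
	if err != nil || ti.urlType != TargetTypeKubernetes || !ti.resolveByPortName {
		t.Fatalf("unexpected result: %+v, %v", ti, err)
	}

	// A numeric port is used as-is.
	ti, _ = parseTarget("kubernetes://service-name:8080")
	if ti.resolveByPortName || ti.port != "8080" {
		t.Fatalf("unexpected result: %+v", ti)
	}

	// Any other scheme (or none) falls back to DNS.
	ti, _ = parseTarget("dns://service-name:8080")
	if ti.urlType != TargetTypeDNS {
		t.Fatalf("unexpected result: %+v", ti)
	}
}
```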
diff --git a/vendor/github.com/sercand/kuberesolver/kubernetes.go b/vendor/github.com/sercand/kuberesolver/kubernetes.go
new file mode 100644
index 00000000..737880ee
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/kubernetes.go
@@ -0,0 +1,71 @@
+package kuberesolver
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"time"
+)
+
+const (
+	serviceAccountToken  = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+	serviceAccountCACert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+)
+
+type k8sClient struct {
+	host       string
+	token      string
+	httpClient *http.Client
+}
+
+func (kc *k8sClient) getRequest(url string) (*http.Request, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(kc.token) > 0 {
+		req.Header.Set("Authorization", "Bearer "+kc.token)
+	}
+	return req, nil
+}
+
+func (kc *k8sClient) Do(req *http.Request) (*http.Response, error) {
+	return kc.httpClient.Do(req)
+}
+
+/*
+KUBERNETES_SERVICE_PORT=443
+KUBERNETES_SERVICE_PORT_HTTPS=443
+KUBERNETES_SERVICE_HOST=10.0.0.1
+*/
+func newInClusterClient() (*k8sClient, error) {
+	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+	if len(host) == 0 || len(port) == 0 {
+		return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+	}
+	token, err := ioutil.ReadFile(serviceAccountToken)
+	if err != nil {
+		return nil, err
+	}
+	ca, err := ioutil.ReadFile(serviceAccountCACert)
+	if err != nil {
+		return nil, err
+	}
+	certPool := x509.NewCertPool()
+	certPool.AppendCertsFromPEM(ca)
+	transport := &http.Transport{TLSClientConfig: &tls.Config{
+		MinVersion: tls.VersionTLS10,
+		RootCAs:    certPool,
+	}}
+	// A zero timeout disables the client timeout; watch requests are long-lived streams.
+	httpClient := &http.Client{Transport: transport, Timeout: 0}
+
+	return &k8sClient{
+		host:       "https://" + net.JoinHostPort(host, port),
+		token:      string(token),
+		httpClient: httpClient,
+	}, nil
+}
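A sketch of how this client could be used for a one-shot (non-watch) read, written as if inside the package since everything here is unexported. `listEndpoints` is hypothetical, and the URL mirrors the watch path used by `resolver.go` below, minus the `/watch` segment:

```go
package kuberesolver

import (
	"fmt"
	"io/ioutil"
)

// listEndpoints issues an authenticated GET against the API server and
// returns the raw JSON body of the named Endpoints resource.
func listEndpoints(kc *k8sClient, namespace, service string) ([]byte, error) {
	url := fmt.Sprintf("%s/api/v1/namespaces/%s/endpoints/%s", kc.host, namespace, service)
	req, err := kc.getRequest(url)
	if err != nil {
		return nil, err
	}
	resp, err := kc.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}
```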
diff --git a/vendor/github.com/sercand/kuberesolver/models.go b/vendor/github.com/sercand/kuberesolver/models.go
new file mode 100644
index 00000000..3cf13fa6
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/models.go
@@ -0,0 +1,42 @@
+package kuberesolver
+
+type EventType string
+
+const (
+	Added    EventType = "ADDED"
+	Modified EventType = "MODIFIED"
+	Deleted  EventType = "DELETED"
+	Error    EventType = "ERROR"
+)
+
+// Event represents a single event to a watched resource.
+type Event struct {
+	Type   EventType `json:"type"`
+	Object Endpoints `json:"object"`
+}
+
+type Endpoints struct {
+	Kind       string   `json:"kind"`
+	ApiVersion string   `json:"apiVersion"`
+	Metadata   Metadata `json:"metadata"`
+	Subsets    []Subset `json:"subsets"`
+}
+
+type Metadata struct {
+	Name            string `json:"name"`
+	ResourceVersion string `json:"resourceVersion"`
+}
+
+type Subset struct {
+	Addresses []Address `json:"addresses"`
+	Ports     []Port    `json:"ports"`
+}
+
+type Address struct {
+	IP string `json:"ip"`
+}
+
+type Port struct {
+	Name string `json:"name"`
+	Port int    `json:"port"`
+}
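These structs mirror the watch-event JSON emitted by the Kubernetes endpoints API; a decoding sketch with an illustrative payload (`demoDecode` is not part of the vendored code):

```go
package kuberesolver

import (
	"encoding/json"
	"fmt"
)

func demoDecode() {
	payload := []byte(`{
		"type": "ADDED",
		"object": {
			"kind": "Endpoints",
			"subsets": [{
				"addresses": [{"ip": "10.1.2.3"}],
				"ports": [{"name": "grpc", "port": 8080}]
			}]
		}
	}`)
	var ev Event
	if err := json.Unmarshal(payload, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.Type, ev.Object.Subsets[0].Addresses[0].IP) // ADDED 10.1.2.3
}
```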
diff --git a/vendor/github.com/sercand/kuberesolver/resolver.go b/vendor/github.com/sercand/kuberesolver/resolver.go
new file mode 100644
index 00000000..1c6a92d4
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/resolver.go
@@ -0,0 +1,86 @@
+package kuberesolver
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/naming"
+)
+
+// kubeResolver resolves service names using Kubernetes endpoints.
+type kubeResolver struct {
+	k8sClient *k8sClient
+	namespace string
+	watcher   *watcher
+}
+
+// newResolver returns a new Kubernetes resolver.
+func newResolver(client *k8sClient, namespace string) *kubeResolver {
+	if namespace == "" {
+		namespace = "default"
+	}
+	return &kubeResolver{client, namespace, nil}
+}
+
+// Resolve creates a Kubernetes watcher for the named target.
+func (r *kubeResolver) Resolve(target string) (naming.Watcher, error) {
+	pt, err := parseTarget(target)
+	if err != nil {
+		return nil, err
+	}
+	resultChan := make(chan watchResult)
+	stopCh := make(chan struct{})
+	wtarget := pt.target
+	go until(func() {
+		err := r.watch(wtarget, stopCh, resultChan)
+		if err != nil {
+			grpclog.Printf("kuberesolver: watching ended with error='%v', will reconnect again", err)
+		}
+	}, time.Second, stopCh)
+
+	r.watcher = &watcher{
+		target:    pt,
+		endpoints: make(map[string]interface{}),
+		stopCh:    stopCh,
+		result:    resultChan,
+	}
+	return r.watcher, nil
+}
+
+func (r *kubeResolver) watch(target string, stopCh <-chan struct{}, resultCh chan<- watchResult) error {
+	u, err := url.Parse(fmt.Sprintf("%s/api/v1/watch/namespaces/%s/endpoints/%s",
+		r.k8sClient.host, r.namespace, target))
+	if err != nil {
+		return err
+	}
+	req, err := r.k8sClient.getRequest(u.String())
+	if err != nil {
+		return err
+	}
+	resp, err := r.k8sClient.Do(req)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		defer resp.Body.Close()
+		rbody, _ := ioutil.ReadAll(resp.Body)
+		return fmt.Errorf("invalid response code %d: %s", resp.StatusCode, rbody)
+	}
+	sw := newStreamWatcher(resp.Body)
+	for {
+		select {
+		case <-stopCh:
+			return nil
+		case up, more := <-sw.ResultChan():
+			if more {
+				resultCh <- watchResult{err: nil, ep: &up}
+			} else {
+				return nil
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/sercand/kuberesolver/stream.go b/vendor/github.com/sercand/kuberesolver/stream.go
new file mode 100644
index 00000000..51aab49b
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/stream.go
@@ -0,0 +1,105 @@
+package kuberesolver
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// watchInterface can be implemented by anything that knows how to watch and report changes.
+type watchInterface interface {
+	// Stops watching. Will close the channel returned by ResultChan(). Releases
+	// any resources used by the watch.
+	Stop()
+
+	// Returns a chan which will receive all the events. If an error occurs
+	// or Stop() is called, this channel will be closed, in which case the
+	// watch should be completely cleaned up.
+	ResultChan() <-chan Event
+}
+
+// streamWatcher turns any stream for which you can write a Decoder
+// into a watchInterface.
+type streamWatcher struct {
+	result  chan Event
+	r       io.ReadCloser
+	decoder *json.Decoder
+	sync.Mutex
+	stopped bool
+}
+
+// newStreamWatcher creates a streamWatcher from the given io.ReadCloser.
+func newStreamWatcher(r io.ReadCloser) watchInterface {
+	sw := &streamWatcher{
+		r:       r,
+		decoder: json.NewDecoder(r),
+		result:  make(chan Event),
+	}
+	go sw.receive()
+	return sw
+}
+
+// ResultChan implements Interface.
+func (sw *streamWatcher) ResultChan() <-chan Event {
+	return sw.result
+}
+
+// Stop implements Interface.
+func (sw *streamWatcher) Stop() {
+	sw.Lock()
+	defer sw.Unlock()
+	if !sw.stopped {
+		sw.stopped = true
+		sw.r.Close()
+	}
+}
+
+// stopping returns true if Stop() was called previously.
+func (sw *streamWatcher) stopping() bool {
+	sw.Lock()
+	defer sw.Unlock()
+	return sw.stopped
+}
+
+// receive reads result from the decoder in a loop and sends down the result channel.
+func (sw *streamWatcher) receive() {
+	defer close(sw.result)
+	defer sw.Stop()
+	for {
+		obj, err := sw.Decode()
+		if err != nil {
+			// Ignore expected error.
+			if sw.stopping() {
+				return
+			}
+			switch err {
+			case io.EOF:
+				// watch closed normally
+			case io.ErrUnexpectedEOF:
+				grpclog.Printf("kuberesolver: Unexpected EOF during watch stream event decoding: %v", err)
+			default:
+				grpclog.Printf("kuberesolver: Unable to decode an event from the watch stream: %v", err)
+			}
+			return
+		}
+		sw.result <- obj
+	}
+}
+
+// Decode blocks until it can return the next object from the stream. Returns an error
+// if the stream is closed or an object can't be decoded.
+func (sw *streamWatcher) Decode() (Event, error) {
+	var got Event
+	if err := sw.decoder.Decode(&got); err != nil {
+		return Event{}, err
+	}
+	switch got.Type {
+	case Added, Modified, Deleted, Error:
+		return got, nil
+	default:
+		return Event{}, fmt.Errorf("got invalid watch event type: %v", got.Type)
+	}
+}
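A sketch of the watcher in isolation, feeding it one event through an io.Pipe (in-package, since `newStreamWatcher` is unexported; `demoStreamWatcher` is hypothetical):

```go
package kuberesolver

import (
	"fmt"
	"io"
)

func demoStreamWatcher() {
	pr, pw := io.Pipe()
	sw := newStreamWatcher(pr)
	go func() {
		// One well-formed event, then a clean EOF.
		fmt.Fprintln(pw, `{"type":"MODIFIED","object":{}}`)
		pw.Close()
	}()
	// The result channel is closed once the stream ends.
	for ev := range sw.ResultChan() {
		fmt.Println(ev.Type) // MODIFIED
	}
}
```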
diff --git a/vendor/github.com/sercand/kuberesolver/util.go b/vendor/github.com/sercand/kuberesolver/util.go
new file mode 100644
index 00000000..e0726092
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/util.go
@@ -0,0 +1,35 @@
+package kuberesolver
+
+import (
+	"runtime/debug"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+func until(f func(), period time.Duration, stopCh <-chan struct{}) {
+	select {
+	case <-stopCh:
+		return
+	default:
+	}
+	for {
+		func() {
+			defer handleCrash()
+			f()
+		}()
+		select {
+		case <-stopCh:
+			return
+		case <-time.After(period):
+		}
+	}
+}
+
+// handleCrash simply catches a crash and logs an error. Meant to be called via defer.
+func handleCrash() {
+	if r := recover(); r != nil {
+		callers := string(debug.Stack())
+		grpclog.Printf("kuberesolver: recovered from panic: %#v (%v)\n%v", r, r, callers)
+	}
+}
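until is what keeps the resolver's watch loop reconnecting; a usage sketch (in-package, `demoUntil` is hypothetical):

```go
package kuberesolver

import (
	"fmt"
	"time"
)

// demoUntil runs a task once a second until stop is closed. A panic inside
// the task is recovered and logged by handleCrash, so the loop keeps going.
func demoUntil() {
	stop := make(chan struct{})
	go until(func() {
		fmt.Println("reconnect attempt")
	}, time.Second, stop)
	time.Sleep(3 * time.Second)
	close(stop)
}
```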
diff --git a/vendor/github.com/sercand/kuberesolver/watcher.go b/vendor/github.com/sercand/kuberesolver/watcher.go
new file mode 100644
index 00000000..42437bd4
--- /dev/null
+++ b/vendor/github.com/sercand/kuberesolver/watcher.go
@@ -0,0 +1,95 @@
+package kuberesolver
+
+import (
+	"net"
+	"strconv"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/naming"
+)
+
+type watchResult struct {
+	ep  *Event
+	err error
+}
+
+// A watcher provides name resolution updates from Kubernetes endpoints
+// identified by name.
+type watcher struct {
+	target    targetInfo
+	endpoints map[string]interface{}
+	stopCh    chan struct{}
+	result    chan watchResult
+	sync.Mutex
+	stopped bool
+}
+
+// Close closes the watcher, cleaning up any open connections.
+func (w *watcher) Close() {
+	close(w.stopCh)
+}
+
+// Next updates the endpoints for the name being watched.
+func (w *watcher) Next() ([]*naming.Update, error) {
+	updates := make([]*naming.Update, 0)
+	updatedEndpoints := make(map[string]interface{})
+	var ep Event
+
+	select {
+	case <-w.stopCh:
+		w.Lock()
+		if !w.stopped {
+			w.stopped = true
+		}
+		w.Unlock()
+		return updates, nil
+	case r := <-w.result:
+		if r.err == nil {
+			ep = *r.ep
+		} else {
+			return updates, r.err
+		}
+	}
+	for _, subset := range ep.Object.Subsets {
+		port := ""
+		if w.target.useFirstPort {
+			port = strconv.Itoa(subset.Ports[0].Port)
+		} else if w.target.resolveByPortName {
+			for _, p := range subset.Ports {
+				if p.Name == w.target.port {
+					port = strconv.Itoa(p.Port)
+					break
+				}
+			}
+		} else {
+			port = w.target.port
+		}
+
+		if len(port) == 0 {
+			port = strconv.Itoa(subset.Ports[0].Port)
+		}
+		for _, address := range subset.Addresses {
+			endpoint := net.JoinHostPort(address.IP, port)
+			updatedEndpoints[endpoint] = nil
+		}
+	}
+
+	// Create updates to add new endpoints.
+	for addr, md := range updatedEndpoints {
+		if _, ok := w.endpoints[addr]; !ok {
+			updates = append(updates, &naming.Update{Op: naming.Add, Addr: addr, Metadata: md})
+			grpclog.Printf("kuberesolver: %s ADDED to %s", addr, w.target.target)
+		}
+	}
+
+	// Create updates to delete old endpoints.
+	for addr := range w.endpoints {
+		if _, ok := updatedEndpoints[addr]; !ok {
+			updates = append(updates, &naming.Update{Op: naming.Delete, Addr: addr})
+			grpclog.Printf("kuberesolver: %s DELETED from %s", addr, w.target.target)
+		}
+	}
+	w.endpoints = updatedEndpoints
+	return updates, nil
+}
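Next boils down to a set difference between the previous and freshly watched endpoint sets; the same diff logic in isolation (a sketch, independent of the gRPC naming plumbing; `diffEndpoints` is hypothetical):

```go
package kuberesolver

// diffEndpoints mirrors the comparison Next performs: addresses only in curr
// become Add updates, addresses only in prev become Delete updates.
func diffEndpoints(prev, curr map[string]interface{}) (added, deleted []string) {
	for addr := range curr {
		if _, ok := prev[addr]; !ok {
			added = append(added, addr)
		}
	}
	for addr := range prev {
		if _, ok := curr[addr]; !ok {
			deleted = append(deleted, addr)
		}
	}
	return added, deleted
}
```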
diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go
new file mode 100644
index 00000000..78e4a771
--- /dev/null
+++ b/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go
@@ -0,0 +1,150 @@
+package loki
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"sync"
+
+	"github.com/apache/thrift/lib/go/thrift"
+	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
+)
+
+// Sized to support a service doing 100 QPS with a 15s scrape interval.
+var globalCollector = NewCollector(15 * 100)
+
+type Collector struct {
+	mtx      sync.Mutex
+	traceIDs map[int64]int // map from trace ID to index in traces
+	traces   []trace
+	next     int
+	length   int
+}
+
+type trace struct {
+	traceID int64
+	spans   []*zipkincore.Span
+}
+
+func NewCollector(capacity int) *Collector {
+	return &Collector{
+		traceIDs: make(map[int64]int, capacity),
+		traces:   make([]trace, capacity),
+		next:     0,
+		length:   0,
+	}
+}
+
+func (c *Collector) Collect(span *zipkincore.Span) error {
+	if span == nil {
+		return fmt.Errorf("cannot collect nil span")
+	}
+
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	traceID := span.GetTraceID()
+	idx, ok := c.traceIDs[traceID]
+	if !ok {
+		// Pick a slot in c.traces for this trace.
+		idx = c.next
+		c.next++
+		c.next %= cap(c.traces) // wrap
+
+		// If the slot is occupied, clear the old trace's ID from the index;
+		// otherwise we are growing the number of stored traces.
+		if c.length == cap(c.traces) {
+			delete(c.traceIDs, c.traces[idx].traceID)
+		} else {
+			c.length++
+		}
+
+		// Initialise said slot.
+		c.traceIDs[traceID] = idx
+		c.traces[idx].traceID = traceID
+		c.traces[idx].spans = c.traces[idx].spans[:0]
+	}
+
+	c.traces[idx].spans = append(c.traces[idx].spans, span)
+	return nil
+}
+
+func (*Collector) Close() error {
+	return nil
+}
+
+func (c *Collector) gather() []*zipkincore.Span {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	spans := make([]*zipkincore.Span, 0, c.length)
+	i, count := c.next-c.length, 0
+	if i < 0 {
+		i = cap(c.traces) + i
+	}
+	for count < c.length {
+		i %= cap(c.traces)
+		spans = append(spans, c.traces[i].spans...)
+		delete(c.traceIDs, c.traces[i].traceID)
+		i++
+		count++
+	}
+	c.length = 0
+	if len(c.traceIDs) != 0 {
+		panic("didn't clear all trace ids")
+	}
+	return spans
+}
+
+func (c *Collector) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	spans := c.gather()
+	if err := WriteSpans(spans, w); err != nil {
+		log.Printf("error writing spans: %v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func WriteSpans(spans []*zipkincore.Span, w io.Writer) error {
+	transport := thrift.NewStreamTransportW(w)
+	protocol := thrift.NewTCompactProtocol(transport)
+
+	if err := protocol.WriteListBegin(thrift.STRUCT, len(spans)); err != nil {
+		return err
+	}
+	for _, span := range spans {
+		if err := span.Write(protocol); err != nil {
+			return err
+		}
+	}
+	if err := protocol.WriteListEnd(); err != nil {
+		return err
+	}
+	return protocol.Flush()
+}
+
+func ReadSpans(r io.Reader) ([]*zipkincore.Span, error) {
+	transport := thrift.NewStreamTransportR(r)
+	protocol := thrift.NewTCompactProtocol(transport)
+	ttype, size, err := protocol.ReadListBegin()
+	if err != nil {
+		return nil, err
+	}
+	spans := make([]*zipkincore.Span, 0, size)
+	if ttype != thrift.STRUCT {
+		return nil, fmt.Errorf("unexpected type: %v", ttype)
+	}
+	for i := 0; i < size; i++ {
+		span := zipkincore.NewSpan()
+		if err := span.Read(protocol); err != nil {
+			return nil, err
+		}
+		spans = append(spans, span)
+	}
+	return spans, protocol.ReadListEnd()
+}
+
+func Handler() http.Handler {
+	return globalCollector
+}
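WriteSpans and ReadSpans are symmetric over the Thrift compact protocol; a round-trip sketch (assuming the vendored thrift and zipkincore packages above; `demoRoundTrip` is not part of the vendored code):

```go
package loki

import (
	"bytes"
	"fmt"

	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)

func demoRoundTrip() {
	span := zipkincore.NewSpan()
	span.TraceID = 42

	var buf bytes.Buffer
	if err := WriteSpans([]*zipkincore.Span{span}, &buf); err != nil {
		panic(err)
	}
	spans, err := ReadSpans(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(spans), spans[0].TraceID) // 1 42
}
```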
diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go
new file mode 100644
index 00000000..b3efe9e9
--- /dev/null
+++ b/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go
@@ -0,0 +1,27 @@
+package loki
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/openzipkin/zipkin-go-opentracing"
+)
+
+func NewTracer() (opentracing.Tracer, error) {
+	// create recorder.
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, err
+	}
+	recorder := zipkintracer.NewRecorder(globalCollector, false, hostname, "")
+
+	// create tracer.
+	tracer, err := zipkintracer.NewTracer(recorder)
+	if err != nil {
+		// Return rather than os.Exit: let the caller decide how to fail.
+		return nil, fmt.Errorf("unable to create Zipkin tracer: %v", err)
+	}
+
+	return tracer, nil
+}
diff --git a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go
new file mode 100644
index 00000000..0ccad0a6
--- /dev/null
+++ b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go
@@ -0,0 +1,183 @@
+package server
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+	"sync"
+
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
+	"github.com/mwitkow/go-grpc-middleware"
+	"github.com/opentracing/opentracing-go"
+	"github.com/sercand/kuberesolver"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+
+	"github.com/weaveworks/common/httpgrpc"
+	"github.com/weaveworks/common/middleware"
+)
+
+// Server implements HTTPServer.  HTTPServer is a generated interface that gRPC
+// servers must implement.
+type Server struct {
+	handler http.Handler
+}
+
+// NewServer makes a new Server.
+func NewServer(handler http.Handler) *Server {
+	return &Server{
+		handler: handler,
+	}
+}
+
+// Handle implements HTTPServer.
+func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) {
+	req, err := http.NewRequest(r.Method, r.Url, ioutil.NopCloser(bytes.NewReader(r.Body)))
+	if err != nil {
+		return nil, err
+	}
+	req = req.WithContext(ctx)
+	toHeader(r.Headers, req.Header)
+	req.RequestURI = r.Url
+	recorder := httptest.NewRecorder()
+	s.handler.ServeHTTP(recorder, req)
+	resp := &httpgrpc.HTTPResponse{
+		Code:    int32(recorder.Code),
+		Headers: fromHeader(recorder.Header()),
+		Body:    recorder.Body.Bytes(),
+	}
+	if recorder.Code/100 == 5 {
+		return nil, httpgrpc.ErrorFromHTTPResponse(resp)
+	}
+	return resp, nil
+}
+
+// Client is a http.Handler that forwards the request over gRPC.
+type Client struct {
+	mtx       sync.RWMutex
+	service   string
+	namespace string
+	port      string
+	client    httpgrpc.HTTPClient
+	conn      *grpc.ClientConn
+}
+
+// ParseURL deals with direct:// style URLs, as well as kubernetes:// URLs.
+// For backwards compatibility it treats URLs without schemes as kubernetes://.
+func ParseURL(unparsed string) (string, []grpc.DialOption, error) {
+	parsed, err := url.Parse(unparsed)
+	if err != nil {
+		return "", nil, err
+	}
+
+	scheme, host := parsed.Scheme, parsed.Host
+	if !strings.Contains(unparsed, "://") {
+		scheme, host = "kubernetes", unparsed
+	}
+
+	switch scheme {
+	case "direct":
+		return host, nil, nil
+
+	case "kubernetes":
+		host, port, err := net.SplitHostPort(host)
+		if err != nil {
+			return "", nil, err
+		}
+		parts := strings.SplitN(host, ".", 2)
+		service, namespace := parts[0], "default"
+		if len(parts) == 2 {
+			namespace = parts[1]
+		}
+		balancer := kuberesolver.NewWithNamespace(namespace)
+		address := fmt.Sprintf("kubernetes://%s:%s", service, port)
+		dialOptions := []grpc.DialOption{balancer.DialOption()}
+		return address, dialOptions, nil
+
+	default:
+		return "", nil, fmt.Errorf("unrecognised scheme: %s", parsed.Scheme)
+	}
+}
+
+// NewClient makes a new Client, given a kubernetes service address.
+func NewClient(address string) (*Client, error) {
+	address, dialOptions, err := ParseURL(address)
+	if err != nil {
+		return nil, err
+	}
+
+	dialOptions = append(
+		dialOptions,
+		grpc.WithInsecure(),
+		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
+			otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
+			middleware.ClientUserHeaderInterceptor,
+		)),
+	)
+
+	conn, err := grpc.Dial(address, dialOptions...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		client: httpgrpc.NewHTTPClient(conn),
+		conn:   conn,
+	}, nil
+}
+
+// ServeHTTP implements http.Handler
+func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	req := &httpgrpc.HTTPRequest{
+		Method:  r.Method,
+		Url:     r.RequestURI,
+		Body:    body,
+		Headers: fromHeader(r.Header),
+	}
+
+	resp, err := c.client.Handle(r.Context(), req)
+	if err != nil {
+		// Some errors will actually contain a valid resp; we just need to unpack it.
+		var ok bool
+		resp, ok = httpgrpc.HTTPResponseFromError(err)
+
+		if !ok {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+
+	toHeader(resp.Headers, w.Header())
+	w.WriteHeader(int(resp.Code))
+	if _, err := w.Write(resp.Body); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func toHeader(hs []*httpgrpc.Header, header http.Header) {
+	for _, h := range hs {
+		header[h.Key] = h.Values
+	}
+}
+
+func fromHeader(hs http.Header) []*httpgrpc.Header {
+	result := make([]*httpgrpc.Header, 0, len(hs))
+	for k, vs := range hs {
+		result = append(result, &httpgrpc.Header{
+			Key:    k,
+			Values: vs,
+		})
+	}
+	return result
+}
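To make the address forms concrete, a sketch of ParseURL's behaviour (service and namespace names are placeholders; the kubernetes:// case also runs off-cluster, it just logs a warning when building the balancer):

```go
package main

import (
	"fmt"

	httpgrpc_server "github.com/weaveworks/common/httpgrpc/server"
)

func main() {
	// direct:// bypasses the Kubernetes balancer entirely.
	addr, _, err := httpgrpc_server.ParseURL("direct://localhost:9095")
	fmt.Println(addr, err) // localhost:9095 <nil>

	// kubernetes:// (or no scheme at all) resolves via the Kubernetes API;
	// "querier.cortex" means service "querier" in namespace "cortex".
	addr, opts, err := httpgrpc_server.ParseURL("kubernetes://querier.cortex:9095")
	fmt.Println(addr, len(opts), err) // kubernetes://querier:9095 1 <nil>
}
```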
diff --git a/vendor/github.com/weaveworks/common/server/fake_server.proto b/vendor/github.com/weaveworks/common/server/fake_server.proto
new file mode 100644
index 00000000..2520d944
--- /dev/null
+++ b/vendor/github.com/weaveworks/common/server/fake_server.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+
+package server;
+
+option go_package = "github.com/weaveworks/common/server";
+
+import "google/protobuf/empty.proto";
+
+service FakeServer {
+    rpc Succeed(google.protobuf.Empty) returns (google.protobuf.Empty) {};
+    rpc FailWithError(google.protobuf.Empty) returns (google.protobuf.Empty) {};
+    rpc FailWithHTTPError(FailWithHTTPErrorRequest) returns (google.protobuf.Empty) {};
+}
+
+message FailWithHTTPErrorRequest {
+  int32 Code = 1;
+}
diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go
new file mode 100644
index 00000000..8cf02ee4
--- /dev/null
+++ b/vendor/github.com/weaveworks/common/server/server.go
@@ -0,0 +1,189 @@
+package server
+
+import (
+	"flag"
+	"fmt"
+	"net"
+	"net/http"
+	_ "net/http/pprof" // anonymous import to get the pprof handler registered
+	"time"
+
+	"github.com/gorilla/mux"
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
+	"github.com/mwitkow/go-grpc-middleware"
+	"github.com/opentracing-contrib/go-stdlib/nethttp"
+	"github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+
+	"github.com/weaveworks-experiments/loki/pkg/client"
+	"github.com/weaveworks/common/httpgrpc"
+	httpgrpc_server "github.com/weaveworks/common/httpgrpc/server"
+	"github.com/weaveworks/common/instrument"
+	"github.com/weaveworks/common/middleware"
+	"github.com/weaveworks/common/signals"
+)
+
+func init() {
+	tracer, err := loki.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create tracer: %v", err))
+	}
+	// Make the Loki tracer the process-wide default.
+	opentracing.InitGlobalTracer(tracer)
+}
+
+// Config for a Server
+type Config struct {
+	MetricsNamespace string
+	HTTPListenPort   int
+	GRPCListenPort   int
+
+	RegisterInstrumentation bool
+	ExcludeRequestInLog     bool
+
+	ServerGracefulShutdownTimeout time.Duration
+	HTTPServerReadTimeout         time.Duration
+	HTTPServerWriteTimeout        time.Duration
+	HTTPServerIdleTimeout         time.Duration
+
+	GRPCOptions    []grpc.ServerOption
+	GRPCMiddleware []grpc.UnaryServerInterceptor
+	HTTPMiddleware []middleware.Interface
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	f.IntVar(&cfg.HTTPListenPort, "server.http-listen-port", 80, "HTTP server listen port.")
+	f.IntVar(&cfg.GRPCListenPort, "server.grpc-listen-port", 9095, "gRPC server listen port.")
+	f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the instrumentation handlers (/metrics etc).")
+	f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 5*time.Second, "Timeout for graceful shutdowns")
+	f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 5*time.Second, "Read timeout for HTTP server")
+	f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 5*time.Second, "Write timeout for HTTP server")
+	f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server")
+}
+
+// Server wraps an HTTP and a gRPC server, plus some common initialization.
+//
+// Servers are automatically instrumented for Prometheus metrics and Loki
+// tracing, and serve HTTP over gRPC.
+type Server struct {
+	cfg          Config
+	handler      *signals.Handler
+	httpListener net.Listener
+	grpcListener net.Listener
+	httpServer   *http.Server
+
+	HTTP *mux.Router
+	GRPC *grpc.Server
+}
+
+// New makes a new Server
+func New(cfg Config) (*Server, error) {
+	// Setup listeners first, so we can fail early if the port is in use.
+	httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.HTTPListenPort))
+	if err != nil {
+		return nil, err
+	}
+
+	grpcListener, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.GRPCListenPort))
+	if err != nil {
+		return nil, err
+	}
+
+	// Prometheus histograms for requests.
+	requestDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: cfg.MetricsNamespace,
+		Name:      "request_duration_seconds",
+		Help:      "Time (in seconds) spent serving HTTP requests.",
+		Buckets:   instrument.DefBuckets,
+	}, []string{"method", "route", "status_code", "ws"})
+	prometheus.MustRegister(requestDuration)
+
+	// Setup gRPC server
+	serverLog := middleware.GRPCServerLog{WithRequest: !cfg.ExcludeRequestInLog}
+	grpcMiddleware := []grpc.UnaryServerInterceptor{
+		serverLog.UnaryServerInterceptor,
+		middleware.ServerInstrumentInterceptor(requestDuration),
+		otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()),
+	}
+	grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...)
+	grpcOptions := []grpc.ServerOption{
+		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+			grpcMiddleware...,
+		)),
+	}
+	grpcOptions = append(grpcOptions, cfg.GRPCOptions...)
+	grpcServer := grpc.NewServer(grpcOptions...)
+
+	// Setup HTTP server
+	router := mux.NewRouter()
+	if cfg.RegisterInstrumentation {
+		RegisterInstrumentation(router)
+	}
+	httpMiddleware := []middleware.Interface{
+		middleware.Log{},
+		middleware.Instrument{
+			Duration:     requestDuration,
+			RouteMatcher: router,
+		},
+		middleware.Func(func(handler http.Handler) http.Handler {
+			return nethttp.Middleware(opentracing.GlobalTracer(), handler)
+		}),
+	}
+	httpMiddleware = append(httpMiddleware, cfg.HTTPMiddleware...)
+	httpServer := &http.Server{
+		ReadTimeout:  cfg.HTTPServerReadTimeout,
+		WriteTimeout: cfg.HTTPServerWriteTimeout,
+		IdleTimeout:  cfg.HTTPServerIdleTimeout,
+		Handler:      middleware.Merge(httpMiddleware...).Wrap(router),
+	}
+
+	return &Server{
+		cfg:          cfg,
+		httpListener: httpListener,
+		grpcListener: grpcListener,
+		httpServer:   httpServer,
+		handler:      signals.NewHandler(log.StandardLogger()),
+
+		HTTP: router,
+		GRPC: grpcServer,
+	}, nil
+}
+
+// RegisterInstrumentation on the given router.
+func RegisterInstrumentation(router *mux.Router) {
+	router.Handle("/metrics", prometheus.Handler())
+	router.Handle("/traces", loki.Handler())
+	router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux)
+}
+
+// Run the server; blocks until SIGTERM is received.
+func (s *Server) Run() {
+	go s.httpServer.Serve(s.httpListener)
+
+	// Setup gRPC server
+	// for HTTP over gRPC, ensure we don't double-count the middleware
+	httpgrpc.RegisterHTTPServer(s.GRPC, httpgrpc_server.NewServer(s.HTTP))
+	go s.GRPC.Serve(s.grpcListener)
+	defer s.GRPC.GracefulStop()
+
+	// Wait for a signal
+	s.handler.Loop()
+}
+
+// Stop unblocks Run().
+func (s *Server) Stop() {
+	s.handler.Stop()
+}
+
+// Shutdown the server gracefully. Should be deferred after New().
+func (s *Server) Shutdown() {
+	ctx, cancel := context.WithTimeout(context.Background(), s.cfg.ServerGracefulShutdownTimeout)
+	defer cancel() // releases resources if httpServer.Shutdown completes before timeout elapses
+
+	s.httpServer.Shutdown(ctx)
+	s.GRPC.Stop()
+}
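A minimal sketch of standing up the combined server (port values and the /hello route are illustrative):

```go
package main

import (
	"net/http"

	"github.com/weaveworks/common/server"
)

func main() {
	s, err := server.New(server.Config{
		HTTPListenPort: 8080,
		GRPCListenPort: 9095,
	})
	if err != nil {
		panic(err)
	}
	defer s.Shutdown()

	s.HTTP.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello\n"))
	})

	s.Run() // blocks until SIGTERM (or s.Stop())
}
```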
diff --git a/vendor/github.com/weaveworks/common/signals/signals.go b/vendor/github.com/weaveworks/common/signals/signals.go
new file mode 100644
index 00000000..cca60b9b
--- /dev/null
+++ b/vendor/github.com/weaveworks/common/signals/signals.go
@@ -0,0 +1,75 @@
+package signals
+
+import (
+	"os"
+	"os/signal"
+	"runtime"
+	"syscall"
+)
+
+// SignalReceiver represents a subsystem/server/... that can be stopped, or
+// queried about its status, with a signal.
+type SignalReceiver interface {
+	Stop() error
+}
+
+// Logger is something to log to.
+type Logger interface {
+	Infof(format string, args ...interface{})
+}
+
+// Handler handles signals, can be interrupted.
+// On SIGINT or SIGTERM it will exit, on SIGQUIT it
+// will dump goroutine stacks to the Logger.
+type Handler struct {
+	log       Logger
+	receivers []SignalReceiver
+	quit      chan struct{}
+}
+
+// NewHandler makes a new Handler.
+func NewHandler(log Logger, receivers ...SignalReceiver) *Handler {
+	return &Handler{
+		log:       log,
+		receivers: receivers,
+		quit:      make(chan struct{}),
+	}
+}
+
+// Stop the handler
+func (h *Handler) Stop() {
+	close(h.quit)
+}
+
+// Loop handles signals.
+func (h *Handler) Loop() {
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
+	buf := make([]byte, 1<<20)
+	for {
+		select {
+		case <-h.quit:
+			h.log.Infof("=== Handler.Stop()'d ===")
+			return
+		case sig := <-sigs:
+			switch sig {
+			case syscall.SIGINT, syscall.SIGTERM:
+				h.log.Infof("=== received SIGINT/SIGTERM ===\n*** exiting")
+				for _, subsystem := range h.receivers {
+					subsystem.Stop()
+				}
+				return
+			case syscall.SIGQUIT:
+				stacklen := runtime.Stack(buf, true)
+				h.log.Infof("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end", buf[:stacklen])
+			}
+		}
+	}
+}
+
+// SignalHandlerLoop blocks until it receives a SIGINT or SIGTERM, at which
+// point it returns; on SIGQUIT it prints a goroutine stack dump and keeps
+// running.
+func SignalHandlerLoop(log Logger, ss ...SignalReceiver) {
+	NewHandler(log, ss...).Loop()
+}
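Typical usage is to park main on the handler (a sketch; logrus satisfies the Logger interface via Infof):

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/weaveworks/common/signals"
)

func main() {
	// Blocks until SIGINT/SIGTERM; SIGQUIT dumps goroutine stacks
	// to the logger without exiting.
	signals.SignalHandlerLoop(log.StandardLogger())
}
```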
diff --git a/vendor/github.com/weaveworks/cortex/pkg/util/wire/BUILD.bazel b/vendor/github.com/weaveworks/cortex/pkg/util/wire/BUILD.bazel
new file mode 100644
index 00000000..54af2338
--- /dev/null
+++ b/vendor/github.com/weaveworks/cortex/pkg/util/wire/BUILD.bazel
@@ -0,0 +1,8 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["bytes.go"],
+    importpath = "github.com/weaveworks/cortex/pkg/util/wire",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/weaveworks/cortex/pkg/util/wire/bytes.go b/vendor/github.com/weaveworks/cortex/pkg/util/wire/bytes.go
new file mode 100644
index 00000000..dfabadd8
--- /dev/null
+++ b/vendor/github.com/weaveworks/cortex/pkg/util/wire/bytes.go
@@ -0,0 +1,39 @@
+package wire
+
+import (
+	"bytes"
+)
+
+// Bytes exists to stop proto copying the byte array
+type Bytes []byte
+
+// Marshal just returns bs
+func (bs *Bytes) Marshal() ([]byte, error) {
+	return []byte(*bs), nil
+}
+
+// MarshalTo copies Bytes to data
+func (bs *Bytes) MarshalTo(data []byte) (n int, err error) {
+	return copy(data, *bs), nil
+}
+
+// Unmarshal updates Bytes to be data, without a copy
+func (bs *Bytes) Unmarshal(data []byte) error {
+	*bs = data
+	return nil
+}
+
+// Size returns the length of Bytes
+func (bs *Bytes) Size() int {
+	return len(*bs)
+}
+
+// Equal returns true if other equals Bytes
+func (bs *Bytes) Equal(other Bytes) bool {
+	return bytes.Equal(*bs, other)
+}
+
+// Compare Bytes to other
+func (bs *Bytes) Compare(other Bytes) int {
+	return bytes.Compare(*bs, other)
+}
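A quick sketch of the zero-copy behaviour (package main, illustrative values):

```go
package main

import (
	"fmt"

	"github.com/weaveworks/cortex/pkg/util/wire"
)

func main() {
	var b wire.Bytes
	// Unmarshal aliases the input slice rather than copying it.
	if err := b.Unmarshal([]byte("payload")); err != nil {
		panic(err)
	}
	out, _ := b.Marshal()
	fmt.Println(string(out), b.Size()) // payload 7
}
```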
-- 
GitLab